Remove the now-unused NET_{LOCK,UNLOCK,ASSERT}_GIANT() macros, which
previously conditionally acquired Giant based on debug.mpsafenet.  As that
has now been removed, they are no longer required.  Removing them
significantly simplifies error-handling in the socket layer, eliminating
quite a bit of unwinding of locking in error cases.

While here, clean up the now-unneeded opt_net.h, which was previously used
for the NET_WITH_GIANT kernel option.  Also clean up some related gotos for
consistency.
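
To illustrate the error-handling simplification, here is a minimal sketch
(not the literal diff; the _before/_after names are illustrative, and the
getsock()/sobind()/fdrop() calls follow the kern_bind() hunk below).
Previously every exit path had to unwind through a label so that
NET_UNLOCK_GIANT() ran before returning; with the macros gone, early
returns suffice:

/* Before: all exits funnel through done2 so Giant is dropped. */
static int
kern_bind_before(struct thread *td, int fd, struct sockaddr *sa)
{
	struct file *fp;
	int error;

	NET_LOCK_GIANT();
	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
	if (error)
		goto done2;
	error = sobind(fp->f_data, sa, td);
	fdrop(fp, td);
done2:
	NET_UNLOCK_GIANT();
	return (error);
}

/* After: no lock to release, so errors return directly. */
static int
kern_bind_after(struct thread *td, int fd, struct sockaddr *sa)
{
	struct file *fp;
	int error;

	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
	if (error)
		return (error);
	error = sobind(fp->f_data, sa, td);
	fdrop(fp, td);
	return (error);
}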

Reviewed by:	bz, csjp
Tested by:	kris
Approved by:	re (kensmith)
Robert Watson 2007-08-06 14:26:03 +00:00
parent ec2af96ad1
commit 0bf686c125
34 changed files with 56 additions and 310 deletions

View File

@ -670,7 +670,6 @@ linux_connect(struct thread *td, struct linux_connect_args *args)
* socket and use the file descriptor reference instead of
* creating a new one.
*/
NET_LOCK_GIANT();
error = fgetsock(td, linux_args.s, &so, &fflag);
if (error == 0) {
error = EISCONN;
@ -683,7 +682,6 @@ linux_connect(struct thread *td, struct linux_connect_args *args)
}
fputsock(so);
}
NET_UNLOCK_GIANT();
return (error);
}

View File

@ -383,7 +383,6 @@ LIBALIAS
MBUF_STRESS_TEST
NCP
NETATALK opt_atalk.h
NET_WITH_GIANT opt_net.h
PPP_BSDCOMP opt_ppp.h
PPP_DEFLATE opt_ppp.h
PPP_FILTER opt_ppp.h

View File

@ -888,11 +888,9 @@ ath_bmiss_proc(void *arg, int pending)
* truly a bmiss we'll get another interrupt soon and that'll
* be dispatched up for processing.
*/
if (tsf - lastrx > bmisstimeout) {
NET_LOCK_GIANT();
if (tsf - lastrx > bmisstimeout)
ieee80211_beacon_miss(ic);
NET_UNLOCK_GIANT();
} else
else
sc->sc_stats.ast_bmiss_phantom++;
}
}
@ -3447,7 +3445,6 @@ ath_rx_proc(void *arg, int npending)
int16_t nf;
u_int64_t tsf;
NET_LOCK_GIANT(); /* XXX */
DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
ngood = 0;
@ -3715,7 +3712,6 @@ ath_rx_proc(void *arg, int npending)
!IFQ_IS_EMPTY(&ifp->if_snd))
ath_start(ifp);
NET_UNLOCK_GIANT(); /* XXX */
#undef PA2DESC
}

View File

@ -1474,7 +1474,6 @@ em_handle_rxtx(void *context, int pending)
struct adapter *adapter = context;
struct ifnet *ifp;
NET_LOCK_GIANT();
ifp = adapter->ifp;
/*
@ -1493,7 +1492,6 @@ em_handle_rxtx(void *context, int pending)
}
em_enable_intr(adapter);
NET_UNLOCK_GIANT();
}
/*********************************************************************

View File

@ -244,9 +244,7 @@ streamsopen(struct cdev *dev, int oflags, int devtype, struct thread *td)
return error;
/* An extra reference on `fp' has been held for us by falloc(). */
NET_LOCK_GIANT();
error = socreate(family, &so, type, protocol, td->td_ucred, td);
NET_UNLOCK_GIANT();
if (error) {
fdclose(fdp, fp, fd, td);
fdrop(fp, td);

View File

@ -2098,8 +2098,6 @@ fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp)
struct file *fp;
int error;
NET_ASSERT_GIANT();
*spp = NULL;
if (fflagp != NULL)
*fflagp = 0;
@ -2129,7 +2127,6 @@ void
fputsock(struct socket *so)
{
NET_ASSERT_GIANT();
ACCEPT_LOCK();
SOCK_LOCK(so);
sorele(so);

View File

@ -329,7 +329,6 @@ ether_poll(int count)
{
int i;
NET_LOCK_GIANT();
mtx_lock(&poll_mtx);
if (count > poll_each_burst)
@ -339,7 +338,6 @@ ether_poll(int count)
pr[i].handler(pr[i].ifp, POLL_ONLY, count);
mtx_unlock(&poll_mtx);
NET_UNLOCK_GIANT();
}
/*
@ -366,8 +364,6 @@ netisr_pollmore()
struct timeval t;
int kern_load;
NET_ASSERT_GIANT();
mtx_lock(&poll_mtx);
phase = 5;
if (residual_burst > 0) {
@ -417,8 +413,6 @@ netisr_poll(void)
int i, cycles;
enum poll_cmd arg = POLL_ONLY;
NET_ASSERT_GIANT();
mtx_lock(&poll_mtx);
phase = 3;
if (residual_burst == 0) { /* first call in this tick */
@ -456,8 +450,6 @@ ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
KASSERT(h != NULL, ("%s: handler is NULL", __func__));
KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));
NET_ASSERT_GIANT();
mtx_lock(&poll_mtx);
if (poll_handlers >= POLL_LIST_LEN) {
/*
@ -504,7 +496,6 @@ ether_poll_deregister(struct ifnet *ifp)
KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));
NET_ASSERT_GIANT();
mtx_lock(&poll_mtx);
for (i = 0 ; i < poll_handlers ; i++)
@ -547,7 +538,6 @@ poll_switch(SYSCTL_HANDLER_ARGS)
polling = val;
NET_LOCK_GIANT();
IFNET_RLOCK();
TAILQ_FOREACH(ifp, &ifnet, if_link) {
if (ifp->if_capabilities & IFCAP_POLLING) {
@ -565,7 +555,6 @@ poll_switch(SYSCTL_HANDLER_ARGS)
}
}
IFNET_RUNLOCK();
NET_UNLOCK_GIANT();
log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8)");

View File

@ -73,21 +73,16 @@ soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
int flags, struct thread *td)
{
struct socket *so = fp->f_data;
#ifdef MAC
int error;
NET_LOCK_GIANT();
#ifdef MAC
SOCK_LOCK(so);
error = mac_check_socket_receive(active_cred, so);
SOCK_UNLOCK(so);
if (error) {
NET_UNLOCK_GIANT();
if (error)
return (error);
}
#endif
error = soreceive(so, 0, uio, 0, 0, 0);
NET_UNLOCK_GIANT();
return (error);
return (soreceive(so, 0, uio, 0, 0, 0));
}
/* ARGSUSED */
@ -98,15 +93,12 @@ soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
struct socket *so = fp->f_data;
int error;
NET_LOCK_GIANT();
#ifdef MAC
SOCK_LOCK(so);
error = mac_check_socket_send(active_cred, so);
SOCK_UNLOCK(so);
if (error) {
NET_UNLOCK_GIANT();
if (error)
return (error);
}
#endif
error = sosend(so, 0, uio, 0, 0, 0, uio->uio_td);
if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
@ -114,7 +106,6 @@ soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
psignal(uio->uio_td->td_proc, SIGPIPE);
PROC_UNLOCK(uio->uio_td->td_proc);
}
NET_UNLOCK_GIANT();
return (error);
}
@ -125,9 +116,7 @@ soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
struct socket *so = fp->f_data;
int error = 0;
NET_LOCK_GIANT();
switch (cmd) {
case FIONBIO:
SOCK_LOCK(so);
if (*(int *)data)
@ -207,8 +196,7 @@ soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
(so, cmd, data, 0, td));
break;
}
NET_UNLOCK_GIANT();
return(error);
return (error);
}
int
@ -216,22 +204,16 @@ soo_poll(struct file *fp, int events, struct ucred *active_cred,
struct thread *td)
{
struct socket *so = fp->f_data;
#ifdef MAC
int error;
NET_LOCK_GIANT();
#ifdef MAC
SOCK_LOCK(so);
error = mac_check_socket_poll(active_cred, so);
SOCK_UNLOCK(so);
if (error) {
NET_UNLOCK_GIANT();
if (error)
return (error);
}
#endif
error = sopoll(so, events, fp->f_cred, td);
NET_UNLOCK_GIANT();
return (error);
return (sopoll(so, events, fp->f_cred, td));
}
int
@ -239,19 +221,18 @@ soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
struct thread *td)
{
struct socket *so = fp->f_data;
#ifdef MAC
int error;
#endif
bzero((caddr_t)ub, sizeof (*ub));
ub->st_mode = S_IFSOCK;
NET_LOCK_GIANT();
#ifdef MAC
SOCK_LOCK(so);
error = mac_check_socket_stat(active_cred, so);
SOCK_UNLOCK(so);
if (error) {
NET_UNLOCK_GIANT();
if (error)
return (error);
}
#endif
/*
* If SBS_CANTRCVMORE is set, but there's still data left in the
@ -269,9 +250,7 @@ soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
ub->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
ub->st_uid = so->so_cred->cr_uid;
ub->st_gid = so->so_cred->cr_gid;
error = (*so->so_proto->pr_usrreqs->pru_sense)(so, ub);
NET_UNLOCK_GIANT();
return (error);
return (*so->so_proto->pr_usrreqs->pru_sense)(so, ub);
}
/*
@ -287,13 +266,11 @@ soo_close(struct file *fp, struct thread *td)
int error = 0;
struct socket *so;
NET_LOCK_GIANT();
so = fp->f_data;
fp->f_ops = &badfileops;
fp->f_data = NULL;
if (so)
error = soclose(so);
NET_UNLOCK_GIANT();
return (error);
}

View File

@ -453,8 +453,6 @@ pfslowtimo(void *arg)
struct domain *dp;
struct protosw *pr;
NET_ASSERT_GIANT();
for (dp = domains; dp; dp = dp->dom_next)
for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
if (pr->pr_slowtimo)
@ -468,8 +466,6 @@ pffasttimo(void *arg)
struct domain *dp;
struct protosw *pr;
NET_ASSERT_GIANT();
for (dp = domains; dp; dp = dp->dom_next)
for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
if (pr->pr_fasttimo)

View File

@ -175,10 +175,8 @@ socket(td, uap)
if (error)
return (error);
/* An extra reference on `fp' has been held for us by falloc(). */
NET_LOCK_GIANT();
error = socreate(uap->domain, &so, uap->type, uap->protocol,
td->td_ucred, td);
NET_UNLOCK_GIANT();
if (error) {
fdclose(fdp, fp, fd, td);
} else {
@ -225,25 +223,22 @@ kern_bind(td, fd, sa)
struct file *fp;
int error;
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
if (error)
goto done2;
return (error);
so = fp->f_data;
#ifdef MAC
SOCK_LOCK(so);
error = mac_check_socket_bind(td->td_ucred, so, sa);
SOCK_UNLOCK(so);
if (error)
goto done1;
goto done;
#endif
error = sobind(so, sa, td);
#ifdef MAC
done1:
done:
#endif
fdrop(fp, td);
done2:
NET_UNLOCK_GIANT();
return (error);
}
@ -260,7 +255,6 @@ listen(td, uap)
struct file *fp;
int error;
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
if (error == 0) {
so = fp->f_data;
@ -277,7 +271,6 @@ listen(td, uap)
#endif
fdrop(fp, td);
}
NET_UNLOCK_GIANT();
return(error);
}
@ -357,10 +350,9 @@ kern_accept(struct thread *td, int s, struct sockaddr **name,
}
fdp = td->td_proc->p_fd;
NET_LOCK_GIANT();
error = getsock(fdp, s, &headfp, &fflag);
if (error)
goto done2;
return (error);
head = headfp->f_data;
if ((head->so_options & SO_ACCEPTCONN) == 0) {
error = EINVAL;
@ -491,8 +483,6 @@ kern_accept(struct thread *td, int s, struct sockaddr **name,
if (nfp != NULL)
fdrop(nfp, td);
fdrop(headfp, td);
done2:
NET_UNLOCK_GIANT();
return (error);
}
@ -550,10 +540,9 @@ kern_connect(td, fd, sa)
int error;
int interrupted = 0;
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
if (error)
goto done2;
return (error);
so = fp->f_data;
if (so->so_state & SS_ISCONNECTING) {
error = EALREADY;
@ -595,8 +584,6 @@ kern_connect(td, fd, sa)
error = EINTR;
done1:
fdrop(fp, td);
done2:
NET_UNLOCK_GIANT();
return (error);
}
@ -623,11 +610,10 @@ socketpair(td, uap)
return (error);
#endif
NET_LOCK_GIANT();
error = socreate(uap->domain, &so1, uap->type, uap->protocol,
td->td_ucred, td);
if (error)
goto done2;
return (error);
error = socreate(uap->domain, &so2, uap->type, uap->protocol,
td->td_ucred, td);
if (error)
@ -670,7 +656,7 @@ socketpair(td, uap)
goto free4;
fdrop(fp1, td);
fdrop(fp2, td);
goto done2;
return (0);
free4:
fdclose(fdp, fp2, sv[1], td);
fdrop(fp2, td);
@ -683,8 +669,6 @@ socketpair(td, uap)
free1:
if (so1 != NULL)
(void)soclose(so1);
done2:
NET_UNLOCK_GIANT();
return (error);
}
@ -770,10 +754,9 @@ kern_sendit(td, s, mp, flags, control, segflg)
struct uio *ktruio = NULL;
#endif
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, s, &fp, NULL);
if (error)
goto bad2;
return (error);
so = (struct socket *)fp->f_data;
#ifdef MAC
@ -826,8 +809,6 @@ kern_sendit(td, s, mp, flags, control, segflg)
#endif
bad:
fdrop(fp, td);
bad2:
NET_UNLOCK_GIANT();
return (error);
}
@ -968,12 +949,9 @@ kern_recvit(td, s, mp, fromseg, controlp)
if(controlp != NULL)
*controlp = 0;
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, s, &fp, NULL);
if (error) {
NET_UNLOCK_GIANT();
if (error)
return (error);
}
so = fp->f_data;
#ifdef MAC
@ -982,7 +960,6 @@ kern_recvit(td, s, mp, fromseg, controlp)
SOCK_UNLOCK(so);
if (error) {
fdrop(fp, td);
NET_UNLOCK_GIANT();
return (error);
}
#endif
@ -998,7 +975,6 @@ kern_recvit(td, s, mp, fromseg, controlp)
for (i = 0; i < mp->msg_iovlen; i++, iov++) {
if ((auio.uio_resid += iov->iov_len) < 0) {
fdrop(fp, td);
NET_UNLOCK_GIANT();
return (EINVAL);
}
}
@ -1094,7 +1070,6 @@ kern_recvit(td, s, mp, fromseg, controlp)
}
out:
fdrop(fp, td);
NET_UNLOCK_GIANT();
if (fromsa)
FREE(fromsa, M_SONAME);
@ -1285,14 +1260,12 @@ shutdown(td, uap)
struct file *fp;
int error;
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
if (error == 0) {
so = fp->f_data;
error = soshutdown(so, uap->how);
fdrop(fp, td);
}
NET_UNLOCK_GIANT();
return (error);
}
@ -1349,14 +1322,12 @@ kern_setsockopt(td, s, level, name, val, valseg, valsize)
panic("kern_setsockopt called with bad valseg");
}
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, s, &fp, NULL);
if (error == 0) {
so = fp->f_data;
error = sosetopt(so, &sopt);
fdrop(fp, td);
}
NET_UNLOCK_GIANT();
return(error);
}
@ -1429,7 +1400,6 @@ kern_getsockopt(td, s, level, name, val, valseg, valsize)
panic("kern_getsockopt called with bad valseg");
}
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, s, &fp, NULL);
if (error == 0) {
so = fp->f_data;
@ -1437,7 +1407,6 @@ kern_getsockopt(td, s, level, name, val, valseg, valsize)
*valsize = sopt.sopt_valsize;
fdrop(fp, td);
}
NET_UNLOCK_GIANT();
return (error);
}
@ -1492,10 +1461,9 @@ kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
if (*alen < 0)
return (EINVAL);
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
if (error)
goto done;
return (error);
so = fp->f_data;
*sa = NULL;
error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
@ -1512,8 +1480,6 @@ kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
free(*sa, M_SONAME);
*sa = NULL;
}
done:
NET_UNLOCK_GIANT();
return (error);
}
@ -1588,14 +1554,13 @@ kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
if (*alen < 0)
return (EINVAL);
NET_LOCK_GIANT();
error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
if (error)
goto done2;
return (error);
so = fp->f_data;
if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
error = ENOTCONN;
goto done1;
goto done;
}
*sa = NULL;
error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
@ -1611,10 +1576,8 @@ kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
free(*sa, M_SONAME);
*sa = NULL;
}
done1:
done:
fdrop(fp, td);
done2:
NET_UNLOCK_GIANT();
return (error);
}
@ -1823,8 +1786,6 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
int error, hdrlen = 0, mnw = 0;
int vfslocked;
NET_LOCK_GIANT();
/*
* The file descriptor must be a regular file and have a
* backing VM object.
@ -2242,8 +2203,6 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
if (m)
m_freem(m);
NET_UNLOCK_GIANT();
if (error == ERESTART)
error = EINTR;

View File

@ -645,16 +645,12 @@ bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
BPFD_UNLOCK(d);
#endif
NET_LOCK_GIANT();
error = (*ifp->if_output)(ifp, m, &dst, NULL);
NET_UNLOCK_GIANT();
if (mc != NULL) {
if (error == 0) {
NET_LOCK_GIANT();
if (error == 0)
(*ifp->if_input)(ifp, mc);
NET_UNLOCK_GIANT();
} else
else
m_freem(mc);
}
@ -776,10 +772,8 @@ bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
if (d->bd_bif == NULL)
error = EINVAL;
else {
NET_LOCK_GIANT();
ifp = d->bd_bif->bif_ifp;
error = (*ifp->if_ioctl)(ifp, cmd, addr);
NET_UNLOCK_GIANT();
}
break;
}
@ -837,9 +831,7 @@ bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
break;
}
if (d->bd_promisc == 0) {
NET_LOCK_GIANT();
error = ifpromisc(d->bd_bif->bif_ifp, 1);
NET_UNLOCK_GIANT();
if (error == 0)
d->bd_promisc = 1;
}

View File

@ -102,10 +102,7 @@ struct bpf_d {
#define BPFD_LOCK(bd) mtx_lock(&(bd)->bd_mtx)
#define BPFD_UNLOCK(bd) mtx_unlock(&(bd)->bd_mtx)
#define BPFD_LOCK_ASSERT(bd) do { \
mtx_assert(&(bd)->bd_mtx, MA_OWNED); \
NET_ASSERT_GIANT(); \
} while (0)
#define BPFD_LOCK_ASSERT(bd) mtx_assert(&(bd)->bd_mtx, MA_OWNED);
/* Test whether a BPF is ready for read(). */
#define bpf_ready(bd) \

View File

@ -28,7 +28,6 @@
*/
#include "opt_device_polling.h"
#include "opt_net.h"
#include <sys/param.h>
#include <sys/bus.h>

View File

@ -616,7 +616,6 @@ export_send(priv_p priv, item_p item, int flags)
header->count = htons(header->count);
if (priv->export != NULL)
/* Should also NET_LOCK_GIANT(). */
NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
return (error);

View File

@ -269,10 +269,7 @@ struct inpcbinfo {
#define INP_LOCK_DESTROY(inp) mtx_destroy(&(inp)->inp_mtx)
#define INP_LOCK(inp) mtx_lock(&(inp)->inp_mtx)
#define INP_UNLOCK(inp) mtx_unlock(&(inp)->inp_mtx)
#define INP_LOCK_ASSERT(inp) do { \
mtx_assert(&(inp)->inp_mtx, MA_OWNED); \
NET_ASSERT_GIANT(); \
} while (0)
#define INP_LOCK_ASSERT(inp) mtx_assert(&(inp)->inp_mtx, MA_OWNED)
#define INP_UNLOCK_ASSERT(inp) mtx_assert(&(inp)->inp_mtx, MA_NOTOWNED)
#define INP_INFO_LOCK_INIT(ipi, d) \
@ -282,17 +279,9 @@ struct inpcbinfo {
#define INP_INFO_WLOCK(ipi) mtx_lock(&(ipi)->ipi_mtx)
#define INP_INFO_RUNLOCK(ipi) mtx_unlock(&(ipi)->ipi_mtx)
#define INP_INFO_WUNLOCK(ipi) mtx_unlock(&(ipi)->ipi_mtx)
#define INP_INFO_RLOCK_ASSERT(ipi) do { \
mtx_assert(&(ipi)->ipi_mtx, MA_OWNED); \
NET_ASSERT_GIANT(); \
} while (0)
#define INP_INFO_WLOCK_ASSERT(ipi) do { \
mtx_assert(&(ipi)->ipi_mtx, MA_OWNED); \
NET_ASSERT_GIANT(); \
} while (0)
#define INP_INFO_UNLOCK_ASSERT(ipi) do { \
mtx_assert(&(ipi)->ipi_mtx, MA_NOTOWNED); \
} while (0)
#define INP_INFO_RLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_mtx, MA_OWNED)
#define INP_INFO_WLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_mtx, MA_OWNED)
#define INP_INFO_UNLOCK_ASSERT(ipi) mtx_assert(&(ipi)->ipi_mtx, MA_NOTOWNED)
#define INP_PCBHASH(faddr, lport, fport, mask) \
(((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask))

View File

@ -203,10 +203,7 @@ static struct mtx dummynet_mtx;
#define DUMMYNET_LOCK_DESTROY() mtx_destroy(&dummynet_mtx)
#define DUMMYNET_LOCK() mtx_lock(&dummynet_mtx)
#define DUMMYNET_UNLOCK() mtx_unlock(&dummynet_mtx)
#define DUMMYNET_LOCK_ASSERT() do { \
mtx_assert(&dummynet_mtx, MA_OWNED); \
NET_ASSERT_GIANT(); \
} while (0)
#define DUMMYNET_LOCK_ASSERT() mtx_assert(&dummynet_mtx, MA_OWNED)
static int config_pipe(struct dn_pipe *p);
static int ip_dn_ctl(struct sockopt *sopt);
@ -738,7 +735,6 @@ dummynet_task(void *context, int pending)
void *p; /* generic parameter to handler */
int i;
NET_LOCK_GIANT();
DUMMYNET_LOCK();
heaps[0] = &ready_heap; /* fixed-rate queues */
@ -825,8 +821,6 @@ dummynet_task(void *context, int pending)
dummynet_send(head);
callout_reset(&dn_timeout, 1, dummynet, NULL);
NET_UNLOCK_GIANT();
}
static void

View File

@ -147,10 +147,7 @@ struct ip_fw_chain {
#define IPFW_LOCK_INIT(_chain) \
rw_init(&(_chain)->rwmtx, "IPFW static rules")
#define IPFW_LOCK_DESTROY(_chain) rw_destroy(&(_chain)->rwmtx)
#define IPFW_WLOCK_ASSERT(_chain) do { \
rw_assert(&(_chain)->rwmtx, RA_WLOCKED); \
NET_ASSERT_GIANT(); \
} while (0)
#define IPFW_WLOCK_ASSERT(_chain) rw_assert(&(_chain)->rwmtx, RA_WLOCKED)
#define IPFW_RLOCK(p) rw_rlock(&(p)->rwmtx)
#define IPFW_RUNLOCK(p) rw_runlock(&(p)->rwmtx)

View File

@ -149,10 +149,7 @@ SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD,
static struct mtx mrouter_mtx;
#define MROUTER_LOCK() mtx_lock(&mrouter_mtx)
#define MROUTER_UNLOCK() mtx_unlock(&mrouter_mtx)
#define MROUTER_LOCK_ASSERT() do { \
mtx_assert(&mrouter_mtx, MA_OWNED); \
NET_ASSERT_GIANT(); \
} while (0)
#define MROUTER_LOCK_ASSERT() mtx_assert(&mrouter_mtx, MA_OWNED)
#define MROUTER_LOCK_INIT() \
mtx_init(&mrouter_mtx, "IPv4 multicast forwarding", NULL, MTX_DEF)
#define MROUTER_LOCK_DESTROY() mtx_destroy(&mrouter_mtx)
@ -160,10 +157,7 @@ static struct mtx mrouter_mtx;
static struct mtx mfc_mtx;
#define MFC_LOCK() mtx_lock(&mfc_mtx)
#define MFC_UNLOCK() mtx_unlock(&mfc_mtx)
#define MFC_LOCK_ASSERT() do { \
mtx_assert(&mfc_mtx, MA_OWNED); \
NET_ASSERT_GIANT(); \
} while (0)
#define MFC_LOCK_ASSERT() mtx_assert(&mfc_mtx, MA_OWNED)
#define MFC_LOCK_INIT() mtx_init(&mfc_mtx, "mroute mfc table", NULL, MTX_DEF)
#define MFC_LOCK_DESTROY() mtx_destroy(&mfc_mtx)

View File

@ -2190,11 +2190,9 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
* We do this to keep the sockets side happy durin
* the sonewcon ONLY.
*/
NET_LOCK_GIANT();
SCTP_TCB_UNLOCK((*stcb));
so = sonewconn(oso, 0
);
NET_UNLOCK_GIANT();
SCTP_INP_WLOCK((*stcb)->sctp_ep);
SCTP_TCB_LOCK((*stcb));
SCTP_INP_WUNLOCK((*stcb)->sctp_ep);

View File

@ -613,7 +613,6 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
struct tcpcb *tp;
char *s;
NET_ASSERT_GIANT();
INP_INFO_WLOCK_ASSERT(&tcbinfo);
/*

View File

@ -735,8 +735,6 @@ ah_input_cb(struct cryptop *crp)
caddr_t ptr;
int authsize;
NET_LOCK_GIANT();
crd = crp->crp_desc;
tc = (struct tdb_crypto *) crp->crp_opaque;
@ -769,7 +767,6 @@ ah_input_cb(struct cryptop *crp)
if (crp->crp_etype == EAGAIN) {
error = crypto_dispatch(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -863,7 +860,6 @@ ah_input_cb(struct cryptop *crp)
IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag);
KEY_FREESAV(&sav);
NET_UNLOCK_GIANT();
return error;
bad:
if (sav)
@ -874,7 +870,6 @@ ah_input_cb(struct cryptop *crp)
free(tc, M_XDATA);
if (crp != NULL)
crypto_freereq(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -1125,8 +1120,6 @@ ah_output_cb(struct cryptop *crp)
caddr_t ptr;
int err;
NET_LOCK_GIANT();
tc = (struct tdb_crypto *) crp->crp_opaque;
IPSEC_ASSERT(tc != NULL, ("null opaque data area!"));
skip = tc->tc_skip;
@ -1154,7 +1147,6 @@ ah_output_cb(struct cryptop *crp)
KEY_FREESAV(&sav);
IPSECREQUEST_UNLOCK(isr);
error = crypto_dispatch(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -1201,7 +1193,6 @@ ah_output_cb(struct cryptop *crp)
err = ipsec_process_done(m, isr);
KEY_FREESAV(&sav);
IPSECREQUEST_UNLOCK(isr);
NET_UNLOCK_GIANT();
return err;
bad:
if (sav)
@ -1211,7 +1202,6 @@ ah_output_cb(struct cryptop *crp)
m_freem(m);
free(tc, M_XDATA);
crypto_freereq(crp);
NET_UNLOCK_GIANT();
return error;
}

View File

@ -462,8 +462,6 @@ esp_input_cb(struct cryptop *crp)
struct secasindex *saidx;
caddr_t ptr;
NET_LOCK_GIANT();
crd = crp->crp_desc;
IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!"));
@ -501,7 +499,6 @@ esp_input_cb(struct cryptop *crp)
if (crp->crp_etype == EAGAIN) {
KEY_FREESAV(&sav);
error = crypto_dispatch(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -631,7 +628,6 @@ esp_input_cb(struct cryptop *crp)
IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag);
KEY_FREESAV(&sav);
NET_UNLOCK_GIANT();
return error;
bad:
if (sav)
@ -642,7 +638,6 @@ esp_input_cb(struct cryptop *crp)
free(tc, M_XDATA);
if (crp != NULL)
crypto_freereq(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -896,8 +891,6 @@ esp_output_cb(struct cryptop *crp)
struct mbuf *m;
int err, error;
NET_LOCK_GIANT();
tc = (struct tdb_crypto *) crp->crp_opaque;
IPSEC_ASSERT(tc != NULL, ("null opaque data area!"));
m = (struct mbuf *) crp->crp_buf;
@ -926,7 +919,6 @@ esp_output_cb(struct cryptop *crp)
KEY_FREESAV(&sav);
IPSECREQUEST_UNLOCK(isr);
error = crypto_dispatch(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -973,7 +965,6 @@ esp_output_cb(struct cryptop *crp)
err = ipsec_process_done(m, isr);
KEY_FREESAV(&sav);
IPSECREQUEST_UNLOCK(isr);
NET_UNLOCK_GIANT();
return err;
bad:
if (sav)
@ -983,7 +974,6 @@ esp_output_cb(struct cryptop *crp)
m_freem(m);
free(tc, M_XDATA);
crypto_freereq(crp);
NET_UNLOCK_GIANT();
return error;
}

View File

@ -219,8 +219,6 @@ ipcomp_input_cb(struct cryptop *crp)
u_int8_t nproto;
caddr_t addr;
NET_LOCK_GIANT();
crd = crp->crp_desc;
tc = (struct tdb_crypto *) crp->crp_opaque;
@ -252,7 +250,6 @@ ipcomp_input_cb(struct cryptop *crp)
if (crp->crp_etype == EAGAIN) {
KEY_FREESAV(&sav);
error = crypto_dispatch(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -306,7 +303,6 @@ ipcomp_input_cb(struct cryptop *crp)
IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, NULL);
KEY_FREESAV(&sav);
NET_UNLOCK_GIANT();
return error;
bad:
if (sav)
@ -317,7 +313,6 @@ ipcomp_input_cb(struct cryptop *crp)
free(tc, M_XDATA);
if (crp)
crypto_freereq(crp);
NET_UNLOCK_GIANT();
return error;
}
@ -499,8 +494,6 @@ ipcomp_output_cb(struct cryptop *crp)
struct mbuf *m;
int error, skip, rlen;
NET_LOCK_GIANT();
tc = (struct tdb_crypto *) crp->crp_opaque;
IPSEC_ASSERT(tc != NULL, ("null opaque data area!"));
m = (struct mbuf *) crp->crp_buf;
@ -528,7 +521,6 @@ ipcomp_output_cb(struct cryptop *crp)
KEY_FREESAV(&sav);
IPSECREQUEST_UNLOCK(isr);
error = crypto_dispatch(crp);
NET_UNLOCK_GIANT();
return error;
}
ipcompstat.ipcomps_noxform++;
@ -582,7 +574,6 @@ ipcomp_output_cb(struct cryptop *crp)
error = ipsec_process_done(m, isr);
KEY_FREESAV(&sav);
IPSECREQUEST_UNLOCK(isr);
NET_UNLOCK_GIANT();
return error;
bad:
if (sav)
@ -592,7 +583,6 @@ ipcomp_output_cb(struct cryptop *crp)
m_freem(m);
free(tc, M_XDATA);
crypto_freereq(crp);
NET_UNLOCK_GIANT();
return error;
}

View File

@ -590,8 +590,6 @@ bootpc_call(struct bootpc_globalcontext *gctx, struct thread *td)
int retry;
const char *s;
NET_ASSERT_GIANT();
/*
* Create socket and set its recieve timeout.
*/
@ -982,8 +980,6 @@ bootpc_fakeup_interface(struct bootpc_ifcontext *ifctx,
struct ifaddr *ifa;
struct sockaddr_dl *sdl;
NET_ASSERT_GIANT();
error = socreate(AF_INET, &ifctx->so, SOCK_DGRAM, 0, td->td_ucred, td);
if (error != 0)
panic("nfs_boot: socreate, error=%d", error);

View File

@ -215,8 +215,6 @@ krpc_call(struct sockaddr_in *sa, u_int prog, u_int vers, u_int func,
nam = mhead = NULL;
from = NULL;
NET_ASSERT_GIANT();
/*
* Create socket and set its recieve timeout.
*/

View File

@ -265,8 +265,6 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
struct sockaddr *saddr;
struct thread *td = &thread0; /* only used for socreate and sobind */
NET_LOCK_GIANT();
if (nmp->nm_sotype == SOCK_STREAM) {
mtx_lock(&nmp->nm_mtx);
nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
@ -458,12 +456,10 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
nmp->nm_sent = 0;
nmp->nm_timeouts = 0;
mtx_unlock(&nmp->nm_mtx);
NET_UNLOCK_GIANT();
return (0);
bad:
nfs_disconnect(nmp);
NET_UNLOCK_GIANT();
return (error);
}
@ -531,8 +527,6 @@ nfs_disconnect(struct nfsmount *nmp)
{
struct socket *so;
NET_ASSERT_GIANT();
mtx_lock(&nmp->nm_mtx);
if (nmp->nm_so) {
so = nmp->nm_so;
@ -573,14 +567,12 @@ nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
struct sockaddr *sendnam;
int error, error2, soflags, flags;
NET_LOCK_GIANT();
KASSERT(rep, ("nfs_send: called with rep == NULL"));
error = nfs_sigintr(rep->r_nmp, rep, rep->r_td);
if (error) {
m_freem(top);
goto out;
return (error);
}
mtx_lock(&rep->r_nmp->nm_mtx);
mtx_lock(&rep->r_mtx);
@ -589,8 +581,7 @@ nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
mtx_unlock(&rep->r_mtx);
mtx_unlock(&rep->r_nmp->nm_mtx);
m_freem(top);
error = 0;
goto out;
return (0);
}
rep->r_flags &= ~R_MUSTRESEND;
soflags = rep->r_nmp->nm_soflags;
@ -644,8 +635,6 @@ nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
if (error != EINTR && error != ERESTART && error != EIO && error != EPIPE)
error = 0;
}
out:
NET_UNLOCK_GIANT();
return (error);
}
@ -656,8 +645,6 @@ nfs_reply(struct nfsreq *rep)
register struct mbuf *m;
int error = 0, sotype, slpflag;
NET_LOCK_GIANT();
sotype = rep->r_nmp->nm_sotype;
/*
* For reliable protocols, lock against other senders/receivers
@ -666,7 +653,7 @@ nfs_reply(struct nfsreq *rep)
if (sotype != SOCK_DGRAM) {
error = nfs_sndlock(rep);
if (error)
goto out;
return (error);
tryagain:
mtx_lock(&rep->r_nmp->nm_mtx);
mtx_lock(&rep->r_mtx);
@ -674,15 +661,13 @@ nfs_reply(struct nfsreq *rep)
mtx_unlock(&rep->r_mtx);
mtx_unlock(&rep->r_nmp->nm_mtx);
nfs_sndunlock(rep);
error = 0;
goto out;
return (0);
}
if (rep->r_flags & R_SOFTTERM) {
mtx_unlock(&rep->r_mtx);
mtx_unlock(&rep->r_nmp->nm_mtx);
nfs_sndunlock(rep);
error = EINTR;
goto out;
return (EINTR);
}
so = rep->r_nmp->nm_so;
if (!so ||
@ -692,7 +677,7 @@ nfs_reply(struct nfsreq *rep)
error = nfs_reconnect(rep);
if (error) {
nfs_sndunlock(rep);
goto out;
return (error);
}
goto tryagain;
}
@ -706,7 +691,7 @@ nfs_reply(struct nfsreq *rep)
if (error == EINTR || error == ERESTART ||
(error = nfs_reconnect(rep)) != 0) {
nfs_sndunlock(rep);
goto out;
return (error);
}
goto tryagain;
}
@ -730,15 +715,13 @@ nfs_reply(struct nfsreq *rep)
slpflag | (PZERO - 1), "nfsreq", 0);
if (error == EINTR || error == ERESTART) {
/* NFS operations aren't restartable. Map ERESTART to EINTR */
error = EINTR;
mtx_unlock(&rep->r_mtx);
goto out;
return (EINTR);
}
if (rep->r_flags & R_SOFTTERM) {
/* Request was terminated because we exceeded the retries (soft mount) */
error = ETIMEDOUT;
mtx_unlock(&rep->r_mtx);
goto out;
return (ETIMEDOUT);
}
mtx_unlock(&rep->r_mtx);
if (sotype == SOCK_STREAM) {
@ -750,15 +733,13 @@ nfs_reply(struct nfsreq *rep)
mtx_unlock(&rep->r_nmp->nm_mtx);
error = nfs_sndlock(rep);
if (error)
goto out;
return (error);
goto tryagain;
} else {
mtx_unlock(&rep->r_mtx);
mtx_unlock(&rep->r_nmp->nm_mtx);
}
}
out:
NET_UNLOCK_GIANT();
return (error);
}
@ -1475,7 +1456,6 @@ nfs_timer(void *arg)
rep->r_flags |= R_PIN_REQ;
mtx_unlock(&rep->r_mtx);
mtx_unlock(&nfs_reqq_mtx);
NET_LOCK_GIANT();
if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
error = (*so->so_proto->pr_usrreqs->pru_send)
(so, 0, m, NULL, NULL, curthread);
@ -1483,7 +1463,6 @@ nfs_timer(void *arg)
error = (*so->so_proto->pr_usrreqs->pru_send)
(so, 0, m, nmp->nm_nam, NULL,
curthread);
NET_UNLOCK_GIANT();
mtx_lock(&nfs_reqq_mtx);
mtx_lock(&nmp->nm_mtx);
mtx_lock(&rep->r_mtx);
@ -1689,7 +1668,6 @@ nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
struct proc *p;
sigset_t tmpset;
int error = 0;
if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
return nfs4_sigintr(nmp, rep, td);
@ -1697,18 +1675,15 @@ nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
mtx_lock(&rep->r_mtx);
if (rep->r_flags & R_SOFTTERM) {
mtx_unlock(&rep->r_mtx);
error = EIO;
goto out;
return (EIO);
} else
mtx_unlock(&rep->r_mtx);
}
/* Terminate all requests while attempting a forced unmount. */
if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) {
error = EIO;
goto out;
}
if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
return (EIO);
if (!(nmp->nm_flag & NFSMNT_INT))
goto out;
return (0);
if (td == NULL)
return (0);
p = td->td_proc;
@ -1725,10 +1700,7 @@ nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
return (EINTR);
}
PROC_UNLOCK(p);
return (0);
out:
return(error);
}
/*

View File

@ -408,8 +408,6 @@ nfs_mountroot(struct mount *mp, struct thread *td)
char buf[128];
char *cp;
NET_ASSERT_GIANT();
#if defined(BOOTP_NFSROOT) && defined(BOOTP)
bootpc_init(); /* use bootp to get nfs_diskless filled in */
#elif defined(NFS_ROOT)

View File

@ -413,12 +413,6 @@ nfsrv_rcv(struct socket *so, void *arg, int waitflag)
struct uio auio;
int flags, error;
/*
* XXXRW: For now, assert Giant here since the NFS server upcall
* will perform socket operations requiring Giant in a non-mpsafe
* kernel.
*/
NET_ASSERT_GIANT();
NFSD_UNLOCK_ASSERT();
/* XXXRW: Unlocked read. */
@ -761,7 +755,6 @@ nfsrv_send(struct socket *so, struct sockaddr *nam, struct mbuf *top)
struct sockaddr *sendnam;
int error, soflags, flags;
NET_ASSERT_GIANT();
NFSD_UNLOCK_ASSERT();
soflags = so->so_proto->pr_flags;

View File

@ -526,7 +526,6 @@ nfsrv_modevent(module_t mod, int type, void *data)
static int registered;
int error = 0;
NET_LOCK_GIANT();
switch (type) {
case MOD_LOAD:
mtx_init(&nfsd_mtx, "nfsd_mtx", NULL, MTX_DEF);
@ -577,7 +576,6 @@ nfsrv_modevent(module_t mod, int type, void *data)
error = EOPNOTSUPP;
break;
}
NET_UNLOCK_GIANT();
return error;
}
static moduledata_t nfsserver_mod = {

View File

@ -134,7 +134,6 @@ nfssvc(struct thread *td, struct nfssvc_args *uap)
error = priv_check(td, PRIV_NFS_DAEMON);
if (error)
return (error);
NET_LOCK_GIANT();
NFSD_LOCK();
while (nfssvc_sockhead_flag & SLP_INIT) {
nfssvc_sockhead_flag |= SLP_WANTINIT;
@ -145,12 +144,12 @@ nfssvc(struct thread *td, struct nfssvc_args *uap)
if (uap->flag & NFSSVC_ADDSOCK) {
error = copyin(uap->argp, (caddr_t)&nfsdarg, sizeof(nfsdarg));
if (error)
goto done2;
return (error);
if ((error = fget(td, nfsdarg.sock, &fp)) != 0)
goto done2;
return (error);
if (fp->f_type != DTYPE_SOCKET) {
fdrop(fp, td);
goto done2;
return (error); /* XXXRW: Should be EINVAL? */
}
/*
* Get the client address for connected sockets.
@ -162,7 +161,7 @@ nfssvc(struct thread *td, struct nfssvc_args *uap)
nfsdarg.namelen);
if (error) {
fdrop(fp, td);
goto done2;
return (error);
}
}
error = nfssvc_addsock(fp, nam, td);
@ -174,8 +173,6 @@ nfssvc(struct thread *td, struct nfssvc_args *uap)
}
if (error == EINTR || error == ERESTART)
error = 0;
done2:
NET_UNLOCK_GIANT();
return (error);
}
@ -190,8 +187,6 @@ nfssvc_addsock(struct file *fp, struct sockaddr *mynam, struct thread *td)
struct socket *so;
int error, s;
NET_ASSERT_GIANT();
so = fp->f_data;
#if 0
/*
@ -305,8 +300,6 @@ nfssvc_nfsd(struct thread *td)
int procrastinate;
u_quad_t cur_usec;
NET_ASSERT_GIANT();
#ifndef nolint
cacherep = RC_DOIT;
writes_todo = 0;
@ -590,7 +583,6 @@ nfsrv_zapsock(struct nfssvc_sock *slp)
struct nfsrv_rec *rec;
int s;
NET_ASSERT_GIANT();
NFSD_LOCK_ASSERT();
/*
@ -703,7 +695,6 @@ nfsrv_init(int terminating)
{
struct nfssvc_sock *slp, *nslp;
NET_ASSERT_GIANT();
NFSD_LOCK_ASSERT();
if (nfssvc_sockhead_flag & SLP_INIT)

View File

@ -2102,12 +2102,10 @@ xl_rxeof_task(void *arg, int pending)
{
struct xl_softc *sc = (struct xl_softc *)arg;
NET_LOCK_GIANT();
XL_LOCK(sc);
if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
xl_rxeof(sc);
XL_UNLOCK(sc);
NET_UNLOCK_GIANT();
}
/*

View File

@ -365,11 +365,8 @@ rpcclnt_connect(rpc, td)
saddr = rpc->rc_name;
NET_LOCK_GIANT();
error = socreate(saddr->sa_family, &rpc->rc_so, rpc->rc_sotype,
rpc->rc_soproto, td->td_ucred, td);
NET_UNLOCK_GIANT();
if (error) {
RPCDEBUG("error %d in socreate()", error);
RPC_RETURN(error);
@ -627,10 +624,8 @@ rpcclnt_disconnect(rpc)
if (rpc->rc_so) {
so = rpc->rc_so;
rpc->rc_so = NULL;
NET_LOCK_GIANT();
soshutdown(so, 2);
soclose(so);
NET_UNLOCK_GIANT();
}
}
@ -708,10 +703,7 @@ rpcclnt_send(so, nam, top, rep)
* to be conditionally acquired earlier for the stack so has to avoid
* lock order reversals with any locks held over rpcclnt_send().
*/
NET_LOCK_GIANT();
error = sosend(so, sendnam, NULL, top, NULL, flags, td);
NET_UNLOCK_GIANT();
if (error) {
if (rep) {
log(LOG_INFO, "rpc send error %d for service %s\n", error,
@ -838,9 +830,7 @@ rpcclnt_receive(rep, aname, mp, td)
#endif
do {
rcvflg = MSG_WAITALL;
NET_LOCK_GIANT();
error = soreceive(so, NULL, &auio, NULL, NULL, &rcvflg);
NET_UNLOCK_GIANT();
if (error == EWOULDBLOCK && rep) {
if (rep->r_flags & R_SOFTTERM)
RPC_RETURN(EINTR);
@ -873,9 +863,7 @@ rpcclnt_receive(rep, aname, mp, td)
auio.uio_resid = len;
do {
rcvflg = MSG_WAITALL;
NET_LOCK_GIANT();
error = soreceive(so, NULL, &auio, mp, NULL, &rcvflg);
NET_UNLOCK_GIANT();
} while (error == EWOULDBLOCK || error == EINTR ||
error == ERESTART);
if (!error && auio.uio_resid > 0) {
@ -901,9 +889,7 @@ rpcclnt_receive(rep, aname, mp, td)
#endif
do {
rcvflg = 0;
NET_LOCK_GIANT();
error = soreceive(so, NULL, &auio, mp, &control, &rcvflg);
NET_UNLOCK_GIANT();
if (control)
m_freem(control);
if (error == EWOULDBLOCK && rep) {
@ -949,9 +935,7 @@ rpcclnt_receive(rep, aname, mp, td)
do {
rcvflg = 0;
NET_LOCK_GIANT();
error = soreceive(so, getnam, &auio, mp, NULL, &rcvflg);
NET_UNLOCK_GIANT();
RPCDEBUG("soreceive returns %d", error);
if (error == EWOULDBLOCK && (rep->r_flags & R_SOFTTERM)) {
RPCDEBUG("wouldblock && softerm -> EINTR");

View File

@ -278,11 +278,9 @@ __mac_get_fd(struct thread *td, struct __mac_get_fd_args *uap)
case DTYPE_SOCKET:
so = fp->f_data;
intlabel = mac_socket_label_alloc(M_WAITOK);
NET_LOCK_GIANT();
SOCK_LOCK(so);
mac_copy_socket_label(so->so_label, intlabel);
SOCK_UNLOCK(so);
NET_UNLOCK_GIANT();
error = mac_externalize_socket_label(intlabel, elements,
buffer, mac.m_buflen);
mac_socket_label_free(intlabel);
@ -476,10 +474,8 @@ __mac_set_fd(struct thread *td, struct __mac_set_fd_args *uap)
error = mac_internalize_socket_label(intlabel, buffer);
if (error == 0) {
so = fp->f_data;
NET_LOCK_GIANT();
error = mac_socket_label_set(td->td_ucred, so,
intlabel);
NET_UNLOCK_GIANT();
}
mac_socket_label_free(intlabel);
break;

View File

@ -396,19 +396,6 @@ do { \
return (_val); \
} while (0)
/*
* With the advent of fine-grained locking, the Giant lock is no longer
* required around the network stack. These macros exist for historical
* reasons, allowing conditional acquisition of Giant based on a debugging
* setting, and will be removed.
*/
#define NET_LOCK_GIANT() do { \
} while (0)
#define NET_UNLOCK_GIANT() do { \
} while (0)
#define NET_ASSERT_GIANT() do { \
} while (0)
struct mtx_args {
struct mtx *ma_mtx;
const char *ma_desc;