Introduce xprt_inactive_self() -- a variant for use when it is certain that the port
is assigned to the current thread, for example within receive handlers.  In that
case the function reduces to a single assignment and can avoid locking.
Author: Alexander Motin
Date:   2013-12-29 11:19:09 +00:00
Commit: 5c42b9dc1f
Parent: a08c151546

4 changed files with 25 additions and 11 deletions
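For illustration only (not part of this commit): a receive handler that already owns the port can clear its own active flag without going through the pool mutex. The handler name below is hypothetical; the locking pattern mirrors the svc_dg.c hunk further down.

/*
 * Hypothetical receive-handler fragment (sketch, not committed code).
 * The handler thread owns the port (xprt->xp_thread != NULL), so
 * xprt_inactive_self() reduces to a single assignment -- no pool lock.
 */
static bool_t
example_recv(SVCXPRT *xprt)
{

	sx_xlock(&xprt->xp_lock);
	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	if (!soreadable(xprt->xp_socket))
		xprt_inactive_self(xprt);	/* just clears xp_active */
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
	sx_xunlock(&xprt->xp_lock);
	return (FALSE);
}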

sys/rpc/svc.c

@@ -405,6 +405,19 @@ xprt_inactive(SVCXPRT *xprt)
 	mtx_unlock(&pool->sp_lock);
 }
 
+/*
+ * Variant of xprt_inactive() for use only when sure that the port is
+ * assigned to the thread.  For example, within receive handlers.
+ */
+void
+xprt_inactive_self(SVCXPRT *xprt)
+{
+
+	KASSERT(xprt->xp_thread != NULL,
+	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
+	xprt->xp_active = FALSE;
+}
+
 /*
  * Add a service program to the callout list.
  * The dispatch routine will be called when a rpc request for this
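For comparison, the pre-existing slow path has to take the pool mutex, since a port that is not assigned to a thread may sit on the pool's active queue. A rough reconstruction of that path (inferred from the context lines above and the declarations in svc.h; its exact body is not shown in this diff):

void
xprt_inactive(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	/* Reconstruction: dequeue/clear must happen under the pool mutex. */
	mtx_lock(&pool->sp_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&pool->sp_lock);
}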

sys/rpc/svc.h

@@ -523,6 +523,7 @@ __BEGIN_DECLS
 extern void xprt_active(SVCXPRT *);
 extern void xprt_inactive(SVCXPRT *);
 extern void xprt_inactive_locked(SVCXPRT *);
+extern void xprt_inactive_self(SVCXPRT *);
 __END_DECLS
 #endif

sys/rpc/svc_dg.c

@@ -196,10 +196,10 @@ svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 		 * from racing the upcall after our soreadable() call
 		 * returns false.
 		 */
-		mtx_lock(&xprt->xp_pool->sp_lock);
+		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
 		if (!soreadable(xprt->xp_socket))
-			xprt_inactive_locked(xprt);
-		mtx_unlock(&xprt->xp_pool->sp_lock);
+			xprt_inactive_self(xprt);
+		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
 		sx_xunlock(&xprt->xp_lock);
 		return (FALSE);
 	}
@@ -208,7 +208,7 @@ svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
 		soupcall_clear(xprt->xp_socket, SO_RCV);
 		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
-		xprt_inactive(xprt);
+		xprt_inactive_self(xprt);
 		sx_xunlock(&xprt->xp_lock);
 		return (FALSE);
 	}

sys/rpc/svc_vc.c

@@ -385,7 +385,7 @@ svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 	 */
 	ACCEPT_LOCK();
 	if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
-		xprt_inactive(xprt);
+		xprt_inactive_self(xprt);
 	ACCEPT_UNLOCK();
 	sx_xunlock(&xprt->xp_lock);
 	return (FALSE);
@@ -398,7 +398,7 @@ svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 			soupcall_clear(xprt->xp_socket, SO_RCV);
 		}
 		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
-		xprt_inactive(xprt);
+		xprt_inactive_self(xprt);
 		sx_xunlock(&xprt->xp_lock);
 		return (FALSE);
 	}
@@ -667,7 +667,7 @@ svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 	if (cd->mreq == NULL || cd->resid != 0) {
 		SOCKBUF_LOCK(&so->so_rcv);
 		if (!soreadable(so))
-			xprt_inactive(xprt);
+			xprt_inactive_self(xprt);
 		SOCKBUF_UNLOCK(&so->so_rcv);
 	}
@@ -709,7 +709,7 @@ svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 		 */
 		SOCKBUF_LOCK(&so->so_rcv);
 		if (!soreadable(so))
-			xprt_inactive(xprt);
+			xprt_inactive_self(xprt);
 		SOCKBUF_UNLOCK(&so->so_rcv);
 		sx_xunlock(&xprt->xp_lock);
 		return (FALSE);
@@ -722,7 +722,7 @@ svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 			soupcall_clear(so, SO_RCV);
 		}
 		SOCKBUF_UNLOCK(&so->so_rcv);
-		xprt_inactive(xprt);
+		xprt_inactive_self(xprt);
 		cd->strm_stat = XPRT_DIED;
 		sx_xunlock(&xprt->xp_lock);
 		return (FALSE);
@@ -732,7 +732,7 @@ svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 		/*
 		 * EOF - the other end has closed the socket.
 		 */
-		xprt_inactive(xprt);
+		xprt_inactive_self(xprt);
 		cd->strm_stat = XPRT_DIED;
 		sx_xunlock(&xprt->xp_lock);
 		return (FALSE);
@@ -763,7 +763,7 @@ svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg,
 	mtx_lock(&ct->ct_lock);
 	m = cd->mreq;
 	if (m == NULL) {
-		xprt_inactive(xprt);
+		xprt_inactive_self(xprt);
 		mtx_unlock(&ct->ct_lock);
 		sx_xunlock(&xprt->xp_lock);
 		return (FALSE);