Remove several linear list traversals per request from RPC server code.

Do not insert active ports into the pool->sp_active list if they are
successfully assigned to some thread.  This makes the list include only
ports that really require attention, so its traversal can be reduced to
simply taking the first element.

  Remove an idle thread from the pool->sp_idlethreads list when assigning
some work (a port or requests) to it.  That again makes it possible to
replace list traversals with simply taking the first element.
This commit is contained in:
Alexander Motin 2013-12-20 17:39:07 +00:00
parent c63e7b9304
commit ba981145d6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=259659
3 changed files with 47 additions and 57 deletions

View File

@ -289,19 +289,6 @@ fha_hash_entry_add_op(struct fha_hash_entry *fhe, int locktype, int count)
fhe->num_rw += count;
}
static SVCTHREAD *
get_idle_thread(SVCPOOL *pool)
{
SVCTHREAD *st;
LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink) {
if (st->st_xprt == NULL && STAILQ_EMPTY(&st->st_reqs))
return (st);
}
return (NULL);
}
/*
* Get the service thread currently associated with the fhe that is
* appropriate to handle this operation.
@ -386,7 +373,7 @@ fha_hash_entry_choose_thread(struct fha_params *softc,
ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
"fha: %p(%d)t", thread, thread->st_reqcount);
#endif
} else if ((thread = get_idle_thread(pool))) {
} else if ((thread = LIST_FIRST(&pool->sp_idlethreads))) {
#if 0
ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
"fha: %p(%d)i", thread, thread->st_reqcount);
@ -418,7 +405,6 @@ SVCTHREAD *
fha_assign(SVCTHREAD *this_thread, struct svc_req *req,
struct fha_params *softc)
{
SVCPOOL *pool;
SVCTHREAD *thread;
struct fha_info i;
struct fha_hash_entry *fhe;
@ -439,7 +425,6 @@ fha_assign(SVCTHREAD *this_thread, struct svc_req *req,
if (req->rq_vers != 2 && req->rq_vers != 3)
return (this_thread);
pool = req->rq_xprt->xp_pool;
fha_extract_info(req, &i, cb);
/*

View File

@ -293,12 +293,10 @@ xprt_unregister_locked(SVCXPRT *xprt)
{
SVCPOOL *pool = xprt->xp_pool;
mtx_assert(&xprt->xp_mlock, MA_OWNED);
KASSERT(xprt->xp_registered == TRUE,
("xprt_unregister_locked: not registered"));
if (xprt->xp_active) {
TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
xprt->xp_active = FALSE;
}
xprt_inactive_locked(xprt);
TAILQ_REMOVE(&pool->sp_xlist, xprt, xp_link);
xprt->xp_registered = FALSE;
}
@ -320,25 +318,25 @@ xprt_unregister(SVCXPRT *xprt)
SVC_RELEASE(xprt);
}
static void
/*
* Attempt to assign a service thread to this transport.
*/
static int
xprt_assignthread(SVCXPRT *xprt)
{
SVCPOOL *pool = xprt->xp_pool;
SVCTHREAD *st;
/*
* Attempt to assign a service thread to this
* transport.
*/
LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink) {
if (st->st_xprt == NULL && STAILQ_EMPTY(&st->st_reqs))
break;
}
mtx_assert(&xprt->xp_mlock, MA_OWNED);
st = LIST_FIRST(&pool->sp_idlethreads);
if (st) {
LIST_REMOVE(st, st_ilink);
st->st_idle = FALSE;
SVC_ACQUIRE(xprt);
xprt->xp_thread = st;
st->st_xprt = xprt;
cv_signal(&st->st_cond);
return (TRUE);
} else {
/*
* See if we can create a new thread. The
@ -354,6 +352,7 @@ xprt_assignthread(SVCXPRT *xprt)
pool->sp_state = SVCPOOL_THREADWANTED;
}
}
return (FALSE);
}
void
@ -372,9 +371,12 @@ xprt_active(SVCXPRT *xprt)
}
if (!xprt->xp_active) {
TAILQ_INSERT_TAIL(&pool->sp_active, xprt, xp_alink);
xprt->xp_active = TRUE;
xprt_assignthread(xprt);
if (xprt->xp_thread == NULL) {
if (!xprt_assignthread(xprt))
TAILQ_INSERT_TAIL(&pool->sp_active, xprt,
xp_alink);
}
}
mtx_unlock(&pool->sp_lock);
@ -385,8 +387,10 @@ xprt_inactive_locked(SVCXPRT *xprt)
{
SVCPOOL *pool = xprt->xp_pool;
mtx_assert(&xprt->xp_mlock, MA_OWNED);
if (xprt->xp_active) {
TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
if (xprt->xp_thread == NULL)
TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
xprt->xp_active = FALSE;
}
}
@ -948,10 +952,11 @@ svc_assign_waiting_sockets(SVCPOOL *pool)
{
SVCXPRT *xprt;
TAILQ_FOREACH(xprt, &pool->sp_active, xp_alink) {
if (!xprt->xp_thread) {
xprt_assignthread(xprt);
}
while ((xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
if (xprt_assignthread(xprt))
TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
else
break;
}
}
@ -1042,21 +1047,17 @@ svc_run_internal(SVCPOOL *pool, bool_t ismaster)
* active transport which isn't being serviced
* by a thread.
*/
if (svc_request_space_available(pool)) {
TAILQ_FOREACH(xprt, &pool->sp_active,
xp_alink) {
if (!xprt->xp_thread) {
SVC_ACQUIRE(xprt);
xprt->xp_thread = st;
st->st_xprt = xprt;
break;
}
}
}
if (st->st_xprt)
if (svc_request_space_available(pool) &&
(xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
SVC_ACQUIRE(xprt);
xprt->xp_thread = st;
st->st_xprt = xprt;
continue;
}
LIST_INSERT_HEAD(&pool->sp_idlethreads, st, st_ilink);
st->st_idle = TRUE;
if (ismaster || (!ismaster &&
pool->sp_threadcount > pool->sp_minthreads))
error = cv_timedwait_sig(&st->st_cond,
@ -1064,7 +1065,10 @@ svc_run_internal(SVCPOOL *pool, bool_t ismaster)
else
error = cv_wait_sig(&st->st_cond,
&pool->sp_lock);
LIST_REMOVE(st, st_ilink);
if (st->st_idle) {
LIST_REMOVE(st, st_ilink);
st->st_idle = FALSE;
}
/*
* Reduce worker thread count when idle.
@ -1132,11 +1136,12 @@ svc_run_internal(SVCPOOL *pool, bool_t ismaster)
* execute the request
* immediately.
*/
if (stpref != st) {
cv_signal(&stpref->st_cond);
continue;
} else {
if (stpref == st)
break;
if (stpref->st_idle) {
LIST_REMOVE(stpref, st_ilink);
stpref->st_idle = FALSE;
cv_signal(&stpref->st_cond);
}
}
} while (stat == XPRT_MOREREQS
@ -1153,10 +1158,9 @@ svc_run_internal(SVCPOOL *pool, bool_t ismaster)
xprt->xp_thread = NULL;
st->st_xprt = NULL;
if (xprt->xp_active) {
xprt_assignthread(xprt);
TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
TAILQ_INSERT_TAIL(&pool->sp_active, xprt,
xp_alink);
if (!xprt_assignthread(xprt))
TAILQ_INSERT_TAIL(&pool->sp_active,
xprt, xp_alink);
}
mtx_unlock(&pool->sp_lock);
SVC_RELEASE(xprt);

View File

@ -278,6 +278,7 @@ typedef struct __rpc_svcthread {
SVCXPRT *st_xprt; /* transport we are processing */
struct svc_reqlist st_reqs; /* RPC requests to execute */
int st_reqcount; /* number of queued reqs */
int st_idle; /* thread is on idle list */
struct cv st_cond; /* sleeping for work */
LIST_ENTRY(__rpc_svcthread) st_link; /* all threads list */
LIST_ENTRY(__rpc_svcthread) st_ilink; /* idle threads list */