Optimize umtx_lock_pi() a bit by moving some heavy code out of the loop,
and make a fast path for the case where a umtx_pi can be allocated
without blocking (M_NOWAIT first, falling back to M_WAITOK).
This commit is contained in:
David Xu 2006-10-26 09:33:34 +00:00
parent 7e75ef1374
commit 4c9b02c253
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=163697

View File

@@ -206,7 +206,7 @@ static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
 static int umtx_key_get(void *addr, int type, int share,
 	struct umtx_key *key);
 static void umtx_key_release(struct umtx_key *key);
-static struct umtx_pi *umtx_pi_alloc(void);
+static struct umtx_pi *umtx_pi_alloc(int);
 static void umtx_pi_free(struct umtx_pi *pi);
 static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
 static void umtx_thread_cleanup(struct thread *td);
@@ -1172,11 +1172,11 @@ do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
 }
 
 static inline struct umtx_pi *
-umtx_pi_alloc(void)
+umtx_pi_alloc(int flags)
 {
 	struct umtx_pi *pi;
 
-	pi = uma_zalloc(umtx_pi_zone, M_ZERO | M_WAITOK);
+	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
 	TAILQ_INIT(&pi->pi_blocked);
 	atomic_add_int(&umtx_pi_allocated, 1);
 	return (pi);
@@ -1571,32 +1571,35 @@ _do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, int timo,
 	if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
 	    &uq->uq_key)) != 0)
 		return (error);
-	for (;;) {
-		pi = NULL;
-		umtxq_lock(&uq->uq_key);
-		pi = umtx_pi_lookup(&uq->uq_key);
-		if (pi == NULL) {
+	umtxq_lock(&uq->uq_key);
+	pi = umtx_pi_lookup(&uq->uq_key);
+	if (pi == NULL) {
+		new_pi = umtx_pi_alloc(M_NOWAIT);
+		if (new_pi == NULL) {
 			umtxq_unlock(&uq->uq_key);
-			new_pi = umtx_pi_alloc();
+			new_pi = umtx_pi_alloc(M_WAITOK);
 			new_pi->pi_key = uq->uq_key;
 			umtxq_lock(&uq->uq_key);
 			pi = umtx_pi_lookup(&uq->uq_key);
-			if (pi != NULL)
+			if (pi != NULL) {
 				umtx_pi_free(new_pi);
-			else {
-				umtx_pi_insert(new_pi);
-				pi = new_pi;
+				new_pi = NULL;
 			}
 		}
+		if (new_pi != NULL) {
+			new_pi->pi_key = uq->uq_key;
+			umtx_pi_insert(new_pi);
+			pi = new_pi;
+		}
+	}
+	umtx_pi_ref(pi);
+	umtxq_unlock(&uq->uq_key);
 
-		umtx_pi_ref(pi);
-		umtxq_unlock(&uq->uq_key);
-
-		/*
-		 * Care must be exercised when dealing with umtx structure.  It
-		 * can fault on any access.
-		 */
+	/*
+	 * Care must be exercised when dealing with umtx structure.  It
+	 * can fault on any access.
+	 */
+	for (;;) {
 		/*
 		 * Try the uncontested case.  This should be done in userland.
 		 */
@@ -1633,10 +1636,6 @@ _do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, int timo,
 			}
 
 			/* If this failed the lock has changed, restart. */
-			umtxq_lock(&uq->uq_key);
-			umtx_pi_unref(pi);
-			umtxq_unlock(&uq->uq_key);
-			pi = NULL;
 			continue;
 		}
@@ -1689,16 +1688,12 @@ _do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, int timo,
 		if (old == owner)
 			error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
 				 "umtxpi", timo);
-		umtx_pi_unref(pi);
 		umtxq_unlock(&uq->uq_key);
-		pi = NULL;
 	}
 
-	if (pi != NULL) {
-		umtxq_lock(&uq->uq_key);
-		umtx_pi_unref(pi);
-		umtxq_unlock(&uq->uq_key);
-	}
+	umtxq_lock(&uq->uq_key);
+	umtx_pi_unref(pi);
+	umtxq_unlock(&uq->uq_key);
 
 	umtx_key_release(&uq->uq_key);
 	return (error);