Allocate umtx_q from the heap instead of the stack; this avoids a page fault panic in the kernel under heavy swapping.
David Xu 2005-03-05 09:15:03 +00:00
parent 9e18aa07bb
commit bc8e6d817d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=143149
4 changed files with 54 additions and 35 deletions
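Editor's note on the likely failure mode (not part of the original commit): the umtx_q was previously a local variable of _do_lock() and do_wait(), i.e. it lived on the blocking thread's kernel stack while linked into a global wait queue. FreeBSD can swap out the kernel stack of a long-sleeping thread, so under heavy swapping another thread walking the queue could dereference a pointer into an unmapped stack page and panic. The commit instead gives every thread one heap-allocated umtx_q for its whole lifetime (umtxq_alloc() in thread_init(), umtxq_free() in thread_fini()). Below is a minimal userland C analogy of that pattern; struct waiter, waiter_alloc(), and waiter_free() are illustrative names, not kernel API.

/*
 * Userland sketch of the commit's pattern: one heap-allocated,
 * thread-lifetime queue node per thread, so nothing that other
 * threads may traverse ever points into a thread's stack.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct waiter {				/* stand-in for struct umtx_q */
	struct waiter	*next;
	pthread_t	 owner;
};

/* stand-in for td->td_umtxq */
static _Thread_local struct waiter *self_node;

static void
waiter_alloc(void)			/* cf. umtxq_alloc() in thread_init() */
{
	self_node = calloc(1, sizeof(*self_node));
	if (self_node == NULL)
		abort();
	self_node->owner = pthread_self();
}

static void
waiter_free(void)			/* cf. umtxq_free() in thread_fini() */
{
	free(self_node);
	self_node = NULL;
}

static void *
thread_main(void *arg)
{
	(void)arg;
	waiter_alloc();
	/* ... link self_node into a shared queue, block, unlink ... */
	waiter_free();
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	if (pthread_create(&td, NULL, thread_main, NULL) != 0)
		return (1);
	pthread_join(td, NULL);
	printf("ok\n");
	return (0);
}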

sys/kern/kern_thread.c

@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sleepqueue.h>
 #include <sys/turnstile.h>
 #include <sys/ktr.h>
+#include <sys/umtx.h>
 
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
@@ -240,6 +241,7 @@ thread_init(void *mem, int size, int flags)
 	cpu_thread_setup(td);
 	td->td_sleepqueue = sleepq_alloc();
 	td->td_turnstile = turnstile_alloc();
+	td->td_umtxq = umtxq_alloc();
 	td->td_sched = (struct td_sched *)&td[1];
 	sched_newthread(td);
 	return (0);
@@ -259,6 +261,7 @@ thread_fini(void *mem, int size)
 	td = (struct thread *)mem;
 	turnstile_free(td->td_turnstile);
 	sleepq_free(td->td_sleepqueue);
+	umtxq_free(td->td_umtxq);
 	vm_thread_dispose(td);
 
 	STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) {

sys/kern/kern_umtx.c

@@ -119,6 +119,18 @@ static void umtx_key_release(struct umtx_key *key);
 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL);
 
+struct umtx_q *
+umtxq_alloc(void)
+{
+	return (malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK));
+}
+
+void
+umtxq_free(struct umtx_q *uq)
+{
+	free(uq, M_UMTX);
+}
+
 static void
 umtxq_init_chains(void *arg __unused)
 {
@@ -210,7 +222,6 @@ umtxq_insert(struct umtx_q *uq)
 	mtx_assert(umtxq_mtx(chain), MA_OWNED);
 	head = &umtxq_chains[chain].uc_queue;
 	LIST_INSERT_HEAD(head, uq, uq_next);
-	uq->uq_thread->td_umtxq = uq;
 	mtx_lock_spin(&sched_lock);
 	uq->uq_thread->td_flags |= TDF_UMTXQ;
 	mtx_unlock_spin(&sched_lock);
@@ -225,7 +236,6 @@ umtxq_remove(struct umtx_q *uq)
 	mtx_assert(umtxq_mtx(umtxq_hash(&uq->uq_key)), MA_OWNED);
 	if (uq->uq_thread->td_flags & TDF_UMTXQ) {
 		LIST_REMOVE(uq, uq_next);
-		uq->uq_thread->td_umtxq = NULL;
 		/* turning off TDF_UMTXQ should be the last thing. */
 		mtx_lock_spin(&sched_lock);
 		uq->uq_thread->td_flags &= ~TDF_UMTXQ;
@@ -434,11 +444,12 @@ fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
 static int
 _do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
 {
-	struct umtx_q uq;
+	struct umtx_q *uq;
 	intptr_t owner;
 	intptr_t old;
 	int error = 0;
 
+	uq = td->td_umtxq;
 	/*
 	 * Care must be exercised when dealing with umtx structure. It
 	 * can fault on any access.
@@ -479,7 +490,7 @@ _do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
 		 * If we caught a signal, we have retried and now
 		 * exit immediately.
 		 */
-		if (error || (error = umtxq_queue_me(td, umtx, &uq)) != 0)
+		if (error || (error = umtxq_queue_me(td, umtx, uq)) != 0)
 			return (error);
 
 		/*
@@ -493,12 +504,12 @@ _do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
 
 		/* The address was invalid. */
 		if (old == -1) {
-			umtxq_lock(&uq.uq_key);
-			umtxq_busy(&uq.uq_key);
-			umtxq_remove(&uq);
-			umtxq_unbusy(&uq.uq_key);
-			umtxq_unlock(&uq.uq_key);
-			umtx_key_release(&uq.uq_key);
+			umtxq_lock(&uq->uq_key);
+			umtxq_busy(&uq->uq_key);
+			umtxq_remove(uq);
+			umtxq_unbusy(&uq->uq_key);
+			umtxq_unlock(&uq->uq_key);
+			umtx_key_release(&uq->uq_key);
 			return (EFAULT);
 		}
@@ -507,17 +518,17 @@ _do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
 		 * and we need to retry or we lost a race to the thread
 		 * unlocking the umtx.
 		 */
-		umtxq_lock(&uq.uq_key);
+		umtxq_lock(&uq->uq_key);
 		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
-			error = umtxq_sleep(td, &uq.uq_key,
+			error = umtxq_sleep(td, &uq->uq_key,
 			    td->td_priority | PCATCH,
 			    "umtx", timo);
 		}
-		umtxq_busy(&uq.uq_key);
-		umtxq_remove(&uq);
-		umtxq_unbusy(&uq.uq_key);
-		umtxq_unlock(&uq.uq_key);
-		umtx_key_release(&uq.uq_key);
+		umtxq_busy(&uq->uq_key);
+		umtxq_remove(uq);
+		umtxq_unbusy(&uq->uq_key);
+		umtxq_unlock(&uq->uq_key);
+		umtx_key_release(&uq->uq_key);
 	}
 
 	return (0);
@@ -614,45 +625,46 @@ do_unlock(struct thread *td, struct umtx *umtx, long id)
 static int
 do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
 {
-	struct umtx_q uq;
+	struct umtx_q *uq;
 	struct timespec ts, ts2, ts3;
 	struct timeval tv;
 	long tmp;
 	int error = 0;
 
-	if ((error = umtxq_queue_me(td, umtx, &uq)) != 0)
+	uq = td->td_umtxq;
+	if ((error = umtxq_queue_me(td, umtx, uq)) != 0)
 		return (error);
 	tmp = fuword(&umtx->u_owner);
 	if (tmp != id) {
-		umtxq_lock(&uq.uq_key);
-		umtxq_remove(&uq);
-		umtxq_unlock(&uq.uq_key);
+		umtxq_lock(&uq->uq_key);
+		umtxq_remove(uq);
+		umtxq_unlock(&uq->uq_key);
 	} else if (timeout == NULL) {
-		umtxq_lock(&uq.uq_key);
+		umtxq_lock(&uq->uq_key);
 		if (td->td_flags & TDF_UMTXQ)
-			error = umtxq_sleep(td, &uq.uq_key,
+			error = umtxq_sleep(td, &uq->uq_key,
 			    td->td_priority | PCATCH, "ucond", 0);
 		if (!(td->td_flags & TDF_UMTXQ))
 			error = 0;
 		else
-			umtxq_remove(&uq);
-		umtxq_unlock(&uq.uq_key);
+			umtxq_remove(uq);
+		umtxq_unlock(&uq->uq_key);
 	} else {
 		getnanouptime(&ts);
 		timespecadd(&ts, timeout);
 		TIMESPEC_TO_TIMEVAL(&tv, timeout);
 		for (;;) {
-			umtxq_lock(&uq.uq_key);
+			umtxq_lock(&uq->uq_key);
 			if (td->td_flags & TDF_UMTXQ) {
-				error = umtxq_sleep(td, &uq.uq_key,
+				error = umtxq_sleep(td, &uq->uq_key,
 				    td->td_priority | PCATCH,
 				    "ucond", tvtohz(&tv));
 			}
 			if (!(td->td_flags & TDF_UMTXQ)) {
-				umtxq_unlock(&uq.uq_key);
+				umtxq_unlock(&uq->uq_key);
 				goto out;
 			}
-			umtxq_unlock(&uq.uq_key);
+			umtxq_unlock(&uq->uq_key);
 			if (error != ETIMEDOUT)
 				break;
 			getnanouptime(&ts2);
@@ -664,12 +676,12 @@ do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
 			timespecsub(&ts3, &ts2);
 			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
 		}
-		umtxq_lock(&uq.uq_key);
-		umtxq_remove(&uq);
-		umtxq_unlock(&uq.uq_key);
+		umtxq_lock(&uq->uq_key);
+		umtxq_remove(uq);
+		umtxq_unlock(&uq->uq_key);
 	}
 out:
-	umtx_key_release(&uq.uq_key);
+	umtx_key_release(&uq->uq_key);
 	if (error == ERESTART)
 		error = EINTR;
 	return (error);

sys/sys/proc.h

@@ -250,6 +250,7 @@ struct thread {
 	TAILQ_HEAD(, selinfo) td_selq;	/* (p) List of selinfos. */
 	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
 	struct turnstile *td_turnstile;	/* (k) Associated turnstile. */
+	struct umtx_q *td_umtxq;	/* (c?) Link for when we're blocked. */
 	lwpid_t td_tid;		/* (b) Thread ID. */
 
 /* Cleared during fork1() or thread_schedule_upcall(). */
@@ -281,7 +282,6 @@ struct thread {
 	sigset_t td_oldsigmask;	/* (k) Saved mask from pre sigpause. */
 	sigset_t td_sigmask;	/* (c) Current signal mask. */
 	sigset_t td_siglist;	/* (c) Sigs arrived, not delivered. */
-	struct umtx_q *td_umtxq;	/* (c?) Link for when we're blocked. */
 	volatile u_int td_generation;	/* (k) For detection of preemption */
 	stack_t td_sigstk;	/* (k) Stack ptr and on-stack flag. */
 	int td_kflags;		/* (c) Flags for KSE threading. */

sys/sys/umtx.h

@@ -131,6 +131,10 @@ umtx_wake(struct umtx *umtx, int nr_wakeup)
 		return (errno);
 	return (0);
 }
+#else
+
+struct umtx_q *umtxq_alloc(void);
+void umtxq_free(struct umtx_q *);
 #endif	/* !_KERNEL */
 
 #endif	/* !_SYS_UMTX_H_ */