Change msleep() and tsleep() to not alter the calling thread's priority
if the specified priority is zero. This avoids a race where the calling
thread could read a snapshot of its current priority, then a different
thread could change the first thread's priority, and then the original
thread would call sched_prio() inside msleep(), undoing the change made
by the second thread. A priority of zero was chosen because no thread
that calls msleep() or tsleep() should be specifying a priority of zero
anyway.

The various places that passed 'curthread->td_priority' or some variant
as the priority now pass 0.
commit 0f180a7cce (parent 00d02f943b)
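The msleep() hunk below shows the core of the change: sched_prio() is now
called only when the caller actually supplied a priority, so passing 0 means
"leave my priority alone." As a quick illustration of the resulting contract,
here is a minimal standalone C sketch; it is not code from the tree, and
maybe_adjust_priority(), example_wait(), and wchan are hypothetical names used
only for illustration.

    /*
     * Minimal sketch, assuming the semantics described in the commit log.
     * maybe_adjust_priority() mirrors the guarded block added to msleep();
     * example_wait() shows the caller-side convention.
     */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/proc.h>
    #include <sys/sched.h>

    static void
    maybe_adjust_priority(struct thread *td, int priority)
    {

        /* Only touch td_priority when the caller supplied a priority. */
        if ((priority & PRIMASK) != 0) {
            mtx_lock_spin(&sched_lock);
            sched_prio(td, priority & PRIMASK);
            mtx_unlock_spin(&sched_lock);
        }
        /* priority == 0: leave the thread's priority alone. */
    }

    static void
    example_wait(void *wchan)
    {

        /*
         * Old, racy caller pattern removed by this commit:
         *     pri = curthread->td_priority;
         *     tsleep(wchan, pri, "wait", hz);
         * New pattern: pass 0 so tsleep() never calls sched_prio().
         */
        (void)tsleep(wchan, 0, "wait", hz);
    }

Callers that used to snapshot curthread->td_priority and hand it back in now
simply pass 0, which closes the race because the sleeping thread never
re-applies a stale priority.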
@@ -646,15 +646,8 @@ pmc_select_cpu(int cpu)
 static void
 pmc_force_context_switch(void)
 {
-    u_char curpri;
-
-    mtx_lock_spin(&sched_lock);
-    curpri = curthread->td_priority;
-    mtx_unlock_spin(&sched_lock);
-
-    (void) tsleep((void *) pmc_force_context_switch, curpri,
-        "pmcctx", 1);
+    (void) tsleep((void *) pmc_force_context_switch, 0, "pmcctx", 1);
 }
 
 /*
@@ -211,8 +211,7 @@ random_yarrow_deinit(void)
      * Command the hash/reseed thread to end and wait for it to finish
      */
     random_kthread_control = -1;
-    tsleep((void *)&random_kthread_control, curthread->td_priority, "term",
-        0);
+    tsleep((void *)&random_kthread_control, 0, "term", 0);
 
     /* Destroy the harvest fifos */
     while (!STAILQ_EMPTY(&emptyfifo.head)) {
@@ -285,8 +284,7 @@ random_kthread(void *arg __unused)
 
         /* Found nothing, so don't belabour the issue */
         if (!active)
-            tsleep(&harvestfifo, curthread->td_priority, "-",
-                hz / 10);
+            tsleep(&harvestfifo, 0, "-", hz / 10);
 
     }
 
@@ -373,8 +373,7 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
     /* Create a thread if we need one. */
     while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
         if (ie->ie_flags & IE_ADDING_THREAD)
-            msleep(ie, &ie->ie_lock, curthread->td_priority,
-                "ithread", 0);
+            msleep(ie, &ie->ie_lock, 0, "ithread", 0);
         else {
             ie->ie_flags |= IE_ADDING_THREAD;
             mtx_unlock(&ie->ie_lock);
@@ -460,8 +459,7 @@ intr_event_remove_handler(void *cookie)
     TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
     mtx_unlock_spin(&sched_lock);
     while (handler->ih_flags & IH_DEAD)
-        msleep(handler, &ie->ie_lock, curthread->td_priority, "iev_rmh",
-            0);
+        msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
     intr_event_update(ie);
 #ifdef notyet
     /*
@@ -685,7 +683,7 @@ ithread_execute_handlers(struct proc *p, struct intr_event *ie)
                 ie->ie_name);
             ie->ie_warned = 1;
         }
-        tsleep(&ie->ie_count, curthread->td_priority, "istorm", 1);
+        tsleep(&ie->ie_count, 0, "istorm", 1);
     } else
         ie->ie_count++;
 
@@ -577,13 +577,11 @@ poll_idle(void)
 {
     struct thread *td = curthread;
     struct rtprio rtp;
-    int pri;
 
     rtp.prio = RTP_PRIO_MAX;    /* lowest priority */
     rtp.type = RTP_PRIO_IDLE;
     mtx_lock_spin(&sched_lock);
     rtp_to_pri(&rtp, td->td_ksegrp);
-    pri = td->td_priority;
     mtx_unlock_spin(&sched_lock);
 
     for (;;) {
@@ -595,7 +593,7 @@ poll_idle(void)
             mtx_unlock_spin(&sched_lock);
         } else {
             idlepoll_sleeping = 1;
-            tsleep(&idlepoll_sleeping, pri, "pollid", hz * 3);
+            tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
         }
     }
 }
@@ -195,9 +195,11 @@ msleep(ident, mtx, priority, wmesg, timo)
     /*
      * Adjust this thread's priority.
      */
-    mtx_lock_spin(&sched_lock);
-    sched_prio(td, priority & PRIMASK);
-    mtx_unlock_spin(&sched_lock);
+    if ((priority & PRIMASK) != 0) {
+        mtx_lock_spin(&sched_lock);
+        sched_prio(td, priority & PRIMASK);
+        mtx_unlock_spin(&sched_lock);
+    }
 
     if (timo && catch)
         rval = sleepq_timedwait_sig(ident);
@@ -368,8 +368,8 @@ thr_suspend(struct thread *td, struct thr_suspend_args *uap)
     }
     PROC_LOCK(td->td_proc);
     if ((td->td_flags & TDF_THRWAKEUP) == 0)
-        error = msleep((void *)td, &td->td_proc->p_mtx,
-            td->td_priority | PCATCH, "lthr", hz);
+        error = msleep((void *)td, &td->td_proc->p_mtx, PCATCH, "lthr",
+            hz);
     if (td->td_flags & TDF_THRWAKEUP) {
         mtx_lock_spin(&sched_lock);
         td->td_flags &= ~TDF_THRWAKEUP;
@@ -168,7 +168,7 @@ umtxq_busy(struct umtx_key *key)
     while (umtxq_chains[chain].uc_flags & UCF_BUSY) {
         umtxq_chains[chain].uc_flags |= UCF_WANT;
         msleep(&umtxq_chains[chain], umtxq_mtx(chain),
-            curthread->td_priority, "umtxq_busy", 0);
+            0, "umtxq_busy", 0);
     }
     umtxq_chains[chain].uc_flags |= UCF_BUSY;
 }
@@ -424,8 +424,7 @@ _do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
      */
     umtxq_lock(&uq->uq_key);
     if (old == owner && (td->td_flags & TDF_UMTXQ)) {
-        error = umtxq_sleep(td, &uq->uq_key,
-            td->td_priority | PCATCH,
+        error = umtxq_sleep(td, &uq->uq_key, PCATCH,
             "umtx", timo);
     }
     umtxq_busy(&uq->uq_key);
@@ -547,7 +546,7 @@ do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
     umtxq_lock(&uq->uq_key);
     if (td->td_flags & TDF_UMTXQ)
         error = umtxq_sleep(td, &uq->uq_key,
-            td->td_priority | PCATCH, "ucond", 0);
+            PCATCH, "ucond", 0);
     if (!(td->td_flags & TDF_UMTXQ))
         error = 0;
     else
@@ -560,8 +559,7 @@ do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
     for (;;) {
         umtxq_lock(&uq->uq_key);
         if (td->td_flags & TDF_UMTXQ) {
-            error = umtxq_sleep(td, &uq->uq_key,
-                td->td_priority | PCATCH,
+            error = umtxq_sleep(td, &uq->uq_key, PCATCH,
                 "ucond", tvtohz(&tv));
         }
         if (!(td->td_flags & TDF_UMTXQ)) {
@@ -543,7 +543,7 @@ schedcpu_thread(void)
 
     for (;;) {
         schedcpu();
-        tsleep(&nowake, curthread->td_priority, "-", hz);
+        tsleep(&nowake, 0, "-", hz);
     }
 }
 
@@ -365,7 +365,7 @@ taskqueue_thread_loop(void *arg)
     TQ_LOCK(tq);
     do {
         taskqueue_run(tq);
-        TQ_SLEEP(tq, tq, &tq->tq_mutex, curthread->td_priority, "-", 0);
+        TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
     } while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0);
 
     /* rendezvous with thread that asked us to terminate */
@@ -1657,7 +1657,7 @@ _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
     }
     mq->mq_senders++;
     error = msleep(&mq->mq_senders, &mq->mq_mutex,
-        curthread->td_priority | PCATCH, "mqsend", timo);
+        PCATCH, "mqsend", timo);
     mq->mq_senders--;
     if (error == EAGAIN)
         error = ETIMEDOUT;
@@ -1809,7 +1809,7 @@ _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
     }
     mq->mq_receivers++;
     error = msleep(&mq->mq_receivers, &mq->mq_mutex,
-        curthread->td_priority | PCATCH, "mqrecv", timo);
+        PCATCH, "mqrecv", timo);
     mq->mq_receivers--;
     if (error == EAGAIN)
         error = ETIMEDOUT;
@@ -140,9 +140,7 @@ vm_page_zero_idle_wakeup(void)
 static void
 vm_pagezero(void __unused *arg)
 {
-    struct thread *td;
 
-    td = curthread;
     idlezero_enable = idlezero_enable_default;
 
     for (;;) {
@@ -159,7 +157,7 @@ vm_pagezero(void __unused *arg)
             vm_page_lock_queues();
             wakeup_needed = TRUE;
             msleep(&zero_state, &vm_page_queue_mtx,
-                PDROP | td->td_priority, "pgzero", hz * 300);
+                PDROP, "pgzero", hz * 300);
         }
     }
 }