From e7573e7ad73506300ecc8852a6befeac3c4b62fb Mon Sep 17 00:00:00 2001
From: John Baldwin
Date: Fri, 9 Mar 2007 22:41:01 +0000
Subject: [PATCH] Allow threads to atomically release rw and sx locks while
 waiting for an event.

Locking primitives that support this (mtx, rw, and sx) now each include
their own foo_sleep() routine.
- Rename msleep() to _sleep() and change its 'struct mtx' object to a
  'struct lock_object' pointer. _sleep() uses the recently added
  lc_unlock() and lc_lock() function pointers for the lock class of the
  specified lock to release the lock while the thread is suspended.
- Add wrappers around _sleep() for mutexes (mtx_sleep()), rw locks
  (rw_sleep()), and sx locks (sx_sleep()). msleep() still exists and is
  now identical to mtx_sleep(), but it is deprecated.
- Rename SLEEPQ_MSLEEP to SLEEPQ_SLEEP.
- Rewrite much of sleep.9 to not be msleep(9) centric.
- Flesh out the 'RETURN VALUES' section in sleep.9 and add an 'ERRORS'
  section.
- Add __nonnull(1) to _sleep() and msleep_spin() so that the compiler will
  warn if you try to pass a NULL wait channel. The functions already have
  a KASSERT to that effect.
---
 share/man/man9/Makefile      |   3 +
 share/man/man9/condvar.9     |   2 +-
 share/man/man9/lock.9        |   8 +-
 share/man/man9/mi_switch.9   |  13 +--
 share/man/man9/mtx_pool.9    |   5 +-
 share/man/man9/mutex.9       |  14 ++-
 share/man/man9/rwlock.9      |  10 ++
 share/man/man9/sleep.9       | 183 +++++++++++++++++++++--------------
 share/man/man9/sleepqueue.9  |  10 +-
 share/man/man9/sx.9          |  12 ++-
 share/man/man9/thread_exit.9 |   4 +-
 sys/kern/kern_synch.c        |  61 ++++++------
 sys/sys/mutex.h              |   3 +
 sys/sys/rwlock.h             |   2 +
 sys/sys/sleepqueue.h         |   2 +-
 sys/sys/sx.h                 |   2 +
 sys/sys/systm.h              |  13 ++-
 17 files changed, 221 insertions(+), 126 deletions(-)

diff --git a/share/man/man9/Makefile b/share/man/man9/Makefile
index 64793deb57f0..970430f689e1 100644
--- a/share/man/man9/Makefile
+++ b/share/man/man9/Makefile
@@ -835,6 +835,7 @@ MLINKS+=mutex.9 mtx_assert.9 \
 	mutex.9 mtx_lock_spin_flags.9 \
 	mutex.9 mtx_owned.9 \
 	mutex.9 mtx_recursed.9 \
+	mutex.9 mtx_sleep.9 \
 	mutex.9 MTX_SYSINIT.9 \
 	mutex.9 mtx_trylock.9 \
 	mutex.9 mtx_trylock_flags.9 \
@@ -925,6 +926,7 @@ MLINKS+=rwlock.9 rw_assert.9 \
 	rwlock.9 rw_initialized.9 \
 	rwlock.9 rw_rlock.9 \
 	rwlock.9 rw_runlock.9 \
+	rwlock.9 rw_sleep.9 \
 	rwlock.9 RW_SYSINIT.9 \
 	rwlock.9 rw_try_upgrade.9 \
 	rwlock.9 rw_wlock.9 \
@@ -1059,6 +1061,7 @@ MLINKS+=sx.9 sx_assert.9 \
 	sx.9 sx_try_slock.9 \
 	sx.9 sx_try_upgrade.9 \
 	sx.9 sx_try_xlock.9 \
+	sx.9 sx_sleep.9 \
 	sx.9 sx_unlock.9 \
 	sx.9 sx_xlock.9 \
 	sx.9 sx_xlocked.9 \
diff --git a/share/man/man9/condvar.9 b/share/man/man9/condvar.9
index dbb2ccea1fa7..1976504f5794 100644
--- a/share/man/man9/condvar.9
+++ b/share/man/man9/condvar.9
@@ -192,9 +192,9 @@ will fail if:
 Timeout expired.
 .El
 .Sh SEE ALSO
-.Xr msleep 9 ,
 .Xr mtx_pool 9 ,
 .Xr mutex 9 ,
 .Xr rwlock 9 ,
 .Xr sema 9 ,
+.Xr sleep 9 ,
 .Xr sx 9
diff --git a/share/man/man9/lock.9 b/share/man/man9/lock.9
index fbbced9b3692..94e03dd0cf51 100644
--- a/share/man/man9/lock.9
+++ b/share/man/man9/lock.9
@@ -63,14 +63,14 @@ Its arguments are:
 A pointer to the lock to initialize.
 .It Fa prio
 The priority passed to
-.Xr msleep 9 .
+.Xr sleep 9 .
 .It Fa wmesg
 The lock message.
 This is used for both debugging output and
-.Xr msleep 9 .
+.Xr sleep 9 .
 .It Fa timo
 The timeout value passed to
-.Xr msleep 9 .
+.Xr sleep 9 .
 .It Fa flags
 The flags the lock is to be initialized with.
 .Bl -tag -width ".Dv LG_CANRECURSE"
@@ -272,7 +272,7 @@ exclusive lock, and a
 .Xr panic 9
 will be the result of trying.
.Sh SEE ALSO -.Xr msleep 9 , +.Xr sleep 9 , .Xr mtx_assert 9 , .Xr panic 9 , .Xr VOP_PRINT 9 diff --git a/share/man/man9/mi_switch.9 b/share/man/man9/mi_switch.9 index a60293670701..fe97bc5ed921 100644 --- a/share/man/man9/mi_switch.9 +++ b/share/man/man9/mi_switch.9 @@ -65,13 +65,13 @@ The various major uses of can be enumerated as follows: .Bl -enum -offset indent .It -From within -.Xr sleep 9 , -.Xr tsleep 9 -and -.Xr msleep 9 +From within a function such as +.Xr cv_wait 9 , +.Xr mtx_lock , +or +.Xr tsleep 9 when the current thread -voluntarily relinquishes the CPU to wait for some resource to become +voluntarily relinquishes the CPU to wait for some resource or lock to become available. .It After handling a trap @@ -157,6 +157,7 @@ all of these functions must be called with the .Va sched_lock mutex held. .Sh SEE ALSO +.Xr cv_wait 9 , .Xr issignal 9 , .Xr mutex 9 , .Xr runqueue 9 , diff --git a/share/man/man9/mtx_pool.9 b/share/man/man9/mtx_pool.9 index 0084bb5667b8..0d6af1c810ed 100644 --- a/share/man/man9/mtx_pool.9 +++ b/share/man/man9/mtx_pool.9 @@ -64,7 +64,7 @@ .Sh DESCRIPTION Mutex pools are designed to be used as short term leaf mutexes; i.e., the last mutex one might acquire before calling -.Xr msleep 9 . +.Xr mtx_sleep 9 . They operate using a shared pool of mutexes. A mutex may be chosen from the pool based on a supplied pointer, which may or may not point to anything valid, @@ -110,7 +110,7 @@ mutexes to interlock destructor operations. No initialization or destruction overhead. .It Can be used with -.Xr msleep 9 . +.Xr mtx_sleep 9 . .El .Pp And the following disadvantages: @@ -177,7 +177,6 @@ on each mutex in the specified pool, deallocates the memory associated with the pool, and assigns NULL to the pool pointer. .Sh SEE ALSO -.Xr msleep 9 , .Xr mutex 9 .Sh HISTORY These routines first appeared in diff --git a/share/man/man9/mutex.9 b/share/man/man9/mutex.9 index cf8917515981..c73dfee74875 100644 --- a/share/man/man9/mutex.9 +++ b/share/man/man9/mutex.9 @@ -45,6 +45,7 @@ .Nm mtx_unlock_spin , .Nm mtx_unlock_flags , .Nm mtx_unlock_spin_flags , +.Nm mtx_sleep , .Nm mtx_initialized , .Nm mtx_owned , .Nm mtx_recursed , @@ -80,6 +81,8 @@ .Ft void .Fn mtx_unlock_spin_flags "struct mtx *mutex" "int flags" .Ft int +.Fn mtx_sleep "void *chan" "struct mtx *mtx" "int priority" "const char *wmesg" "int timo" +.Ft int .Fn mtx_initialized "struct mtx *mutex" .Ft int .Fn mtx_owned "struct mtx *mutex" @@ -305,6 +308,15 @@ or have another thread blocked on the mutex when it is destroyed. .Pp The +.Fn mtx_sleep +function is used to atomically release +.Fa mtx +while waiting for an event. +For more details on the parameters to this function, +see +.Xr sleep 9 . +.Pp +The .Fn mtx_initialized function returns non-zero if .Fa mutex @@ -498,11 +510,11 @@ No locks are needed when calling these functions. 
.Sh SEE ALSO .Xr condvar 9 , .Xr LOCK_PROFILING 9 , -.Xr msleep 9 , .Xr mtx_pool 9 , .Xr panic 9 , .Xr rwlock 9 , .Xr sema 9 , +.Xr sleep 9 , .Xr sx 9 .Sh HISTORY These diff --git a/share/man/man9/rwlock.9 b/share/man/man9/rwlock.9 index 73bd1ae33517..7f1a4314058f 100644 --- a/share/man/man9/rwlock.9 +++ b/share/man/man9/rwlock.9 @@ -37,6 +37,7 @@ .Nm rw_wunlock , .Nm rw_try_upgrade , .Nm rw_downgrade , +.Nm rw_sleep , .Nm rw_initialized , .Nm rw_wowned , .Nm rw_assert , @@ -63,6 +64,8 @@ .Ft void .Fn rw_downgrade "struct rwlock *rw" .Ft int +.Fn rw_sleep "void *chan" "struct rwlock *rw" "int priority" "const char *wmesg" "int timo" +.Ft int .Fn rw_initialized "struct rwlock *rw" .Ft int .Fn rw_wowned "struct rwlock *rw" @@ -162,6 +165,13 @@ and the current thread will still hold a shared lock. Convert an exclusive lock into a single shared lock. The current thread must hold an exclusive lock of .Fa rw . +.It Fn rw_sleep "void *chan" "struct rwlock *rw" "int priority" "const char *wmesg" "int timo" +Atomically release +.Fa rw +while waiting for an event. +For more details on the parameters to this function, +see +.Xr sleep 9 . .It Fn rw_initialized "struct rwlock *rw" This function returns non-zero if .Fa rw diff --git a/share/man/man9/sleep.9 b/share/man/man9/sleep.9 index eafa15071502..67fba577ee79 100644 --- a/share/man/man9/sleep.9 +++ b/share/man/man9/sleep.9 @@ -68,6 +68,12 @@ external event, it is put to sleep by .Fn msleep_spin , or .Fn pause . +Threads may also wait using one of the locking primitive sleep routines +.Xr mtx_sleep 9 , +.Xr rw_sleep 9 , +or +.Xr sx_sleep 9 . +.Pp The parameter .Fa chan is an arbitrary address that uniquely identifies the event on which @@ -80,66 +86,19 @@ often called from inside an interrupt routine, to indicate that the resource the thread was blocking on is available now. .Pp The parameter -.Fa wmesg -is a string describing the sleep condition for tools like -.Xr ps 1 . -Due to the limited space of those programs to display arbitrary strings, -this message should not be longer than 6 characters. -.Pp -The -.Fn msleep -function is the general sleep call. -It suspends the current thread until a wakeup is -performed on the specified identifier. -The -.Fa mtx -parameter is a mutex which will be released before sleeping and reacquired -before -.Fn msleep -returns. -If .Fa priority -includes the -.Dv PDROP -flag, the -.Fa mtx -parameter will not be reacquired before returning. -The mutex is used to ensure that a condition can be checked atomically, -and that the current thread can be suspended without missing a -change to the condition, or an associated wakeup. -If -.Fa priority -is not 0, +specifies a new priority for the thread as well as some optional flags. +If the new priority is not 0, then the thread will be made runnable with the specified .Fa priority when it resumes. If -.Fa timo -is not 0, -then the thread will sleep for at most -.Fa timo No / Va hz -seconds. -If the -.Va Giant -lock is not held and -.Fa mtx -is -.Dv NULL , -then -.Fa timo -must be non-zero. -If .Fa priority includes the .Dv PCATCH flag, signals are checked before and after sleeping, otherwise signals are not checked. -The -.Fn msleep -function returns 0 if awakened, -.Er EWOULDBLOCK -if the timeout expires. If .Dv PCATCH is set and a signal needs to be delivered, @@ -151,33 +110,86 @@ is returned if the system call should be interrupted by the signal (return .Er EINTR ) . .Pp -The -.Fn tsleep -function is a variation on -.Fn msleep . 
-It is identical to invoking
-.Fn msleep
-with a
-.Dv NULL
-.Fa mtx
+The parameter
+.Fa wmesg
+is a string describing the sleep condition for tools like
+.Xr ps 1 .
+Due to the limited space of those programs to display arbitrary strings,
+this message should not be longer than 6 characters.
+.Pp
+The parameter
+.Fa timo
+specifies a timeout for the sleep.
+If
+.Fa timo
+is not 0,
+then the thread will sleep for at most
+.Fa timo No / Va hz
+seconds.
+If the timeout expires,
+then the sleep function will return
+.Er EWOULDBLOCK .
+.Pp
+Several of the sleep functions including
+.Fn msleep ,
+.Fn msleep_spin ,
+and the locking primitive sleep routines specify an additional lock parameter.
+The lock will be released before sleeping and reacquired
+before the sleep routine returns.
+If
+.Fa priority
+includes the
+.Dv PDROP
+flag, then
+the lock will not be reacquired before returning.
+The lock is used to ensure that a condition can be checked atomically,
+and that the current thread can be suspended without missing a
+change to the condition, or an associated wakeup.
+In addition, all of the sleep routines will fully drop the
+.Va Giant
+mutex
+(even if recursed)
+while the thread is suspended and will reacquire the
+.Va Giant
+mutex before the function returns.
+.Pp
+To avoid lost wakeups,
+either a lock should be used to protect against races,
+or a timeout should be specified to place an upper bound on the delay due
+to a lost wakeup.
+As a result,
+the
+.Fn tsleep
+function should only be invoked with a timeout of 0 when the
+.Va Giant
+mutex is held.
+.Pp
+The
+.Fn msleep
+function requires that
+.Fa mtx
+reference a default, i.e., non-spin, mutex.
+Its use is deprecated in favor of
+.Xr mtx_sleep 9
+which provides identical behavior.
 .Pp
 The
 .Fn msleep_spin
-function is another variation on
-.Fn msleep .
-This function accepts a spin mutex rather than a default mutex for its
+function requires that
 .Fa mtx
-parameter.
-It is also more limited in that it does not accept a
+reference a spin mutex.
+The
+.Fn msleep_spin
+function does not accept a
 .Fa priority
-parameter.
-Thus, it will not change the priority of a sleeping thread,
-and it does not support the
+parameter and thus does not support changing the current thread's priority,
+the
 .Dv PDROP
-and
+flag,
+or catching signals via the
 .Dv PCATCH
-flags.
+flag.
 .Pp
 The
 .Fn pause
@@ -221,11 +233,42 @@ pay particular attention to ensure that no other
 threads wait on the same
 .Fa chan .
 .Sh RETURN VALUES
-See above.
+If the thread is awakened by a call to
+.Fn wakeup
+or
+.Fn wakeup_one ,
+the
+.Fn msleep ,
+.Fn msleep_spin ,
+.Fn tsleep ,
+and locking primitive sleep functions return 0.
+Otherwise, a non-zero error code is returned.
+.Sh ERRORS
+.Fn msleep ,
+.Fn msleep_spin ,
+.Fn tsleep ,
+and the locking primitive sleep functions will fail if:
+.Bl -tag -width Er
+.It Bq Er EINTR
+The
+.Dv PCATCH
+flag was specified, a signal was caught, and the system call should be
+interrupted.
+.It Bq Er ERESTART
+The
+.Dv PCATCH
+flag was specified, a signal was caught, and the system call should be
+restarted.
+.It Bq Er EWOULDBLOCK
+A non-zero timeout was specified and the timeout expired.
+.El .Sh SEE ALSO .Xr ps 1 , .Xr malloc 9 , -.Xr mi_switch 9 +.Xr mi_switch 9 , +.Xr mtx_sleep 9 , +.Xr rw_sleep 9 , +.Xr sx_sleep 9 .Sh HISTORY The functions .Fn sleep diff --git a/share/man/man9/sleepqueue.9 b/share/man/man9/sleepqueue.9 index 882a3aa183ed..c3815392f6af 100644 --- a/share/man/man9/sleepqueue.9 +++ b/share/man/man9/sleepqueue.9 @@ -178,9 +178,9 @@ There are currently three types of sleep queues: .Bl -tag -width ".Dv SLEEPQ_CONDVAR" -compact .It Dv SLEEPQ_CONDVAR A sleep queue used to implement condition variables. -.It Dv SLEEPQ_MSLEEP +.It Dv SLEEPQ_SLEEP A sleep queue used to implement -.Xr msleep 9 , +.Xr sleep 9 , .Xr wakeup 9 and .Xr wakeup_one 9 . @@ -339,7 +339,7 @@ One possible use is waking up a specific thread from a widely shared sleep channel. .Pp The sleep queue interface is currently used to implement the -.Xr msleep 9 +.Xr sleep 9 and .Xr condvar 9 interfaces. @@ -347,6 +347,6 @@ Almost all other code in the kernel should use one of those interfaces rather than manipulating sleep queues directly. .Sh SEE ALSO .Xr condvar 9 , -.Xr msleep 9 , .Xr runqueue 9 , -.Xr scheduler 9 +.Xr scheduler 9 , +.Xr sleep 9 diff --git a/share/man/man9/sx.9 b/share/man/man9/sx.9 index a204ad5968b2..50c0bc4f2773 100644 --- a/share/man/man9/sx.9 +++ b/share/man/man9/sx.9 @@ -42,6 +42,7 @@ .Nm sx_unlock , .Nm sx_try_upgrade , .Nm sx_downgrade , +.Nm sx_sleep , .Nm sx_xlocked , .Nm sx_assert , .Nm SX_SYSINIT @@ -73,6 +74,8 @@ .Ft void .Fn sx_downgrade "struct sx *sx" .Ft int +.Fn sx_sleep "void *chan" "struct sx *sx" "int priority" "const char *wmesg" "int timo" +.Ft int .Fn sx_xlocked "struct sx *sx" .Pp .Cd "options INVARIANTS" @@ -134,6 +137,13 @@ will return 0 if the shared lock cannot be upgraded to an exclusive lock immediately; otherwise the exclusive lock will be acquired and a non-zero value will be returned. .Pp +A thread can atomically release a shared/exclusive lock while waiting for an +event by calling +.Fn sx_sleep . +For more details on the parameters to this function, +see +.Xr sleep 9 . +.Pp When compiled with .Cd "options INVARIANTS" and @@ -169,7 +179,7 @@ by the first argument. .El .Pp .Fn sx_xlocked -will return non-zero if the current process holds the exclusive lock; +will return non-zero if the current thread holds the exclusive lock; otherwise, it will return zero. .Pp For ease of programming, diff --git a/share/man/man9/thread_exit.9 b/share/man/man9/thread_exit.9 index 9dc8acb1b3cc..6a06433acebd 100644 --- a/share/man/man9/thread_exit.9 +++ b/share/man/man9/thread_exit.9 @@ -58,6 +58,6 @@ must be called with the mutex held. .Sh SEE ALSO .Xr mi_switch 9 , -.Xr msleep 9 , .Xr mutex 9 , -.Xr runqueue 9 +.Xr runqueue 9 , +.Xr sleep 9 diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 0edd670da1fa..25b77e31d6b0 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -112,21 +112,22 @@ sleepinit(void) * call should be restarted if possible, and EINTR is returned if the system * call should be interrupted by the signal (return EINTR). * - * The mutex argument is unlocked before the caller is suspended, and - * re-locked before msleep returns. If priority includes the PDROP - * flag the mutex is not re-locked before returning. + * The lock argument is unlocked before the caller is suspended, and + * re-locked before _sleep() returns. If priority includes the PDROP + * flag the lock is not re-locked before returning. 
*/ int -msleep(ident, mtx, priority, wmesg, timo) +_sleep(ident, lock, priority, wmesg, timo) void *ident; - struct mtx *mtx; + struct lock_object *lock; int priority, timo; const char *wmesg; { struct thread *td; struct proc *p; - int catch, rval, flags, pri; - WITNESS_SAVE_DECL(mtx); + struct lock_class *class; + int catch, flags, lock_state, pri, rval; + WITNESS_SAVE_DECL(lock_witness); td = curthread; p = td->td_proc; @@ -134,12 +135,16 @@ msleep(ident, mtx, priority, wmesg, timo) if (KTRPOINT(td, KTR_CSW)) ktrcsw(1, 0); #endif - WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, mtx == NULL ? NULL : - &mtx->mtx_object, "Sleeping on \"%s\"", wmesg); - KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL || - ident == &lbolt, ("sleeping without a mutex")); + WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock, + "Sleeping on \"%s\"", wmesg); + KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL || + ident == &lbolt, ("sleeping without a lock")); KASSERT(p != NULL, ("msleep1")); KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep")); + if (lock != NULL) + class = LOCK_CLASS(lock); + else + class = NULL; if (cold) { /* @@ -150,8 +155,8 @@ msleep(ident, mtx, priority, wmesg, timo) * splx(s);" to give interrupts a chance, but there is * no way to give interrupts a chance now. */ - if (mtx != NULL && priority & PDROP) - mtx_unlock(mtx); + if (lock != NULL && priority & PDROP) + class->lc_unlock(lock); return (0); } catch = priority & PCATCH; @@ -168,20 +173,21 @@ msleep(ident, mtx, priority, wmesg, timo) if (ident == &pause_wchan) flags = SLEEPQ_PAUSE; else - flags = SLEEPQ_MSLEEP; + flags = SLEEPQ_SLEEP; if (catch) flags |= SLEEPQ_INTERRUPTIBLE; sleepq_lock(ident); - CTR5(KTR_PROC, "msleep: thread %ld (pid %ld, %s) on %s (%p)", + CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)", td->td_tid, p->p_pid, p->p_comm, wmesg, ident); DROP_GIANT(); - if (mtx != NULL) { - mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED); - WITNESS_SAVE(&mtx->mtx_object, mtx); - mtx_unlock(mtx); - } + if (lock != NULL) { + WITNESS_SAVE(lock, lock_witness); + lock_state = class->lc_unlock(lock); + } else + /* GCC needs to follow the Yellow Brick Road */ + lock_state = -1; /* * We put ourselves on the sleep queue and start our timeout @@ -192,8 +198,7 @@ msleep(ident, mtx, priority, wmesg, timo) * stopped, then td will no longer be on a sleep queue upon * return from cursig(). */ - sleepq_add(ident, ident == &lbolt ? NULL : &mtx->mtx_object, wmesg, - flags, 0); + sleepq_add(ident, ident == &lbolt ? NULL : lock, wmesg, flags, 0); if (timo) sleepq_set_timeout(ident, timo); @@ -222,9 +227,9 @@ msleep(ident, mtx, priority, wmesg, timo) ktrcsw(0, 0); #endif PICKUP_GIANT(); - if (mtx != NULL && !(priority & PDROP)) { - mtx_lock(mtx); - WITNESS_RESTORE(&mtx->mtx_object, mtx); + if (lock != NULL && !(priority & PDROP)) { + class->lc_lock(lock, lock_state); + WITNESS_RESTORE(lock, lock_witness); } return (rval); } @@ -271,7 +276,7 @@ msleep_spin(ident, mtx, wmesg, timo) /* * We put ourselves on the sleep queue and start our timeout. 
*/ - sleepq_add(ident, &mtx->mtx_object, wmesg, SLEEPQ_MSLEEP, 0); + sleepq_add(ident, &mtx->mtx_object, wmesg, SLEEPQ_SLEEP, 0); if (timo) sleepq_set_timeout(ident, timo); @@ -336,7 +341,7 @@ wakeup(ident) { sleepq_lock(ident); - sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1, 0); + sleepq_broadcast(ident, SLEEPQ_SLEEP, -1, 0); } /* @@ -350,7 +355,7 @@ wakeup_one(ident) { sleepq_lock(ident); - sleepq_signal(ident, SLEEPQ_MSLEEP, -1, 0); + sleepq_signal(ident, SLEEPQ_SLEEP, -1, 0); } /* diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h index cde6e444c8c4..a8d349f99586 100644 --- a/sys/sys/mutex.h +++ b/sys/sys/mutex.h @@ -339,6 +339,9 @@ extern struct mtx_pool *mtxpool_sleep; #define mtx_trylock_flags(m, opts) \ _mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE) +#define mtx_sleep(chan, mtx, pri, wmesg, timo) \ + _sleep((chan), &(mtx)->mtx_object, (pri), (wmesg), (timo)) + #define mtx_initialized(m) lock_initalized(&(m)->mtx_object) #define mtx_owned(m) (((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread) diff --git a/sys/sys/rwlock.h b/sys/sys/rwlock.h index c387e6516d4e..05f49cc4fcd3 100644 --- a/sys/sys/rwlock.h +++ b/sys/sys/rwlock.h @@ -163,6 +163,8 @@ void _rw_assert(struct rwlock *rw, int what, const char *file, int line); #define rw_runlock(rw) _rw_runlock((rw), LOCK_FILE, LOCK_LINE) #define rw_try_upgrade(rw) _rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE) #define rw_downgrade(rw) _rw_downgrade((rw), LOCK_FILE, LOCK_LINE) +#define rw_sleep(chan, rw, pri, wmesg, timo) \ + _sleep((chan), &(rw)->rw_object, (pri), (wmesg), (timo)) #define rw_initialized(rw) lock_initalized(&(rw)->rw_object) diff --git a/sys/sys/sleepqueue.h b/sys/sys/sleepqueue.h index 15a673dfe632..f25bb59edf44 100644 --- a/sys/sys/sleepqueue.h +++ b/sys/sys/sleepqueue.h @@ -82,7 +82,7 @@ struct thread; #ifdef _KERNEL #define SLEEPQ_TYPE 0x0ff /* Mask of sleep queue types. */ -#define SLEEPQ_MSLEEP 0x00 /* Used by msleep/wakeup. */ +#define SLEEPQ_SLEEP 0x00 /* Used by sleep/wakeup. */ #define SLEEPQ_CONDVAR 0x01 /* Used for a cv. */ #define SLEEPQ_PAUSE 0x02 /* Used by pause. */ #define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */ diff --git a/sys/sys/sx.h b/sys/sys/sx.h index 2be6e3949c59..7153acdf1c57 100644 --- a/sys/sys/sx.h +++ b/sys/sys/sx.h @@ -94,6 +94,8 @@ struct sx_args { else \ sx_sunlock(sx); \ } while (0) +#define sx_sleep(chan, sx, pri, wmesg, timo) \ + _sleep((chan), &(sx)->sx_object, (pri), (wmesg), (timo)) #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define SX_LOCKED LA_LOCKED diff --git a/sys/sys/systm.h b/sys/sys/systm.h index 1b005d8f8806..af412d3b3c60 100644 --- a/sys/sys/systm.h +++ b/sys/sys/systm.h @@ -116,6 +116,7 @@ extern char **kenvp; * General function declarations. */ +struct lock_object; struct malloc_type; struct mtx; struct proc; @@ -307,11 +308,15 @@ static __inline void splx(intrmask_t ipl __unused) { return; } * Common `proc' functions are declared here so that proc.h can be included * less often. 
*/ -int msleep(void *chan, struct mtx *mtx, int pri, const char *wmesg, - int timo); -int msleep_spin(void *chan, struct mtx *mtx, const char *wmesg, int timo); +int _sleep(void *chan, struct lock_object *lock, int pri, const char *wmesg, + int timo) __nonnull(1); +#define msleep(chan, mtx, pri, wmesg, timo) \ + _sleep((chan), &(mtx)->mtx_object, (pri), (wmesg), (timo)) +int msleep_spin(void *chan, struct mtx *mtx, const char *wmesg, int timo) + __nonnull(1); int pause(const char *wmesg, int timo); -#define tsleep(chan, pri, wmesg, timo) msleep(chan, NULL, pri, wmesg, timo) +#define tsleep(chan, pri, wmesg, timo) \ + _sleep((chan), NULL, (pri), (wmesg), (timo)) void wakeup(void *chan) __nonnull(1); void wakeup_one(void *chan) __nonnull(1);
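
Usage illustration (not part of the patch above): the sketch below shows the
classic wait/wakeup pattern with the mtx_sleep(9) wrapper this change adds.
The names struct foo_softc, foo_init(), foo_wait(), foo_post() and the
"foordy" wait message are hypothetical and exist only for the example; the
interfaces used (mtx_sleep(), wakeup(), mtx_init(), PCATCH, hz) are the
kernel KPIs discussed in the diff.

/*
 * Illustrative only.  A hypothetical driver waits for data with
 * mtx_sleep(9) and posts it with wakeup(9).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct foo_softc {
	struct mtx	sc_mtx;		/* protects sc_ready */
	int		sc_ready;	/* condition waited on below */
};

static void
foo_init(struct foo_softc *sc)
{

	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
	sc->sc_ready = 0;
}

/* Consumer: sleep until sc_ready is set or a signal interrupts the wait. */
static int
foo_wait(struct foo_softc *sc)
{
	int error;

	mtx_lock(&sc->sc_mtx);
	while (sc->sc_ready == 0) {
		/*
		 * Atomically drop sc_mtx and sleep on &sc->sc_ready.
		 * The mutex is reacquired before mtx_sleep() returns
		 * (no PDROP), so the condition can be rechecked safely.
		 * PCATCH lets signals interrupt the sleep; 2 * hz bounds
		 * each wait to roughly two seconds.
		 */
		error = mtx_sleep(&sc->sc_ready, &sc->sc_mtx, PCATCH,
		    "foordy", 2 * hz);
		if (error != 0 && error != EWOULDBLOCK) {
			/* EINTR or ERESTART: bail out, mutex still held. */
			mtx_unlock(&sc->sc_mtx);
			return (error);
		}
	}
	sc->sc_ready = 0;
	mtx_unlock(&sc->sc_mtx);
	return (0);
}

/* Producer: typically called from an ithread or another kernel thread. */
static void
foo_post(struct foo_softc *sc)
{

	mtx_lock(&sc->sc_mtx);
	sc->sc_ready = 1;
	wakeup(&sc->sc_ready);		/* wake every thread on this channel */
	mtx_unlock(&sc->sc_mtx);
}

rw_sleep() and sx_sleep() follow the same pattern with a struct rwlock * or
struct sx * in place of the mutex, and tsleep() omits the lock argument
entirely, relying on a timeout or on Giant to bound the effect of a lost
wakeup, as the updated sleep.9 text describes.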