Several improvements to rmlock(9). Many of these are based on patches
provided by Isilon.
- Add an rm_assert() supporting various lock assertions similar to other
  locking primitives.  Because rmlocks track readers, the assertions are
  always fully accurate, unlike rw_assert() and sx_assert().
- Flesh out the lock class methods for rmlocks to support sleeping via
  condvars and rm_sleep() (but only while holding write locks), rmlock
  details in 'show lock' in DDB, and the lc_owner method used by
  dtrace.
- Add an internal destroyed cookie so that API functions can assert
  that an rmlock is not destroyed.
- Make use of rm_assert() to add various assertions to the API (e.g.
  to assert locks are held when an unlock routine is called).
- Give RM_SLEEPABLE locks their own lock class and always use the
  rmlock's own lock_object with WITNESS.
- Use THREAD_NO_SLEEPING() / THREAD_SLEEPING_OK() to disallow sleeping
  while holding a read lock on an rmlock.

Submitted by:	andre
Obtained from:	EMC/Isilon
jhb 2013-06-25 18:44:15 +00:00
parent 43efcb27b6
commit 0807c44cdd
9 changed files with 389 additions and 79 deletions
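
As a rough illustration of the rm_assert() addition described above, a consumer
might use the new assertion like this (a minimal sketch; the lock "foo_rm" and
the function foo_modify() are hypothetical and not part of this commit):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock foo_rm;	/* hypothetical; assumed rm_init()ed elsewhere */

static void
foo_modify(void)
{
	/*
	 * Panics under INVARIANTS/INVARIANT_SUPPORT if the caller did not
	 * enter with rm_wlock(&foo_rm); compiles to nothing otherwise.
	 */
	rm_assert(&foo_rm, RA_WLOCKED);

	/* ... modify state protected by foo_rm ... */
}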

share/man/man9/Makefile

@@ -1077,12 +1077,15 @@ MLINKS+=rman.9 rman_activate_resource.9 \
rman.9 rman_set_bustag.9 \
rman.9 rman_set_rid.9 \
rman.9 rman_set_virtual.9
MLINKS+=rmlock.9 rm_destroy.9 \
MLINKS+=rmlock.9 rm_assert.9 \
rmlock.9 rm_destroy.9 \
rmlock.9 rm_init.9 \
rmlock.9 rm_init_flags.9 \
rmlock.9 rm_rlock.9 \
rmlock.9 rm_try_rlock.9 \
rmlock.9 rm_runlock.9 \
rmlock.9 rm_sleep.9 \
rmlock.9 RM_SYSINIT.9 \
rmlock.9 rm_try_rlock.9 \
rmlock.9 rm_wlock.9 \
rmlock.9 rm_wowned.9 \
rmlock.9 rm_wunlock.9

share/man/man9/rmlock.9

@@ -26,7 +26,7 @@
.\" $FreeBSD$
.\"
.\" Based on rwlock.9 man page
.Dd June 8, 2012
.Dd June 25, 2013
.Dt RMLOCK 9
.Os
.Sh NAME
@@ -40,6 +40,8 @@
.Nm rm_runlock ,
.Nm rm_wunlock ,
.Nm rm_wowned ,
.Nm rm_sleep ,
.Nm rm_assert ,
.Nm RM_SYSINIT
.Nd kernel reader/writer lock optimized for read-mostly access patterns
.Sh SYNOPSIS
@@ -64,6 +66,13 @@
.Fn rm_wunlock "struct rmlock *rm"
.Ft int
.Fn rm_wowned "const struct rmlock *rm"
.Ft int
.Fn rm_sleep "void *wchan" "struct rmlock *rm" "int priority" "const char *wmesg" "int timo"
.Pp
.Cd "options INVARIANTS"
.Cd "options INVARIANT_SUPPORT"
.Ft void
.Fn rm_assert "struct rmlock *rm" "int what"
.In sys/kernel.h
.Fn RM_SYSINIT "name" "struct rmlock *rm" "const char *desc" "int opts"
.Sh DESCRIPTION
@@ -215,12 +224,63 @@ lock must be unlocked.
This function returns a non-zero value if the current thread owns an
exclusive lock on
.Fa rm .
.It Fn rm_sleep "void *wchan" "struct rmlock *rm" "int priority" "const char *wmesg" "int timo"
This function atomically releases
.Fa rm
while waiting for an event.
The
.Fa rm
lock must be exclusively locked.
For more details on the parameters to this function,
see
.Xr sleep 9 .
.It Fn rm_assert "struct rmlock *rm" "int what"
This function asserts that the
.Fa rm
lock is in the state specified by
.Fa what .
If the assertions are not true and the kernel is compiled with
.Cd "options INVARIANTS"
and
.Cd "options INVARIANT_SUPPORT" ,
the kernel will panic.
Currently the following base assertions are supported:
.Bl -tag -width ".Dv RA_UNLOCKED"
.It Dv RA_LOCKED
Assert that current thread holds either a shared or exclusive lock
of
.Fa rm .
.It Dv RA_RLOCKED
Assert that current thread holds a shared lock of
.Fa rm .
.It Dv RA_WLOCKED
Assert that current thread holds an exclusive lock of
.Fa rm .
.It Dv RA_UNLOCKED
Assert that current thread holds neither a shared nor exclusive lock of
.Fa rm .
.El
.Pp
In addition, one of the following optional flags may be specified with
.Dv RA_LOCKED ,
.Dv RA_RLOCKED ,
or
.Dv RA_WLOCKED :
.Bl -tag -width ".Dv RA_NOTRECURSED"
.It Dv RA_RECURSED
Assert that the current thread holds a recursive lock of
.Fa rm .
.It Dv RA_NOTRECURSED
Assert that the current thread does not hold a recursive lock of
.Fa rm .
.El
.El
.Sh SEE ALSO
.Xr locking 9 ,
.Xr mutex 9 ,
.Xr panic 9 ,
.Xr rwlock 9 ,
.Xr sleep 9 ,
.Xr sema 9 ,
.Xr sx 9
.Sh HISTORY
@@ -252,8 +312,3 @@ implementation uses a single per CPU list shared by all
rmlocks in the system.
If rmlocks become popular, hashing to multiple per CPU queues may
be needed to speed up the writer lock process.
.Pp
The
.Nm
can currently not be used as a lock argument for condition variable
wait functions.
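
To make the rm_sleep() description above concrete, a typical wait loop might
look like the following sketch (all names are hypothetical; a timo of 0 means
no timeout, as with sleep(9), and PRIBIO comes from <sys/priority.h>):

static struct rmlock foo_rm;	/* hypothetical; assumed rm_init()ed elsewhere */
static int foo_ready;

static void
foo_wait(void)
{
	rm_wlock(&foo_rm);
	while (foo_ready == 0)
		rm_sleep(&foo_ready, &foo_rm, PRIBIO, "foordy", 0);
	/* foo_ready is set and foo_rm is write-locked again here. */
	rm_wunlock(&foo_rm);
}

The waking side would set foo_ready and call wakeup(&foo_ready) while holding
the write lock.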

sys/kern/kern_cpuset.c

@@ -1130,25 +1130,34 @@ sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
}
#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
int cpu, once;
for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
if (CPU_ISSET(cpu, set)) {
if (once == 0) {
db_printf("%d", cpu);
once = 1;
} else
db_printf(",%d", cpu);
}
}
if (once == 0)
db_printf("<none>");
}
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
struct cpuset *set;
int cpu, once;
LIST_FOREACH(set, &cpuset_ids, cs_link) {
db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
set, set->cs_id, set->cs_ref, set->cs_flags,
(set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
db_printf(" mask=");
for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
if (CPU_ISSET(cpu, &set->cs_mask)) {
if (once == 0) {
db_printf("%d", cpu);
once = 1;
} else
db_printf(",%d", cpu);
}
}
ddb_display_cpuset(&set->cs_mask);
db_printf("\n");
if (db_pager_quit)
break;

sys/kern/kern_rmlock.c

@@ -57,16 +57,26 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
#endif
/*
* A cookie to mark destroyed rmlocks. This is stored in the head of
* rm_activeReaders.
*/
#define RM_DESTROYED ((void *)0xdead)
#define rm_destroyed(rm) \
(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)
#define RMPF_ONQUEUE 1
#define RMPF_SIGNAL 2
/*
* To support usage of rmlock in CVs and msleep yet another list for the
* priority tracker would be needed. Using this lock for cv and msleep also
* does not seem very useful
*/
#ifndef INVARIANTS
#define _rm_assert(c, what, file, line)
#endif
static void assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_rm(const struct lock_object *lock);
#endif
static void lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_rm(const struct lock_object *lock, struct thread **owner);
@@ -77,10 +87,22 @@ struct lock_class lock_class_rm = {
.lc_name = "rm",
.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
.lc_assert = assert_rm,
#if 0
#ifdef DDB
.lc_ddb_show = db_show_rwlock,
.lc_ddb_show = db_show_rm,
#endif
.lc_lock = lock_rm,
.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
.lc_owner = owner_rm,
#endif
};
struct lock_class lock_class_rm_sleepable = {
.lc_name = "sleepable rm",
.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
.lc_assert = assert_rm,
#ifdef DDB
.lc_ddb_show = db_show_rm,
#endif
.lc_lock = lock_rm,
.lc_unlock = unlock_rm,
@@ -93,29 +115,49 @@ static void
assert_rm(const struct lock_object *lock, int what)
{
panic("assert_rm called");
rm_assert((const struct rmlock *)lock, what);
}
/*
* These do not support read locks because it would be hard to make
* the tracker work correctly with the current lock_class API as you
* would need to have the tracker pointer available when calling
* rm_rlock() in lock_rm().
*/
static void
lock_rm(struct lock_object *lock, int how)
{
struct rmlock *rm;
panic("lock_rm called");
rm = (struct rmlock *)lock;
if (how)
rm_wlock(rm);
#ifdef INVARIANTS
else
panic("lock_rm called in read mode");
#endif
}
static int
unlock_rm(struct lock_object *lock)
{
struct rmlock *rm;
panic("unlock_rm called");
rm = (struct rmlock *)lock;
rm_wunlock(rm);
return (1);
}
#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
const struct rmlock *rm;
struct lock_class *lc;
panic("owner_rm called");
rm = (const struct rmlock *)lock;
lc = LOCK_CLASS(&rm->rm_wlock_object);
return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif
@@ -146,6 +188,28 @@ rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
/*
* Return a count of the number of trackers the thread 'td' already
* has on this CPU for the lock 'rm'.
*/
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
const struct thread *td)
{
struct rm_queue *queue;
struct rm_priotracker *tracker;
int count;
count = 0;
for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
queue = queue->rmq_next) {
tracker = (struct rm_priotracker *)queue;
if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
count++;
}
return (count);
}
static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
@@ -183,11 +247,10 @@ rm_cleanIPI(void *arg)
}
}
CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
struct lock_class *lc;
int liflags;
liflags = 0;
@@ -198,11 +261,14 @@ rm_init_flags(struct rmlock *rm, const char *name, int opts)
rm->rm_writecpus = all_cpus;
LIST_INIT(&rm->rm_activeReaders);
if (opts & RM_SLEEPABLE) {
liflags |= RM_SLEEPABLE;
sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
} else
liflags |= LO_SLEEPABLE;
lc = &lock_class_rm_sleepable;
sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
} else {
lc = &lock_class_rm;
mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}
lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
void
@@ -216,7 +282,9 @@ void
rm_destroy(struct rmlock *rm)
{
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
rm_assert(rm, RA_UNLOCKED);
LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_destroy(&rm->rm_lock_sx);
else
mtx_destroy(&rm->rm_lock_mtx);
@@ -227,7 +295,7 @@ int
rm_wowned(const struct rmlock *rm)
{
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
return (sx_xlocked(&rm->rm_lock_sx));
else
return (mtx_owned(&rm->rm_lock_mtx));
@@ -253,8 +321,6 @@ static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
struct pcpu *pc;
struct rm_queue *queue;
struct rm_priotracker *atracker;
critical_enter();
pc = pcpu_find(curcpu);
@@ -285,20 +351,15 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
* Just grant the lock if this thread already has a tracker
* for this lock on the per-cpu queue.
*/
for (queue = pc->pc_rm_queue.rmq_next;
queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
atracker = (struct rm_priotracker *)queue;
if ((atracker->rmp_rmlock == rm) &&
(atracker->rmp_thread == tracker->rmp_thread)) {
mtx_lock_spin(&rm_spinlock);
LIST_INSERT_HEAD(&rm->rm_activeReaders,
tracker, rmp_qentry);
tracker->rmp_flags = RMPF_ONQUEUE;
mtx_unlock_spin(&rm_spinlock);
rm_tracker_add(pc, tracker);
critical_exit();
return (1);
}
if (rm_trackers_present(pc, rm, curthread) != 0) {
mtx_lock_spin(&rm_spinlock);
LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
rmp_qentry);
tracker->rmp_flags = RMPF_ONQUEUE;
mtx_unlock_spin(&rm_spinlock);
rm_tracker_add(pc, tracker);
critical_exit();
return (1);
}
}
@@ -306,7 +367,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
critical_exit();
if (trylock) {
if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
if (!sx_try_xlock(&rm->rm_lock_sx))
return (0);
} else {
@@ -314,7 +375,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
return (0);
}
} else {
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
mtx_lock(&rm->rm_lock_mtx);
@@ -327,7 +388,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
sched_pin();
critical_exit();
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xunlock(&rm->rm_lock_sx);
else
mtx_unlock(&rm->rm_lock_mtx);
@@ -348,6 +409,9 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
tracker->rmp_thread = td;
tracker->rmp_rmlock = rm;
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
THREAD_NO_SLEEPING();
td->td_critnest++; /* critical_enter(); */
__compiler_membar();
@@ -422,6 +486,9 @@ _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
td->td_critnest--;
sched_unpin();
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
THREAD_SLEEPING_OK();
if (0 == (td->td_owepreempt | tracker->rmp_flags))
return;
@@ -438,7 +505,7 @@ _rm_wlock(rm)
if (SCHEDULER_STOPPED())
return;
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
mtx_lock(&rm->rm_lock_mtx);
@@ -481,7 +548,7 @@ void
_rm_wunlock(struct rmlock *rm)
{
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xunlock(&rm->rm_lock_sx);
else
mtx_unlock(&rm->rm_lock_mtx);
@@ -489,7 +556,8 @@ _rm_wunlock(rm)
#ifdef LOCK_DEBUG
void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{
if (SCHEDULER_STOPPED())
@@ -498,6 +566,10 @@ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
curthread, rm->lock_object.lo_name, file, line));
KASSERT(!rm_destroyed(rm),
("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
_rm_assert(rm, RA_UNLOCKED, file, line);
WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
file, line, NULL);
@@ -505,11 +577,7 @@ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
file, line);
else
WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
@@ -522,14 +590,13 @@ _rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
curthread->td_locks--;
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
file, line);
else
WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
KASSERT(!rm_destroyed(rm),
("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
_rm_assert(rm, RA_WLOCKED, file, line);
WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
_rm_wunlock(rm);
curthread->td_locks--;
}
int
@@ -540,23 +607,43 @@ _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
if (SCHEDULER_STOPPED())
return (1);
#ifdef INVARIANTS
if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
critical_enter();
KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
curthread) == 0,
("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
rm->lock_object.lo_name, file, line));
critical_exit();
}
#endif
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
curthread, rm->lock_object.lo_name, file, line));
if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
file, line, NULL);
WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);
KASSERT(!rm_destroyed(rm),
("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
if (!trylock) {
KASSERT(!rm_wowned(rm),
("rm_rlock: wlock already held for %s @ %s:%d",
rm->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
NULL);
}
if (_rm_rlock(rm, tracker, trylock)) {
LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);
if (trylock)
LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
line);
else
LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
line);
WITNESS_LOCK(&rm->lock_object, 0, file, line);
curthread->td_locks++;
return (1);
}
} else if (trylock)
LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);
return (0);
}
@@ -569,10 +656,13 @@ _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
if (SCHEDULER_STOPPED())
return;
curthread->td_locks--;
KASSERT(!rm_destroyed(rm),
("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
_rm_assert(rm, RA_RLOCKED, file, line);
WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
_rm_runlock(rm, tracker);
curthread->td_locks--;
}
#else
@@ -612,3 +702,126 @@ _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
}
#endif
#ifdef INVARIANT_SUPPORT
/*
* Note that this does not need to use witness_assert() for read lock
* assertions since an exact count of read locks held by this thread
* is computable.
*/
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
int count;
if (panicstr != NULL)
return;
switch (what) {
case RA_LOCKED:
case RA_LOCKED | RA_RECURSED:
case RA_LOCKED | RA_NOTRECURSED:
case RA_RLOCKED:
case RA_RLOCKED | RA_RECURSED:
case RA_RLOCKED | RA_NOTRECURSED:
/*
* Handle the write-locked case. Unlike other
* primitives, writers can never recurse.
*/
if (rm_wowned(rm)) {
if (what & RA_RLOCKED)
panic("Lock %s exclusively locked @ %s:%d\n",
rm->lock_object.lo_name, file, line);
if (what & RA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
rm->lock_object.lo_name, file, line);
break;
}
critical_enter();
count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
critical_exit();
if (count == 0)
panic("Lock %s not %slocked @ %s:%d\n",
rm->lock_object.lo_name, (what & RA_RLOCKED) ?
"read " : "", file, line);
if (count > 1) {
if (what & RA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
rm->lock_object.lo_name, file, line);
} else if (what & RA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
rm->lock_object.lo_name, file, line);
break;
case RA_WLOCKED:
if (!rm_wowned(rm))
panic("Lock %s not exclusively locked @ %s:%d\n",
rm->lock_object.lo_name, file, line);
break;
case RA_UNLOCKED:
if (rm_wowned(rm))
panic("Lock %s exclusively locked @ %s:%d\n",
rm->lock_object.lo_name, file, line);
critical_enter();
count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
critical_exit();
if (count != 0)
panic("Lock %s read locked @ %s:%d\n",
rm->lock_object.lo_name, file, line);
break;
default:
panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
line);
}
}
#endif /* INVARIANT_SUPPORT */
#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
struct thread *td;
td = tr->rmp_thread;
db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
td->td_proc->p_pid, td->td_name);
if (tr->rmp_flags & RMPF_ONQUEUE) {
db_printf("ONQUEUE");
if (tr->rmp_flags & RMPF_SIGNAL)
db_printf(",SIGNAL");
} else
db_printf("0");
db_printf("}\n");
}
static void
db_show_rm(const struct lock_object *lock)
{
struct rm_priotracker *tr;
struct rm_queue *queue;
const struct rmlock *rm;
struct lock_class *lc;
struct pcpu *pc;
rm = (const struct rmlock *)lock;
db_printf(" writecpus: ");
ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
db_printf("\n");
db_printf(" per-CPU readers:\n");
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
for (queue = pc->pc_rm_queue.rmq_next;
queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
tr = (struct rm_priotracker *)queue;
if (tr->rmp_rmlock == rm)
print_tracker(tr);
}
db_printf(" active readers:\n");
LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
print_tracker(tr);
lc = LOCK_CLASS(&rm->rm_wlock_object);
db_printf("Backing write-lock (%s):\n", lc->lc_name);
lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif
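
As a hedged sketch of the THREAD_NO_SLEEPING() change in kern_rmlock.c above:
a reader of a sleepable rmlock that tries to sleep now trips the kernel's
"sleeping prohibited" assertion under INVARIANTS (the lock and function below
are hypothetical; foo_rm is assumed to have been created with RM_SLEEPABLE):

static struct rmlock foo_rm;	/* hypothetical; rm_init_flags(..., RM_SLEEPABLE) */

static void
foo_reader(void)
{
	struct rm_priotracker trk;

	rm_rlock(&foo_rm, &trk);
	/*
	 * Sleeping here, e.g. pause("foo", hz), would now panic with
	 * INVARIANTS because _rm_rlock() called THREAD_NO_SLEEPING()
	 * for this LO_SLEEPABLE lock.
	 */
	rm_runlock(&foo_rm, &trk);
}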

sys/kern/subr_lock.c

@@ -66,6 +66,7 @@ struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
&lock_class_mtx_sleep,
&lock_class_sx,
&lock_class_rm,
&lock_class_rm_sleepable,
&lock_class_rw,
&lock_class_lockmgr,
};

sys/sys/_rmlock.h

@@ -44,14 +44,17 @@ struct rm_queue {
};
struct rmlock {
struct lock_object lock_object;
struct lock_object lock_object;
volatile cpuset_t rm_writecpus;
LIST_HEAD(,rm_priotracker) rm_activeReaders;
union {
struct lock_object _rm_wlock_object;
struct mtx _rm_lock_mtx;
struct sx _rm_lock_sx;
} _rm_lock;
};
#define rm_wlock_object _rm_lock._rm_wlock_object
#define rm_lock_mtx _rm_lock._rm_lock_mtx
#define rm_lock_sx _rm_lock._rm_lock_sx

sys/sys/cpuset.h

@@ -121,6 +121,9 @@ int cpuset_create_root(struct prison *, struct cpuset **);
int cpuset_setproc_update_set(struct proc *, struct cpuset *);
char *cpusetobj_strprint(char *, const cpuset_t *);
int cpusetobj_strscan(cpuset_t *, const char *);
#ifdef DDB
void ddb_display_cpuset(const cpuset_t *);
#endif
#else
__BEGIN_DECLS

sys/sys/lock.h

@@ -193,6 +193,7 @@ extern struct lock_class lock_class_mtx_spin;
extern struct lock_class lock_class_sx;
extern struct lock_class lock_class_rw;
extern struct lock_class lock_class_rm;
extern struct lock_class lock_class_rm_sleepable;
extern struct lock_class lock_class_lockmgr;
extern struct lock_class *lock_classes[];

sys/sys/rmlock.h

@@ -65,6 +65,10 @@ void _rm_wunlock(struct rmlock *rm);
int _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker,
int trylock);
void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _rm_assert(const struct rmlock *rm, int what, const char *file,
int line);
#endif
/*
* Public interface for lock operations.
@@ -89,6 +93,9 @@ void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker);
#define rm_try_rlock(rm,tracker) _rm_rlock((rm),(tracker), 1)
#define rm_runlock(rm,tracker) _rm_runlock((rm), (tracker))
#endif
#define rm_sleep(chan, rm, pri, wmesg, timo) \
_sleep((chan), &(rm)->lock_object, (pri), (wmesg), \
tick_sbt * (timo), 0, C_HARDCLOCK)
struct rm_args {
struct rmlock *ra_rm;
@@ -123,5 +130,20 @@ struct rm_args_flags {
SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
rm_destroy, (rm))
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define RA_LOCKED LA_LOCKED
#define RA_RLOCKED LA_SLOCKED
#define RA_WLOCKED LA_XLOCKED
#define RA_UNLOCKED LA_UNLOCKED
#define RA_RECURSED LA_RECURSED
#define RA_NOTRECURSED LA_NOTRECURSED
#endif
#ifdef INVARIANTS
#define rm_assert(rm, what) _rm_assert((rm), (what), LOCK_FILE, LOCK_LINE)
#else
#define rm_assert(rm, what)
#endif
#endif /* _KERNEL */
#endif /* !_SYS_RMLOCK_H_ */
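
Finally, a hedged sketch of the condition variable support enabled by the
fleshed-out lock class methods: a write-locked rmlock can now be handed to
cv_wait(), which uses the class's lc_unlock()/lc_lock() to drop and reacquire
the write lock around the sleep (all names below are hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/condvar.h>

static struct rmlock foo_rm;	/* hypothetical; assumed rm_init()ed elsewhere */
static struct cv foo_cv;	/* hypothetical; assumed cv_init()ed elsewhere */
static int foo_ready;

static void
foo_cv_wait(void)
{
	rm_wlock(&foo_rm);
	while (foo_ready == 0)
		cv_wait(&foo_cv, &foo_rm);	/* drops and retakes the wlock */
	rm_wunlock(&foo_rm);
}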