rms: several cleanups + debug read lockers handling
This adds a dedicated counter, updated with atomics, when INVARIANTS is in use. As a side effect one can reliably determine that the lock is held for reading by at least one thread, but it is still not possible to tell whether curthread is among them. This should be good enough in practice.

Problem spotted by avg.
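For illustration only (not part of the commit): a minimal user-space C11 sketch of the idea, with atomic_fetch_add() standing in for the kernel's atomic_fetchadd_int(), assert() for KASSERT(), and a hypothetical debug_readers global for the new rms->debug_readers field.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int debug_readers;	/* analogue of rms->debug_readers */

static void
debug_readers_inc(void)
{
	int old;

	old = atomic_fetch_add(&debug_readers, 1);
	assert(old >= 0);		/* the count must never go negative */
}

static void
debug_readers_dec(void)
{
	int old;

	old = atomic_fetch_sub(&debug_readers, 1);
	assert(old > 0);		/* someone must have been reading */
}

/* The check the counter enables: is the lock read-held by anyone? */
static int
rowned(void)
{

	return (atomic_load(&debug_readers) > 0);
}

int
main(void)
{

	debug_readers_inc();			/* a reader enters */
	printf("read-held: %d\n", rowned());	/* prints 1 */
	debug_readers_dec();			/* the reader leaves */
	printf("read-held: %d\n", rowned());	/* prints 0 */
	return (0);
}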
parent ecb4fdf943
commit 42e7abd5db
sys/kern/kern_rmlock.c

@@ -878,10 +878,105 @@ db_show_rm(const struct lock_object *lock)
  * problem at some point. The easiest way to lessen it is to provide a bitmap.
  */

+#define	rms_int_membar()	__compiler_membar()
+
 #define	RMS_NOOWNER	((void *)0x1)
 #define	RMS_TRANSIENT	((void *)0x2)
 #define	RMS_FLAGMASK	0xf
+
+struct rmslock_pcpu {
+	int	influx;
+	int	readers;
+};
+
+_Static_assert(sizeof(struct rmslock_pcpu) == 8, "bad size");
+
+/*
+ * Internal routines
+ */
+static struct rmslock_pcpu *
+rms_int_pcpu(struct rmslock *rms)
+{
+
+	CRITICAL_ASSERT(curthread);
+	return (zpcpu_get(rms->pcpu));
+}
+
+static struct rmslock_pcpu *
+rms_int_remote_pcpu(struct rmslock *rms, int cpu)
+{
+
+	return (zpcpu_get_cpu(rms->pcpu, cpu));
+}
+
+static void
+rms_int_influx_enter(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+	CRITICAL_ASSERT(curthread);
+	MPASS(pcpu->influx == 0);
+	pcpu->influx = 1;
+}
+
+static void
+rms_int_influx_exit(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+	CRITICAL_ASSERT(curthread);
+	MPASS(pcpu->influx == 1);
+	pcpu->influx = 0;
+}
+
+#ifdef INVARIANTS
+static void
+rms_int_debug_readers_inc(struct rmslock *rms)
+{
+	int old;
+
+	old = atomic_fetchadd_int(&rms->debug_readers, 1);
+	KASSERT(old >= 0, ("%s: bad readers count %d\n", __func__, old));
+}
+
+static void
+rms_int_debug_readers_dec(struct rmslock *rms)
+{
+	int old;
+
+	old = atomic_fetchadd_int(&rms->debug_readers, -1);
+	KASSERT(old > 0, ("%s: bad readers count %d\n", __func__, old));
+}
+#else
+static void
+rms_int_debug_readers_inc(struct rmslock *rms)
+{
+}
+
+static void
+rms_int_debug_readers_dec(struct rmslock *rms)
+{
+}
+#endif
+
+static void
+rms_int_readers_inc(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+	CRITICAL_ASSERT(curthread);
+	rms_int_debug_readers_inc(rms);
+	pcpu->readers++;
+}
+
+static void
+rms_int_readers_dec(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+	CRITICAL_ASSERT(curthread);
+	rms_int_debug_readers_dec(rms);
+	pcpu->readers--;
+}
+
+/*
+ * Public API
+ */
 void
 rms_init(struct rmslock *rms, const char *name)
 {
@@ -889,9 +984,9 @@ rms_init(struct rmslock *rms, const char *name)
 	rms->owner = RMS_NOOWNER;
 	rms->writers = 0;
 	rms->readers = 0;
+	rms->debug_readers = 0;
 	mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
-	rms->readers_pcpu = uma_zalloc_pcpu(pcpu_zone_4, M_WAITOK | M_ZERO);
-	rms->readers_influx = uma_zalloc_pcpu(pcpu_zone_4, M_WAITOK | M_ZERO);
+	rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO);
 }

 void
@@ -901,23 +996,21 @@ rms_destroy(struct rmslock *rms)
 	MPASS(rms->writers == 0);
 	MPASS(rms->readers == 0);
 	mtx_destroy(&rms->mtx);
-	uma_zfree_pcpu(pcpu_zone_4, rms->readers_pcpu);
-	uma_zfree_pcpu(pcpu_zone_4, rms->readers_influx);
+	uma_zfree_pcpu(pcpu_zone_8, rms->pcpu);
 }

 static void __noinline
 rms_rlock_fallback(struct rmslock *rms)
 {

-	zpcpu_set_protected(rms->readers_influx, 0);
+	rms_int_influx_exit(rms, rms_int_pcpu(rms));
 	critical_exit();

 	mtx_lock(&rms->mtx);
-	MPASS(*zpcpu_get(rms->readers_pcpu) == 0);
 	while (rms->writers > 0)
 		msleep(&rms->readers, &rms->mtx, PUSER - 1, mtx_name(&rms->mtx), 0);
 	critical_enter();
-	zpcpu_add_protected(rms->readers_pcpu, 1);
+	rms_int_readers_inc(rms, rms_int_pcpu(rms));
 	mtx_unlock(&rms->mtx);
 	critical_exit();
 }
@@ -925,43 +1018,46 @@ rms_rlock_fallback(struct rmslock *rms)
 void
 rms_rlock(struct rmslock *rms)
 {
+	struct rmslock_pcpu *pcpu;

 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
 	MPASS(atomic_load_ptr(&rms->owner) != curthread);

 	critical_enter();
-	zpcpu_set_protected(rms->readers_influx, 1);
-	__compiler_membar();
+	pcpu = rms_int_pcpu(rms);
+	rms_int_influx_enter(rms, pcpu);
+	rms_int_membar();
 	if (__predict_false(rms->writers > 0)) {
 		rms_rlock_fallback(rms);
 		return;
 	}
-	__compiler_membar();
-	zpcpu_add_protected(rms->readers_pcpu, 1);
-	__compiler_membar();
-	zpcpu_set_protected(rms->readers_influx, 0);
+	rms_int_membar();
+	rms_int_readers_inc(rms, pcpu);
+	rms_int_membar();
+	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }

 int
 rms_try_rlock(struct rmslock *rms)
 {
+	struct rmslock_pcpu *pcpu;

 	MPASS(atomic_load_ptr(&rms->owner) != curthread);

 	critical_enter();
-	zpcpu_set_protected(rms->readers_influx, 1);
-	__compiler_membar();
+	pcpu = rms_int_pcpu(rms);
+	rms_int_influx_enter(rms, pcpu);
+	rms_int_membar();
 	if (__predict_false(rms->writers > 0)) {
-		__compiler_membar();
-		zpcpu_set_protected(rms->readers_influx, 0);
+		rms_int_influx_exit(rms, pcpu);
 		critical_exit();
 		return (0);
 	}
-	__compiler_membar();
-	zpcpu_add_protected(rms->readers_pcpu, 1);
-	__compiler_membar();
-	zpcpu_set_protected(rms->readers_influx, 0);
+	rms_int_membar();
+	rms_int_readers_inc(rms, pcpu);
+	rms_int_membar();
+	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 	return (1);
 }
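The fast paths above get away with compiler-only barriers (rms_int_membar()) because the writer side synchronizes through an IPI rendezvous rather than memory fences. A user-space program has no such mechanism, so the following single-slot analogue substitutes seq_cst atomics for the membar-plus-IPI pairing; all names are hypothetical and it sketches only the handshake, not the kernel algorithm. A reader marks itself in flux, checks for writers, and either backs off or publishes its read hold; the writer announces itself and then spins until the flux flag clears, mirroring rms_wait_func().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int influx;	/* analogue of pcpu->influx */
static atomic_int readers;	/* analogue of pcpu->readers */
static atomic_int writers;	/* analogue of rms->writers */

static int
try_rlock(void)
{

	atomic_store(&influx, 1);
	if (atomic_load(&writers) > 0) {	/* writer pending: back off */
		atomic_store(&influx, 0);
		return (0);
	}
	atomic_fetch_add(&readers, 1);		/* publish the read hold */
	atomic_store(&influx, 0);
	return (1);
}

static void
runlock(void)
{

	atomic_fetch_sub(&readers, 1);
}

static void *
writer(void *arg)
{

	atomic_store(&writers, 1);
	while (atomic_load(&influx) != 0)	/* rms_wait_func() analogue */
		;
	while (atomic_load(&readers) > 0)	/* wait for readers to drain */
		;
	puts("writer: exclusive");
	atomic_store(&writers, 0);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	if (try_rlock()) {
		puts("reader: acquired");
		runlock();
	}
	pthread_create(&t, NULL, writer, NULL);
	pthread_join(t, NULL);
	return (0);
}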
@@ -970,13 +1066,14 @@ static void __noinline
 rms_runlock_fallback(struct rmslock *rms)
 {

-	zpcpu_set_protected(rms->readers_influx, 0);
+	rms_int_influx_exit(rms, rms_int_pcpu(rms));
 	critical_exit();

 	mtx_lock(&rms->mtx);
-	MPASS(*zpcpu_get(rms->readers_pcpu) == 0);
 	MPASS(rms->writers > 0);
 	MPASS(rms->readers > 0);
+	MPASS(rms->debug_readers == rms->readers);
+	rms_int_debug_readers_dec(rms);
 	rms->readers--;
 	if (rms->readers == 0)
 		wakeup_one(&rms->writers);
@@ -986,18 +1083,20 @@ rms_runlock_fallback(struct rmslock *rms)
 void
 rms_runlock(struct rmslock *rms)
 {
+	struct rmslock_pcpu *pcpu;

 	critical_enter();
-	zpcpu_set_protected(rms->readers_influx, 1);
-	__compiler_membar();
+	pcpu = rms_int_pcpu(rms);
+	rms_int_influx_enter(rms, pcpu);
+	rms_int_membar();
 	if (__predict_false(rms->writers > 0)) {
 		rms_runlock_fallback(rms);
 		return;
 	}
-	__compiler_membar();
-	zpcpu_sub_protected(rms->readers_pcpu, 1);
-	__compiler_membar();
-	zpcpu_set_protected(rms->readers_influx, 0);
+	rms_int_membar();
+	rms_int_readers_dec(rms, pcpu);
+	rms_int_membar();
+	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }

@@ -1010,17 +1109,19 @@ static void
 rms_action_func(void *arg)
 {
 	struct rmslock_ipi *rmsipi;
+	struct rmslock_pcpu *pcpu;
 	struct rmslock *rms;
-	int readers;

 	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
 	rms = rmsipi->rms;
+	pcpu = rms_int_pcpu(rms);

-	if (*zpcpu_get(rms->readers_influx))
+	if (pcpu->influx)
 		return;
-	readers = zpcpu_replace(rms->readers_pcpu, 0);
-	if (readers != 0)
-		atomic_add_int(&rms->readers, readers);
+	if (pcpu->readers != 0) {
+		atomic_add_int(&rms->readers, pcpu->readers);
+		pcpu->readers = 0;
+	}
 	smp_rendezvous_cpus_done(arg);
 }

@@ -1028,17 +1129,39 @@ static void
 rms_wait_func(void *arg, int cpu)
 {
 	struct rmslock_ipi *rmsipi;
+	struct rmslock_pcpu *pcpu;
 	struct rmslock *rms;
-	int *in_op;

 	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
 	rms = rmsipi->rms;
+	pcpu = rms_int_remote_pcpu(rms, cpu);

-	in_op = zpcpu_get_cpu(rms->readers_influx, cpu);
-	while (atomic_load_int(in_op))
+	while (atomic_load_int(&pcpu->influx))
 		cpu_spinwait();
 }

+#ifdef INVARIANTS
+static void
+rms_assert_no_pcpu_readers(struct rmslock *rms)
+{
+	struct rmslock_pcpu *pcpu;
+	int cpu;
+
+	CPU_FOREACH(cpu) {
+		pcpu = rms_int_remote_pcpu(rms, cpu);
+		if (pcpu->readers != 0) {
+			panic("%s: got %d readers on cpu %d\n", __func__,
+			    pcpu->readers, cpu);
+		}
+	}
+}
+#else
+static void
+rms_assert_no_pcpu_readers(struct rmslock *rms)
+{
+}
+#endif
+
 static void
 rms_wlock_switch(struct rmslock *rms)
 {
@@ -1080,6 +1203,7 @@ rms_wlock(struct rmslock *rms)
 	    ("%s: unexpected owner value %p\n", __func__, rms->owner));

 	rms_wlock_switch(rms);
+	rms_assert_no_pcpu_readers(rms);

 	if (rms->readers > 0) {
 		msleep(&rms->writers, &rms->mtx, (PUSER - 1),
@@ -1088,6 +1212,7 @@ rms_wlock(struct rmslock *rms)

 out_grab:
 	rms->owner = curthread;
+	rms_assert_no_pcpu_readers(rms);
 	mtx_unlock(&rms->mtx);
 	MPASS(rms->readers == 0);
 }
sys/sys/_rmlock.h

@@ -70,13 +70,15 @@ struct rm_priotracker {

 #include <sys/_mutex.h>

+struct rmslock_pcpu;
+
 struct rmslock {
 	struct mtx mtx;
 	struct thread *owner;
+	struct rmslock_pcpu *pcpu;
 	int	writers;
 	int	readers;
-	int	*readers_pcpu;
-	int	*readers_influx;
+	int	debug_readers;
 };

 #endif /* !_SYS__RMLOCK_H_ */
sys/sys/rmlock.h

@@ -149,14 +149,18 @@ rms_wowned(struct rmslock *rms)
 	return (rms->owner == curthread);
 }

+#ifdef INVARIANTS
 /*
  * Only valid to call if you hold the lock in some manner.
  * For assertion purposes.
+ *
+ * Main limitation is that we at best can tell there are readers, but not
+ * whether curthread is one of them.
  */
 static inline int
 rms_rowned(struct rmslock *rms)
 {

-	return (rms->readers > 0);
+	return (rms->debug_readers > 0);
 }

 static inline int
@@ -168,6 +172,7 @@ rms_owned_any(struct rmslock *rms)

 	return (rms_rowned(rms));
 }
+#endif

 #endif /* _KERNEL */
 #endif /* !_SYS_RMLOCK_H_ */
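To make the documented limitation concrete, here is a small hypothetical user-space demonstration (C11 atomics again, names invented for the example): a bare counter proves that some thread holds the lock for reading, but a thread that never took it gets the same answer.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int debug_readers;	/* stand-in for rms->debug_readers */

static void *
bystander(void *arg)
{

	/*
	 * This thread never read-locked anything, yet the check passes:
	 * the counter can only say that *somebody* is a reader.
	 */
	printf("bystander sees read-held: %d\n",
	    atomic_load(&debug_readers) > 0);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	atomic_fetch_add(&debug_readers, 1);	/* main read-locks */
	pthread_create(&t, NULL, bystander, NULL);
	pthread_join(t, NULL);
	atomic_fetch_sub(&debug_readers, 1);	/* main unlocks */
	return (0);
}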