/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
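
/*
 * Illustrative usage of the rw(9) KPI implemented here (a sketch only; the
 * lock and data names are hypothetical):
 *
 *	struct rwlock data_lock;
 *
 *	rw_init(&data_lock, "data lock");
 *	...
 *	rw_rlock(&data_lock);		read-only access to the shared data
 *	rw_runlock(&data_lock);
 *	...
 *	rw_wlock(&data_lock);		exclusive access for updates
 *	rw_wunlock(&data_lock);
 *	...
 *	rw_destroy(&data_lock);
 */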

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock* has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

#ifdef ADAPTIVE_RWLOCKS
#ifdef RWLOCK_CUSTOM_BACKOFF
static u_short __read_frequently rowner_retries;
static u_short __read_frequently rowner_loops;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "rwlock debugging");
SYSCTL_U16(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_U16(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config __read_frequently rw_delay;

SYSCTL_U16(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
    0, "");
SYSCTL_U16(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

static void
rw_lock_delay_init(void *arg __unused)
{

	lock_delay_default_init(&rw_delay);
	rowner_retries = 10;
	rowner_loops = max(10000, rw_delay.max);
}
LOCK_DELAY_SYSINIT(rw_lock_delay_init);
#else
#define	rw_delay	locks_delay
#define	rowner_retries	locks_delay_retries
#define	rowner_loops	locks_delay_loops
#endif
#endif

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */

#define	lv_rw_wowner(v)							\
	((v) & RW_LOCK_READ ? NULL :					\
	 (struct thread *)RW_OWNER((v)))

#define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))
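
/*
 * Note on the lock word (sys/rwlock.h has the authoritative encoding): when
 * write-locked, rw_lock holds the owning thread pointer with RW_LOCK_READ
 * clear; when unlocked or read-locked, RW_LOCK_READ is set and the reader
 * count is kept in the upper bits, with low flag bits recording waiting
 * readers/writers and spinners.  The macros above only decode the owner from
 * a single snapshot of that word.
 */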

/*
 * Returns if a write owner is recursed.  Write ownership is not assured
 * here and should be previously checked.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args;

	args = arg;
	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t tid, v;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	v = RW_UNLOCKED;
	if (!_rw_write_lock_fetch(rw, &v, tid))
		_rw_wlock_hard(rw, v, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
		    0, 0, file, line, LOCKSTAT_WRITER);

	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

int
__rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	v = RW_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
			break;
		if (v == RW_UNLOCKED)
			continue;
		if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
			rw->rw_recurse++;
			atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}
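
/*
 * Illustrative try-lock pattern for the interface above (a sketch with a
 * hypothetical lock name, not code from this file):
 *
 *	if (rw_try_wlock(&obj_lock)) {
 *		... modify the protected object ...
 *		rw_wunlock(&obj_lock);
 *	} else {
 *		... defer the work or fall back to a blocking rw_wlock() ...
 *	}
 */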

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_wlock_int(rw LOCK_FILE_LINE_ARG));
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);

#ifdef LOCK_PROFILING
	_rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
#else
	__rw_wunlock(rw, curthread, file, line);
#endif

	TD_LOCKS_DEC(curthread);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing otherwise
 * prioritizes writers before readers.
 */
static bool __always_inline
__rw_can_read(struct thread *td, uintptr_t v, bool fp)
{

	if ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER))
	    == RW_LOCK_READ)
		return (true);
	if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ))
		return (true);
	return (false);
}
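
/*
 * Example of the policy above: with the lock read-locked and a writer
 * waiting (RW_LOCK_WRITE_WAITERS set), a brand new reader is turned away so
 * the writer is not starved, but a thread that already holds read locks
 * (td_rw_rlocks != 0) is still admitted to avoid deadlocking against itself.
 */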

static bool __always_inline
__rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * Handle the easy case.  If no other thread has a write
	 * lock, then try to bump up the count of read locks.  Note
	 * that we have to preserve the current state of the
	 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
	 * read lock, then rw_lock must have changed, so restart
	 * the loop.  Note that this handles the case of a
	 * completely unlocked rwlock since such a lock is encoded
	 * as a read lock with no waiters.
	 */
	while (__rw_can_read(td, *vp, fp)) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
		    *vp + RW_ONE_READER)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR4(KTR_LOCK,
				    "%s: %p succeed %p -> %p", __func__,
				    rw, (void *)*vp,
				    (void *)(*vp + RW_ONE_READER));
			td->td_rw_rlocks++;
			return (true);
		}
	}
	return (false);
}

static void __noinline
__rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
	int spintries = 0;
	int i, n;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
	int doing_lockprof = 0;
#endif

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
			goto out_lockstat;
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&rw->lock_object);
		state = v;
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
	state = v;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&rw->lock_object, false,
	    &contested, &waittime);

	for (;;) {
		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				do {
					lock_delay(&lda);
					v = RW_READ_VALUE(rw);
					owner = lv_rw_wowner(v);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else {
			if ((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) {
				MPASS(!__rw_can_read(td, v, false));
				lock_delay_spin(2);
				v = RW_READ_VALUE(rw);
				continue;
			}
			if (spintries < rowner_retries) {
				spintries++;
				KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
				    "spinning", "lockname:\"%s\"",
				    rw->lock_object.lo_name);
				n = RW_READERS(v);
				for (i = 0; i < rowner_loops; i += n) {
					lock_delay_spin(n);
					v = RW_READ_VALUE(rw);
					if (!(v & RW_LOCK_READ))
						break;
					n = RW_READERS(v);
					if (n == 0)
						break;
					if (__rw_can_read(td, v, false))
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += rowner_loops - i;
#endif
				KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
				    "running");
				if (i < rowner_loops)
					continue;
			}
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = RW_READ_VALUE(rw);
retry_ts:
		if (((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) ||
		    __rw_can_read(td, v, false)) {
			turnstile_cancel(ts);
			continue;
		}

		owner = lv_rw_wowner(v);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (owner != NULL) {
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!__rw_can_read(td, v, false));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
			    v | RW_LOCK_READ_WAITERS))
				goto retry_ts;
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		MPASS(owner == rw_owner(rw));
		turnstile_wait(ts, owner, TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
		v = RW_READ_VALUE(rw);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
out_lockstat:
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
}

void
__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t v;

	td = curthread;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
	    !TD_IS_IDLETHREAD(td),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    td, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != td,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

	v = RW_READ_VALUE(rw);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||
	    !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG)))
		__rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
	else
		lock_profile_obtain_lock_success(&rw->lock_object, false, 0, 0,
		    file, line);

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
}

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	__rw_rlock_int(rw LOCK_FILE_LINE_ARG);
}

int
__rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	x = rw->rw_lock;
	for (;;) {
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_rlock_int(rw LOCK_FILE_LINE_ARG));
}

static bool __always_inline
__rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
{

	for (;;) {
		if (RW_READERS(*vp) > 1 || !(*vp & RW_LOCK_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
			    *vp - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)*vp,
					    (void *)(*vp - RW_ONE_READER));
				td->td_rw_rlocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
__rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	uintptr_t setv, queue;

	if (SCHEDULER_STOPPED())
		return;

	if (__rw_runlock_try(rw, td, &v))
		goto out_lockstat;

	/*
	 * Ok, we know we have waiters and we think we are the
	 * last reader, so grab the turnstile lock.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = RW_READ_VALUE(rw);
	for (;;) {
		if (__rw_runlock_try(rw, td, &v))
			break;

		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		setv = RW_UNLOCKED;
		queue = TS_SHARED_QUEUE;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			setv |= (v & RW_LOCK_READ_WAITERS);
		}
		setv |= (v & RW_LOCK_WRITE_SPINNER);
		if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv))
			continue;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts);
		td->td_rw_rlocks--;
		break;
	}
	turnstile_chain_unlock(&rw->lock_object);
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
}
|
|
|
|
|
|
|
|
void
|
2017-11-22 21:51:17 +00:00
|
|
|
_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
|
2017-02-08 19:28:46 +00:00
|
|
|
{
|
|
|
|
struct thread *td;
|
|
|
|
uintptr_t v;
|
|
|
|
|
|
|
|
KASSERT(rw->rw_lock != RW_DESTROYED,
|
|
|
|
("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
|
2017-11-22 21:51:17 +00:00
|
|
|
__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
|
2017-02-08 19:28:46 +00:00
|
|
|
WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
|
|
|
|
LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
|
|
|
|
|
|
|
|
td = curthread;
|
|
|
|
v = RW_READ_VALUE(rw);
|
|
|
|
|
2018-02-17 12:07:09 +00:00
|
|
|
if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||
|
2017-02-08 19:28:46 +00:00
|
|
|
!__rw_runlock_try(rw, td, &v)))
|
2017-11-22 21:51:17 +00:00
|
|
|
__rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
|
2018-02-17 12:07:09 +00:00
|
|
|
else
|
2021-05-23 15:25:42 +00:00
|
|
|
lock_profile_release_lock(&rw->lock_object, false);
|
2017-02-08 19:28:46 +00:00
|
|
|
|
2015-08-02 00:03:08 +00:00
|
|
|
TD_LOCKS_DEC(curthread);
|
2006-01-27 23:13:26 +00:00
|
|
|
}
|
|
|
|
|
2017-11-22 21:51:17 +00:00
|
|
|
void
|
|
|
|
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
|
|
|
|
{
|
|
|
|
struct rwlock *rw;
|
|
|
|
|
|
|
|
rw = rwlock2rw(c);
|
|
|
|
_rw_runlock_cookie_int(rw LOCK_FILE_LINE_ARG);
|
|
|
|
}
|
|
|
|
|
2018-05-22 07:16:39 +00:00
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
|
|
|
static inline void
|
|
|
|
rw_drop_critical(uintptr_t v, bool *in_critical, int *extra_work)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (v & RW_LOCK_WRITE_SPINNER)
|
|
|
|
return;
|
|
|
|
if (*in_critical) {
|
|
|
|
critical_exit();
|
|
|
|
*in_critical = false;
|
|
|
|
(*extra_work)--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define rw_drop_critical(v, in_critical, extra_work) do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
2006-01-27 23:13:26 +00:00
|
|
|
/*
|
|
|
|
* This function is called when we are unable to obtain a write lock on the
|
|
|
|
* first try. This means that at least one other thread holds either a
|
|
|
|
* read or write lock.
|
|
|
|
*/
|
|
|
|
void
|
2017-11-22 21:51:17 +00:00
|
|
|
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
|
2006-01-27 23:13:26 +00:00
|
|
|
{
|
2017-11-22 21:51:17 +00:00
|
|
|
uintptr_t tid;
|
2012-11-03 15:57:37 +00:00
|
|
|
struct rwlock *rw;
|
Commit 3/14 of sched_lock decomposition.
- Add a per-turnstile spinlock to solve potential priority propagation
deadlocks that are possible with thread_lock().
- The turnstile lock order is defined as the exact opposite of the
lock order used with the sleep locks they represent. This allows us
to walk in reverse order in priority_propagate and this is the only
place we wish to multiply acquire turnstile locks.
- Use the turnstile_chain lock to protect assigning mutexes to turnstiles.
- Change the turnstile interface to pass back turnstile pointers to the
consumers. This allows us to reduce some locking and makes it easier
to cancel turnstile assignment while the turnstile chain lock is held.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:51:44 +00:00
|
|
|
struct turnstile *ts;
|
2017-11-25 20:08:11 +00:00
|
|
|
struct thread *owner;
|
2007-03-22 16:09:23 +00:00
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
2008-02-06 01:02:13 +00:00
|
|
|
int spintries = 0;
|
2017-10-05 19:18:02 +00:00
|
|
|
int i, n;
|
2018-05-19 03:52:55 +00:00
|
|
|
enum { READERS, WRITER } sleep_reason = READERS;
|
2018-05-22 07:16:39 +00:00
|
|
|
bool in_critical = false;
|
2006-04-18 20:32:42 +00:00
|
|
|
#endif
|
2018-05-22 07:16:39 +00:00
|
|
|
uintptr_t setv;
|
2009-03-15 08:03:54 +00:00
|
|
|
#ifdef LOCK_PROFILING
|
|
|
|
uint64_t waittime = 0;
|
2007-07-20 08:43:42 +00:00
|
|
|
int contested = 0;
|
2009-03-15 08:03:54 +00:00
|
|
|
#endif
|
2016-08-02 00:15:08 +00:00
|
|
|
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
|
2016-08-01 21:48:37 +00:00
|
|
|
struct lock_delay_arg lda;
|
|
|
|
#endif
|
2009-05-26 20:28:22 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
2016-07-31 12:11:55 +00:00
|
|
|
u_int sleep_cnt = 0;
|
2009-05-26 20:28:22 +00:00
|
|
|
int64_t sleep_time = 0;
|
2015-06-12 10:01:24 +00:00
|
|
|
int64_t all_time = 0;
|
2009-05-26 20:28:22 +00:00
|
|
|
#endif
|
2017-10-20 03:32:42 +00:00
|
|
|
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
|
2018-11-13 21:29:56 +00:00
|
|
|
uintptr_t state = 0;
|
2018-03-17 19:26:33 +00:00
|
|
|
int doing_lockprof = 0;
|
2017-10-20 03:32:42 +00:00
|
|
|
#endif
|
2018-05-22 07:16:39 +00:00
|
|
|
int extra_work = 0;
|
2006-01-27 23:13:26 +00:00
|
|
|
|
2017-11-22 21:51:17 +00:00
|
|
|
tid = (uintptr_t)curthread;
|
2018-03-17 19:26:33 +00:00
|
|
|
rw = rwlock2rw(c);
|
|
|
|
|
|
|
|
#ifdef KDTRACE_HOOKS
|
|
|
|
if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
|
|
|
|
while (v == RW_UNLOCKED) {
|
|
|
|
if (_rw_write_lock_fetch(rw, &v, tid))
|
|
|
|
goto out_lockstat;
|
|
|
|
}
|
2018-05-22 07:16:39 +00:00
|
|
|
extra_work = 1;
|
2018-03-17 19:26:33 +00:00
|
|
|
doing_lockprof = 1;
|
|
|
|
all_time -= lockstat_nsecs(&rw->lock_object);
|
|
|
|
state = v;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifdef LOCK_PROFILING
|
2018-05-22 07:16:39 +00:00
|
|
|
extra_work = 1;
|
2018-03-17 19:26:33 +00:00
|
|
|
doing_lockprof = 1;
|
|
|
|
state = v;
|
|
|
|
#endif
|
|
|
|
|
panic: add a switch and infrastructure for stopping other CPUs in SMP case
Historical behavior of letting other CPUs merily go on is a default for
time being. The new behavior can be switched on via
kern.stop_scheduler_on_panic tunable and sysctl.
Stopping of the CPUs has (at least) the following benefits:
- more of the system state at panic time is preserved intact
- threads and interrupts do not interfere with dumping of the system
state
Only one thread runs uninterrupted after panic if stop_scheduler_on_panic
is set. That thread might call code that is also used in normal context
and that code might use locks to prevent concurrent execution of certain
parts. Those locks might be held by the stopped threads and would never
be released. To work around this issue, it was decided that instead of
explicit checks for panic context, we would rather put those checks
inside the locking primitives.
This change has substantial portions written and re-written by attilio
and kib at various times. Other changes are heavily based on the ideas
and patches submitted by jhb and mdf. bde has provided many insights
into the details and history of the current code.
The new behavior may cause problems for systems that use a USB keyboard
for interfacing with system console. This is because of some unusual
locking patterns in the ukbd code which have to be used because on one
hand ukbd is below syscons, but on the other hand it has to interface
with other usb code that uses regular mutexes/Giant for its concurrency
protection. Dumping to USB-connected disks may also be affected.
PR: amd64/139614 (at least)
In cooperation with: attilio, jhb, kib, mdf
Discussed with: arch@, bde
Tested by: Eugene Grosbein <eugen@grosbein.net>,
gnn,
Steven Hartland <killing@multiplay.co.uk>,
glebius,
Andrew Boyer <aboyer@averesystems.com>
(various versions of the patch)
MFC after: 3 months (or never)
2011-12-11 21:02:01 +00:00
|
|
|
if (SCHEDULER_STOPPED())
|
|
|
|
return;
|
|
|
|
|
2017-02-06 09:40:14 +00:00
|
|
|
if (__predict_false(v == RW_UNLOCKED))
|
|
|
|
v = RW_READ_VALUE(rw);
|
2012-11-03 15:57:37 +00:00
|
|
|
|
2017-01-18 17:53:57 +00:00
|
|
|
if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
|
2009-06-02 13:03:35 +00:00
|
|
|
KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
|
2007-06-26 21:31:56 +00:00
|
|
|
("%s: recursing but non-recursive rw %s @ %s:%d\n",
|
|
|
|
__func__, rw->lock_object.lo_name, file, line));
|
|
|
|
rw->rw_recurse++;
|
2017-02-07 17:04:31 +00:00
|
|
|
atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
|
2007-06-26 21:31:56 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
|
|
|
CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2007-03-21 21:20:51 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
2006-01-27 23:13:26 +00:00
|
|
|
CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
|
2007-03-21 21:20:51 +00:00
|
|
|
rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
|
2006-01-27 23:13:26 +00:00
|
|
|
|
2020-11-24 03:49:37 +00:00
|
|
|
#if defined(ADAPTIVE_RWLOCKS)
|
|
|
|
lock_delay_arg_init(&lda, &rw_delay);
|
|
|
|
#elif defined(KDTRACE_HOOKS)
|
|
|
|
lock_delay_arg_init_noadapt(&lda);
|
|
|
|
#endif
|
|
|
|
|
2017-11-17 02:22:51 +00:00
|
|
|
#ifdef HWPMC_HOOKS
|
|
|
|
PMC_SOFT_CALL( , , lock, failed);
|
|
|
|
#endif
|
2021-05-23 15:25:42 +00:00
|
|
|
lock_profile_obtain_lock_failed(&rw->lock_object, false,
|
2017-11-17 02:22:51 +00:00
|
|
|
&contested, &waittime);
|
|
|
|
|
2016-06-01 18:32:20 +00:00
|
|
|
for (;;) {
|
2017-01-18 17:53:57 +00:00
|
|
|
if (v == RW_UNLOCKED) {
|
2017-02-05 04:53:13 +00:00
|
|
|
if (_rw_write_lock_fetch(rw, &v, tid))
|
2017-01-18 17:53:57 +00:00
|
|
|
break;
|
|
|
|
continue;
|
|
|
|
}
|
2009-05-26 20:28:22 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
2016-08-01 21:48:37 +00:00
|
|
|
lda.spin_cnt++;
|
2012-03-28 20:58:30 +00:00
|
|
|
#endif
|
2017-11-17 02:22:51 +00:00
|
|
|
|
2007-11-26 22:37:35 +00:00
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
2018-05-22 07:16:39 +00:00
|
|
|
if (v == (RW_LOCK_READ | RW_LOCK_WRITE_SPINNER)) {
|
|
|
|
if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
|
|
|
|
break;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2007-11-26 22:37:35 +00:00
|
|
|
/*
|
|
|
|
* If the lock is write locked and the owner is
|
|
|
|
* running on another CPU, spin until the owner stops
|
|
|
|
* running or the state of the lock changes.
|
|
|
|
*/
|
2018-03-04 21:38:30 +00:00
|
|
|
if (!(v & RW_LOCK_READ)) {
|
2018-05-22 07:16:39 +00:00
|
|
|
rw_drop_critical(v, &in_critical, &extra_work);
|
2018-03-04 21:38:30 +00:00
|
|
|
sleep_reason = WRITER;
|
|
|
|
owner = lv_rw_wowner(v);
|
|
|
|
if (!TD_IS_RUNNING(owner))
|
|
|
|
goto ts;
|
2007-11-26 22:37:35 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
|
|
|
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
|
|
|
|
__func__, rw, owner);
|
2014-11-04 16:35:56 +00:00
|
|
|
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
|
|
|
|
"spinning", "lockname:\"%s\"",
|
|
|
|
rw->lock_object.lo_name);
|
2017-01-18 17:53:57 +00:00
|
|
|
do {
|
2016-08-01 21:48:37 +00:00
|
|
|
lock_delay(&lda);
|
2017-01-18 17:53:57 +00:00
|
|
|
v = RW_READ_VALUE(rw);
|
|
|
|
owner = lv_rw_wowner(v);
|
|
|
|
} while (owner != NULL && TD_IS_RUNNING(owner));
|
2014-11-04 16:35:56 +00:00
|
|
|
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
|
|
|
|
"running");
|
2007-11-26 22:37:35 +00:00
|
|
|
continue;
|
2018-03-04 21:38:30 +00:00
|
|
|
} else if (RW_READERS(v) > 0) {
|
|
|
|
sleep_reason = READERS;
|
|
|
|
if (spintries == rowner_retries)
|
|
|
|
goto ts;
|
2008-02-06 01:02:13 +00:00
|
|
|
if (!(v & RW_LOCK_WRITE_SPINNER)) {
|
2018-05-22 07:16:39 +00:00
|
|
|
if (!in_critical) {
|
|
|
|
critical_enter();
|
|
|
|
in_critical = true;
|
|
|
|
extra_work++;
|
|
|
|
}
|
2017-11-11 09:34:11 +00:00
|
|
|
if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
|
2008-02-06 01:02:13 +00:00
|
|
|
v | RW_LOCK_WRITE_SPINNER)) {
|
2018-05-22 07:16:39 +00:00
|
|
|
critical_exit();
|
|
|
|
in_critical = false;
|
|
|
|
extra_work--;
|
2008-02-06 01:02:13 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
spintries++;
|
2014-11-04 16:35:56 +00:00
|
|
|
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
|
|
|
|
"spinning", "lockname:\"%s\"",
|
|
|
|
rw->lock_object.lo_name);
|
2018-05-22 07:16:39 +00:00
|
|
|
n = RW_READERS(v);
|
2017-10-05 19:18:02 +00:00
|
|
|
for (i = 0; i < rowner_loops; i += n) {
|
|
|
|
lock_delay_spin(n);
|
2017-10-05 13:01:18 +00:00
|
|
|
v = RW_READ_VALUE(rw);
|
2018-05-22 07:16:39 +00:00
|
|
|
if (!(v & RW_LOCK_WRITE_SPINNER))
|
|
|
|
break;
|
|
|
|
if (!(v & RW_LOCK_READ))
|
|
|
|
break;
|
|
|
|
n = RW_READERS(v);
|
|
|
|
if (n == 0)
|
2017-10-05 13:01:18 +00:00
|
|
|
break;
|
2008-02-06 01:02:13 +00:00
|
|
|
}
|
2009-05-26 20:28:22 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
2018-03-04 21:38:30 +00:00
|
|
|
lda.spin_cnt += i;
|
2009-05-26 20:28:22 +00:00
|
|
|
#endif
|
2018-03-04 21:38:30 +00:00
|
|
|
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
|
|
|
|
"running");
|
2017-12-31 02:31:01 +00:00
|
|
|
if (i < rowner_loops)
|
2008-02-06 01:02:13 +00:00
|
|
|
continue;
|
|
|
|
}
|
2018-03-04 21:38:30 +00:00
|
|
|
ts:
|
2007-11-26 22:37:35 +00:00
|
|
|
#endif
|
2007-06-04 23:51:44 +00:00
|
|
|
ts = turnstile_trywait(&rw->lock_object);
|
2017-01-18 17:53:57 +00:00
|
|
|
v = RW_READ_VALUE(rw);
|
2017-11-25 20:10:33 +00:00
|
|
|
retry_ts:
|
2017-11-25 20:08:11 +00:00
|
|
|
owner = lv_rw_wowner(v);
|
2006-01-27 23:13:26 +00:00
|
|
|
|
2007-11-26 22:37:35 +00:00
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
|
|
|
/*
|
2009-05-29 13:56:34 +00:00
|
|
|
* The current lock owner might have started executing
|
|
|
|
* on another CPU (or the lock could have changed
|
|
|
|
* owners) while we were waiting on the turnstile
|
|
|
|
* chain lock. If so, drop the turnstile lock and try
|
|
|
|
* again.
|
2007-11-26 22:37:35 +00:00
|
|
|
*/
|
2017-11-25 20:08:11 +00:00
|
|
|
if (owner != NULL) {
|
2007-11-26 22:37:35 +00:00
|
|
|
if (TD_IS_RUNNING(owner)) {
|
|
|
|
turnstile_cancel(ts);
|
2018-05-22 07:16:39 +00:00
|
|
|
rw_drop_critical(v, &in_critical, &extra_work);
|
2007-11-26 22:37:35 +00:00
|
|
|
continue;
|
|
|
|
}
|
2018-03-04 21:38:30 +00:00
|
|
|
} else if (RW_READERS(v) > 0 && sleep_reason == WRITER) {
|
2017-12-31 00:47:04 +00:00
|
|
|
turnstile_cancel(ts);
|
2018-05-22 07:16:39 +00:00
|
|
|
rw_drop_critical(v, &in_critical, &extra_work);
|
2017-12-31 00:47:04 +00:00
|
|
|
continue;
|
2007-11-26 22:37:35 +00:00
|
|
|
}
|
|
|
|
#endif
|
2006-01-27 23:13:26 +00:00
|
|
|
/*
|
2008-05-27 00:27:50 +00:00
|
|
|
* Check the waiters flags on this rwlock.
|
|
|
|
* If the lock was released without maintaining any pending
|
|
|
|
* waiters queue, simply try to acquire it.
|
|
|
|
* If a pending waiters queue is present, claim the lock
|
|
|
|
* ownership and maintain the pending queue.
|
2006-01-27 23:13:26 +00:00
|
|
|
*/
|
2018-05-22 07:16:39 +00:00
|
|
|
setv = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
|
|
|
|
if ((v & ~setv) == RW_UNLOCKED) {
|
|
|
|
setv &= ~RW_LOCK_WRITE_SPINNER;
|
|
|
|
if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | setv)) {
|
|
|
|
if (setv)
|
2008-02-06 01:02:13 +00:00
|
|
|
turnstile_claim(ts);
|
|
|
|
else
|
|
|
|
turnstile_cancel(ts);
|
2006-01-27 23:13:26 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-11-25 20:10:33 +00:00
|
|
|
goto retry_ts;
|
2006-01-27 23:13:26 +00:00
|
|
|
}
|
2018-05-22 07:16:39 +00:00
|
|
|
|
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
|
|
|
if (in_critical) {
|
|
|
|
if ((v & RW_LOCK_WRITE_SPINNER) ||
|
|
|
|
!((v & RW_LOCK_WRITE_WAITERS))) {
|
|
|
|
setv = v & ~RW_LOCK_WRITE_SPINNER;
|
|
|
|
setv |= RW_LOCK_WRITE_WAITERS;
|
|
|
|
if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, setv))
|
|
|
|
goto retry_ts;
|
|
|
|
}
|
|
|
|
critical_exit();
|
|
|
|
in_critical = false;
|
|
|
|
extra_work--;
|
|
|
|
} else {
|
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
* If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
|
|
|
|
* set it. If we fail to set it, then loop back and try
|
|
|
|
* again.
|
|
|
|
*/
|
|
|
|
if (!(v & RW_LOCK_WRITE_WAITERS)) {
|
|
|
|
if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
|
|
|
|
v | RW_LOCK_WRITE_WAITERS))
|
|
|
|
goto retry_ts;
|
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
|
|
|
CTR2(KTR_LOCK, "%s: %p set write waiters flag",
|
|
|
|
__func__, rw);
|
|
|
|
}
|
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
2006-01-27 23:13:26 +00:00
|
|
|
}
|
2018-05-22 07:16:39 +00:00
|
|
|
#endif
|
2006-01-27 23:13:26 +00:00
|
|
|
/*
|
|
|
|
* We were unable to acquire the lock and the write waiters
|
|
|
|
* flag is set, so we must block on the turnstile.
|
|
|
|
*/
|
2007-03-21 21:20:51 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
2006-01-27 23:13:26 +00:00
|
|
|
CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
|
|
|
|
rw);
|
2009-05-26 20:28:22 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
2015-07-18 00:57:30 +00:00
|
|
|
sleep_time -= lockstat_nsecs(&rw->lock_object);
|
2009-05-26 20:28:22 +00:00
|
|
|
#endif
|
2017-11-25 20:08:11 +00:00
|
|
|
MPASS(owner == rw_owner(rw));
|
|
|
|
turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
|
2009-05-26 20:28:22 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
2015-07-18 00:57:30 +00:00
|
|
|
sleep_time += lockstat_nsecs(&rw->lock_object);
|
2009-05-26 20:28:22 +00:00
|
|
|
sleep_cnt++;
|
|
|
|
#endif
|
2007-03-21 21:20:51 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
2006-01-27 23:13:26 +00:00
|
|
|
CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
|
|
|
|
__func__, rw);
|
2008-02-06 01:02:13 +00:00
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
|
|
|
spintries = 0;
|
|
|
|
#endif
|
2017-01-18 17:53:57 +00:00
|
|
|
v = RW_READ_VALUE(rw);
|
2006-01-27 23:13:26 +00:00
|
|
|
}
|
2018-05-22 07:16:39 +00:00
|
|
|
if (__predict_true(!extra_work))
|
|
|
|
return;
|
|
|
|
#ifdef ADAPTIVE_RWLOCKS
|
|
|
|
if (in_critical)
|
|
|
|
critical_exit();
|
|
|
|
#endif
|
2017-10-20 03:32:42 +00:00
|
|
|
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
|
|
|
|
if (__predict_true(!doing_lockprof))
|
|
|
|
return;
|
|
|
|
#endif
|
2009-05-26 20:28:22 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
2015-07-18 00:57:30 +00:00
|
|
|
all_time += lockstat_nsecs(&rw->lock_object);
|
2009-05-26 20:28:22 +00:00
|
|
|
if (sleep_time)
|
2015-07-19 22:14:09 +00:00
|
|
|
LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
|
2015-06-12 10:01:24 +00:00
|
|
|
LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
|
|
|
|
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
|
2009-05-26 20:28:22 +00:00
|
|
|
|
2015-06-12 10:01:24 +00:00
|
|
|
/* Record only the loops spinning and not sleeping. */
|
2016-08-01 21:48:37 +00:00
|
|
|
if (lda.spin_cnt > sleep_cnt)
|
2015-07-19 22:14:09 +00:00
|
|
|
LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
|
2016-07-30 22:21:48 +00:00
|
|
|
LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
|
2015-06-12 10:01:24 +00:00
|
|
|
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
|
2018-03-17 19:26:33 +00:00
|
|
|
out_lockstat:
|
2009-05-26 20:28:22 +00:00
|
|
|
#endif
|
2015-07-19 22:24:33 +00:00
|
|
|
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
|
|
|
|
waittime, file, line, LOCKSTAT_WRITER);
|
2006-01-27 23:13:26 +00:00
|
|
|
}
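/*
* Context note, illustrative only: callers normally enter through the
* rw_wlock() macro, whose inline fast path attempts a single atomic swap of
* RW_UNLOCKED to the owning thread id and falls into __rw_wlock_hard()
* above only when that swap fails.  A minimal sketch of that relationship,
* assuming the usual fcmpset-based fast path:
*
*	v = RW_UNLOCKED;
*	tid = (uintptr_t)curthread;
*	if (!atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
*		__rw_wlock_hard(&rw->rw_lock, v, file, line);
*
* The hard path above then handles recursion, adaptive spinning and
* turnstile blocking.
*/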
|
|
|
|
|
|
|
|
/*
|
2017-02-09 08:19:30 +00:00
|
|
|
* This function is called if lockstat is active or the first try at releasing
|
|
|
|
* a write lock failed. The latter means that the lock is recursed or that one of
|
|
|
|
* the two waiter bits is set, indicating that at least one thread is waiting
|
|
|
|
* on this lock.
|
2006-01-27 23:13:26 +00:00
|
|
|
*/
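/*
* Illustrative sketch of that first try, an assumption about the inline
* release path rather than code from this file: the unlock macro attempts a
* single release cmpset and only enters the hard path below when it fails,
* i.e. when the lock word is no longer just the bare tid (the lock is
* recursed or a waiter bit is set):
*
*	tid = (uintptr_t)curthread;
*	if (!atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_UNLOCKED))
*		__rw_wunlock_hard(&rw->rw_lock, tid, file, line);
*/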
|
|
|
|
void
|
2017-11-22 22:04:04 +00:00
|
|
|
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
|
2006-01-27 23:13:26 +00:00
|
|
|
{
|
2012-11-03 15:57:37 +00:00
|
|
|
struct rwlock *rw;
|
2006-01-27 23:13:26 +00:00
|
|
|
struct turnstile *ts;
|
2017-11-22 22:04:04 +00:00
|
|
|
uintptr_t tid, setv;
|
2006-01-27 23:13:26 +00:00
|
|
|
int queue;
|
|
|
|
|
2017-11-22 22:04:04 +00:00
|
|
|
tid = (uintptr_t)curthread;
|
2011-12-11 21:02:01 +00:00
|
|
|
if (SCHEDULER_STOPPED())
|
|
|
|
return;
|
|
|
|
|
2012-11-03 15:57:37 +00:00
|
|
|
rw = rwlock2rw(c);
|
2017-11-22 22:04:04 +00:00
|
|
|
if (__predict_false(v == tid))
|
|
|
|
v = RW_READ_VALUE(rw);
|
|
|
|
|
2017-02-09 08:19:30 +00:00
|
|
|
if (v & RW_LOCK_WRITER_RECURSED) {
|
2017-02-07 17:04:31 +00:00
|
|
|
if (--(rw->rw_recurse) == 0)
|
|
|
|
atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
|
2017-02-09 08:19:30 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
|
|
|
CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
|
2007-06-26 21:31:56 +00:00
|
|
|
return;
|
2017-02-07 17:04:31 +00:00
|
|
|
}
|
2007-06-26 21:31:56 +00:00
|
|
|
|
2017-02-09 08:19:30 +00:00
|
|
|
LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
|
|
|
|
if (v == tid && _rw_write_unlock(rw, tid))
|
|
|
|
return;
|
|
|
|
|
2006-01-27 23:13:26 +00:00
|
|
|
KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
|
|
|
|
("%s: neither of the waiter flags are set", __func__));
|
|
|
|
|
2007-03-21 21:20:51 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
2006-01-27 23:13:26 +00:00
|
|
|
CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
|
|
|
|
|
2007-06-04 23:51:44 +00:00
|
|
|
turnstile_chain_lock(&rw->lock_object);
|
2006-01-27 23:13:26 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prefer waking up write waiters over readers.  This is probably not
* ideal.
*
* 'setv' is the value we are going to write back to rw_lock.  If we
* have waiters on both queues, we need to preserve the state of the
* waiter flag for the queue we don't wake up.  For now this is
* hardcoded for the algorithm mentioned above.
*
* In the case of both readers and writers waiting, we wake up the
* writers but leave the RW_LOCK_READ_WAITERS flag set.  A new thread
* that gets in before the woken writers run can still claim the lock
* up above, so there is probably a potential priority inversion in
* there that could be worked around either by waking both queues of
* waiters or doing some complicated lock handoff gymnastics.
*/
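/*
* Worked example (illustrative): with both waiter bits set, the store
* below writes RW_UNLOCKED | RW_LOCK_READ_WAITERS and the exclusive
* queue is broadcast; with only read waiters pending, plain RW_UNLOCKED
* is written and the shared queue is woken.
*/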
|
2017-11-17 02:26:15 +00:00
|
|
|
setv = RW_UNLOCKED;
|
|
|
|
v = RW_READ_VALUE(rw);
|
|
|
|
queue = TS_SHARED_QUEUE;
|
|
|
|
if (v & RW_LOCK_WRITE_WAITERS) {
|
2006-01-27 23:13:26 +00:00
|
|
|
queue = TS_EXCLUSIVE_QUEUE;
|
2017-11-17 02:26:15 +00:00
|
|
|
setv |= (v & RW_LOCK_READ_WAITERS);
|
|
|
|
}
|
|
|
|
atomic_store_rel_ptr(&rw->rw_lock, setv);
|
2006-04-18 18:27:54 +00:00
|
|
|
|
|
|
|
/* Wake up all waiters for the specific queue. */
|
2007-03-21 21:20:51 +00:00
|
|
|
if (LOCK_LOG_TEST(&rw->lock_object, 0))
|
2006-01-27 23:13:26 +00:00
|
|
|
CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
|
|
|
|
queue == TS_SHARED_QUEUE ? "read" : "write");
|
2017-11-17 02:26:15 +00:00
|
|
|
|
|
|
|
ts = turnstile_lookup(&rw->lock_object);
|
|
|
|
MPASS(ts != NULL);
|
2006-01-27 23:13:26 +00:00
|
|
|
turnstile_broadcast(ts, queue);
|
2018-06-02 22:37:53 +00:00
|
|
|
turnstile_unpend(ts);
|
2007-06-04 23:51:44 +00:00
|
|
|
turnstile_chain_unlock(&rw->lock_object);
|
2006-01-27 23:13:26 +00:00
|
|
|
}
|
|
|
|
|
2006-04-19 21:06:52 +00:00
|
|
|
/*
|
|
|
|
* Attempt to do a non-blocking upgrade from a read lock to a write
|
|
|
|
* lock. This will only succeed if this thread holds a single read
|
|
|
|
* lock. Returns true if the upgrade succeeded and false otherwise.
|
|
|
|
*/
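/*
* Illustrative usage sketch (assumed caller code, not part of this file):
* a reader that discovers it must modify the protected data can attempt a
* non-blocking upgrade and fall back to reacquiring as a writer, repeating
* its lookup because the lock is dropped in between:
*
*	rw_rlock(&foo_lock);
*	obj = foo_lookup(key);
*	if (!rw_try_upgrade(&foo_lock)) {
*		rw_runlock(&foo_lock);
*		rw_wlock(&foo_lock);
*		obj = foo_lookup(key);
*	}
*	... modify obj under the write lock ...
*	rw_wunlock(&foo_lock);
*
* foo_lock, foo_lookup() and key are hypothetical placeholders.
*/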
|
|
|
|
int
|
2017-11-22 21:51:17 +00:00
|
|
|
__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
|
2006-04-19 21:06:52 +00:00
|
|
|
{
|
2018-04-10 22:32:31 +00:00
|
|
|
uintptr_t v, setv, tid;
|
2007-06-04 23:51:44 +00:00
|
|
|
struct turnstile *ts;
|
2006-04-19 21:06:52 +00:00
|
|
|
int success;
|
|
|
|
|
2011-12-11 21:02:01 +00:00
|
|
|
if (SCHEDULER_STOPPED())
|
|
|
|
return (1);
|
|
|
|
|
2007-05-08 21:51:37 +00:00
|
|
|
KASSERT(rw->rw_lock != RW_DESTROYED,
|
|
|
|
("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
|
2017-11-22 21:51:17 +00:00
|
|
|
__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
|
2006-04-19 21:06:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Attempt to switch from one reader to a writer. If there
|
|
|
|
* are any write waiters, then we will have to lock the
|
|
|
|
* turnstile first to prevent races with another writer
|
|
|
|
* calling turnstile_wait() before we have claimed this
|
|
|
|
* turnstile. So, do the simple case of no waiters first.
|
|
|
|
*/
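/*
* Illustrative state transition for the simple case: a lock word equal to
* RW_READERS_LOCK(1) with no waiter bits is swapped directly to the
* upgrading thread's tid.  More than one reader makes the upgrade fail
* outright, and a set waiter bit forces the turnstile-protected path below.
*/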
|
|
|
|
tid = (uintptr_t)curthread;
|
2008-02-06 01:02:13 +00:00
|
|
|
success = 0;
|
2018-04-10 22:32:31 +00:00
|
|
|
v = RW_READ_VALUE(rw);
|
2008-02-06 01:02:13 +00:00
|
|
|
for (;;) {
|
|
|
|
if (RW_READERS(v) > 1)
|
|
|
|
break;
|
|
|
|
if (!(v & RW_LOCK_WAITERS)) {
|
2018-04-10 22:32:31 +00:00
|
|
|
success = atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid);
|
2008-02-06 01:02:13 +00:00
|
|
|
if (!success)
|
|
|
|
continue;
|
|
|
|
break;
|
|
|
|
}
|
2006-04-19 21:06:52 +00:00
|
|
|
|
2008-02-06 01:02:13 +00:00
|
|
|
/*
|
|
|
|
* Ok, we think we have waiters, so lock the turnstile.
|
|
|
|
*/
|
|
|
|
ts = turnstile_trywait(&rw->lock_object);
|
2018-04-10 22:32:31 +00:00
|
|
|
v = RW_READ_VALUE(rw);
|
|
|
|
retry_ts:
|
2008-02-06 01:02:13 +00:00
|
|
|
if (RW_READERS(v) > 1) {
|
|
|
|
turnstile_cancel(ts);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Try to switch from one reader to a writer again. This time
|
|
|
|
* we honor the current state of the waiters flags.
|
|
|
|
* If we obtain the lock with the flags set, then claim
|
|
|
|
* ownership of the turnstile.
|
|
|
|
*/
|
2018-04-10 22:32:31 +00:00
|
|
|
setv = tid | (v & RW_LOCK_WAITERS);
|
|
|
|
success = atomic_fcmpset_ptr(&rw->rw_lock, &v, setv);
|
2008-02-06 01:02:13 +00:00
|
|
|
if (success) {
|
2018-04-10 22:32:31 +00:00
|
|
|
if (v & RW_LOCK_WAITERS)
|
2008-02-06 01:02:13 +00:00
|
|
|
turnstile_claim(ts);
|
|
|
|
else
|
|
|
|
turnstile_cancel(ts);
|
|
|
|
break;
|
|
|
|
}
|
2018-04-10 22:32:31 +00:00
|
|
|
goto retry_ts;
|
2008-02-06 01:02:13 +00:00
|
|
|
}
|
2007-03-21 21:20:51 +00:00
|
|
|
LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
|
2008-02-06 01:02:13 +00:00
|
|
|
if (success) {
|
|
|
|
curthread->td_rw_rlocks--;
|
2007-03-21 21:20:51 +00:00
|
|
|
WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
|
2006-04-19 21:06:52 +00:00
|
|
|
file, line);
|
2015-07-19 22:14:09 +00:00
|
|
|
LOCKSTAT_RECORD0(rw__upgrade, rw);
|
2008-02-06 01:02:13 +00:00
|
|
|
}
|
2006-04-19 21:06:52 +00:00
|
|
|
return (success);
|
|
|
|
}
|
|
|
|
|
2017-11-22 21:51:17 +00:00
|
|
|
int
|
|
|
|
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
|
|
|
|
{
|
|
|
|
struct rwlock *rw;
|
|
|
|
|
|
|
|
rw = rwlock2rw(c);
|
|
|
|
return (__rw_try_upgrade_int(rw LOCK_FILE_LINE_ARG));
|
|
|
|
}
|
|
|
|
|
2006-04-19 21:06:52 +00:00
|
|
|
/*
|
|
|
|
* Downgrade a write lock into a single read lock.
|
|
|
|
*/
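/*
* Illustrative usage sketch (assumed caller code, not part of this file):
* a writer that has finished updating shared state but still needs a
* consistent view of it can downgrade instead of dropping and reacquiring
* the lock, letting other readers in without losing what it just validated:
*
*	rw_wlock(&foo_lock);
*	foo_update();
*	rw_downgrade(&foo_lock);
*	foo_read_some_more();
*	rw_runlock(&foo_lock);
*
* foo_lock, foo_update() and foo_read_some_more() are hypothetical names.
*/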
|
|
|
|
void
|
2017-11-22 21:51:17 +00:00
|
|
|
__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
|
2006-04-19 21:06:52 +00:00
|
|
|
{
|
|
|
|
struct turnstile *ts;
|
|
|
|
uintptr_t tid, v;
|
2008-02-06 01:02:13 +00:00
|
|
|
int rwait, wwait;
|
2006-04-19 21:06:52 +00:00
|
|
|
|
2011-12-11 21:02:01 +00:00
|
|
|
if (SCHEDULER_STOPPED())
|
|
|
|
return;
|
|
|
|
|
2007-05-08 21:51:37 +00:00
|
|
|
KASSERT(rw->rw_lock != RW_DESTROYED,
|
|
|
|
("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
|
2017-11-22 21:51:17 +00:00
|
|
|
__rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line);
|
2007-06-26 21:31:56 +00:00
|
|
|
#ifndef INVARIANTS
|
|
|
|
if (rw_recursed(rw))
|
|
|
|
panic("downgrade of a recursed lock");
|
|
|
|
#endif
|
2006-04-19 21:06:52 +00:00
|
|
|
|
2007-03-21 21:20:51 +00:00
|
|
|
WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);
|
2006-04-19 21:06:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert from a writer to a single reader. First we handle
|
|
|
|
* the easy case with no waiters. If there are any waiters, we
|
2008-02-06 01:02:13 +00:00
|
|
|
* lock the turnstile and "disown" the lock.
|
2006-04-19 21:06:52 +00:00
|
|
|
*/
|
|
|
|
tid = (uintptr_t)curthread;
|
|
|
|
if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, we think we have waiters, so lock the turnstile so we can
|
|
|
|
* read the waiter flags without any races.
|
|
|
|
*/
|
2007-06-04 23:51:44 +00:00
|
|
|
turnstile_chain_lock(&rw->lock_object);
|
2008-02-06 01:02:13 +00:00
|
|
|
v = rw->rw_lock & RW_LOCK_WAITERS;
|
|
|
|
rwait = v & RW_LOCK_READ_WAITERS;
|
|
|
|
wwait = v & RW_LOCK_WRITE_WAITERS;
|
|
|
|
MPASS(rwait | wwait);
|
2006-04-19 21:06:52 +00:00
|
|
|
|
|
|
|
/*
|
2008-02-06 01:02:13 +00:00
|
|
|
* Downgrade from a write lock while preserving waiters flag
|
|
|
|
* and give up ownership of the turnstile.
|
2006-04-19 21:06:52 +00:00
|
|
|
*/
|
2007-03-21 21:20:51 +00:00
|
|
|
ts = turnstile_lookup(&rw->lock_object);
|
2006-04-19 21:06:52 +00:00
|
|
|
MPASS(ts != NULL);
|
2008-02-06 01:02:13 +00:00
|
|
|
if (!wwait)
|
|
|
|
v &= ~RW_LOCK_READ_WAITERS;
|
|
|
|
atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
|
|
|
|
/*
|
|
|
|
* Wake other readers if there are no writers pending. Otherwise they
|
|
|
|
* won't be able to acquire the lock anyway.
|
|
|
|
*/
|
|
|
|
if (rwait && !wwait) {
|
2006-04-19 21:06:52 +00:00
|
|
|
turnstile_broadcast(ts, TS_SHARED_QUEUE);
|
2018-06-02 22:37:53 +00:00
|
|
|
turnstile_unpend(ts);
|
2008-02-06 01:02:13 +00:00
|
|
|
} else
|
2006-04-19 21:06:52 +00:00
|
|
|
turnstile_disown(ts);
|
2007-06-04 23:51:44 +00:00
|
|
|
turnstile_chain_unlock(&rw->lock_object);
|
2006-04-19 21:06:52 +00:00
|
|
|
out:
|
2008-02-06 01:02:13 +00:00
|
|
|
curthread->td_rw_rlocks++;
|
2007-03-21 21:20:51 +00:00
|
|
|
LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
|
2015-07-19 22:14:09 +00:00
|
|
|
LOCKSTAT_RECORD0(rw__downgrade, rw);
|
2006-04-19 21:06:52 +00:00
|
|
|
}
|
|
|
|
|
2017-11-22 21:51:17 +00:00
|
|
|
void
|
|
|
|
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
|
|
|
|
{
|
|
|
|
struct rwlock *rw;
|
|
|
|
|
|
|
|
rw = rwlock2rw(c);
|
|
|
|
__rw_downgrade_int(rw LOCK_FILE_LINE_ARG);
|
|
|
|
}
|
|
|
|
|
2006-01-27 23:13:26 +00:00
|
|
|
#ifdef INVARIANT_SUPPORT
|
2006-02-01 04:18:07 +00:00
|
|
|
#ifndef INVARIANTS
|
2012-11-03 15:57:37 +00:00
|
|
|
#undef __rw_assert
|
2006-01-27 23:13:26 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In the non-WITNESS case, rw_assert() can only detect that at least
|
|
|
|
* *some* thread owns an rlock, but it cannot guarantee that *this*
|
|
|
|
* thread owns an rlock.
|
|
|
|
*/
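/*
* Illustrative usage sketch (assumed caller code, not part of this file):
*
*	rw_assert(&foo_lock, RA_WLOCKED);
*	rw_assert(&foo_lock, RA_RLOCKED | RA_NOTRECURSED);
*
* With WITNESS compiled in these checks are exact; without it, RA_RLOCKED
* only proves that some thread holds a read lock, per the caveat above.
*/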
|
|
|
|
void
|
2012-11-03 15:57:37 +00:00
|
|
|
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
|
2006-01-27 23:13:26 +00:00
|
|
|
{
|
2012-11-03 15:57:37 +00:00
|
|
|
const struct rwlock *rw;
|
2006-01-27 23:13:26 +00:00
|
|
|
|
2018-11-13 20:48:05 +00:00
|
|
|
if (SCHEDULER_STOPPED())
|
2006-01-27 23:13:26 +00:00
|
|
|
return;
|
2012-11-03 15:57:37 +00:00
|
|
|
|
|
|
|
rw = rwlock2rw(c);
|
|
|
|
|
2006-01-27 23:13:26 +00:00
|
|
|
switch (what) {
|
|
|
|
case RA_LOCKED:
|
2007-06-26 21:31:56 +00:00
|
|
|
case RA_LOCKED | RA_RECURSED:
|
|
|
|
case RA_LOCKED | RA_NOTRECURSED:
|
2006-01-27 23:13:26 +00:00
|
|
|
case RA_RLOCKED:
|
2013-06-03 17:38:57 +00:00
|
|
|
case RA_RLOCKED | RA_RECURSED:
|
|
|
|
case RA_RLOCKED | RA_NOTRECURSED:
|
2006-01-27 23:13:26 +00:00
|
|
|
#ifdef WITNESS
|
2007-03-21 21:20:51 +00:00
|
|
|
witness_assert(&rw->lock_object, what, file, line);
|
2006-01-27 23:13:26 +00:00
|
|
|
#else
|
|
|
|
/*
|
|
|
|
* If some other thread has a write lock or we have one
|
|
|
|
* and are asserting a read lock, fail. Also, if no one
|
|
|
|
* has a lock at all, fail.
|
|
|
|
*/
|
2006-01-30 19:25:52 +00:00
|
|
|
if (rw->rw_lock == RW_UNLOCKED ||
|
2013-06-03 17:38:57 +00:00
|
|
|
(!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
|
2006-04-17 21:11:01 +00:00
|
|
|
rw_wowner(rw) != curthread)))
|
2006-01-27 23:13:26 +00:00
|
|
|
panic("Lock %s not %slocked @ %s:%d\n",
|
2013-06-03 17:38:57 +00:00
|
|
|
rw->lock_object.lo_name, (what & RA_RLOCKED) ?
|
2006-01-27 23:13:26 +00:00
|
|
|
"read " : "", file, line);
|
2007-06-26 21:31:56 +00:00
|
|
|
|
2013-06-03 17:38:57 +00:00
|
|
|
if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
|
2007-06-26 21:31:56 +00:00
|
|
|
if (rw_recursed(rw)) {
|
|
|
|
if (what & RA_NOTRECURSED)
|
|
|
|
panic("Lock %s recursed @ %s:%d\n",
|
|
|
|
rw->lock_object.lo_name, file,
|
|
|
|
line);
|
|
|
|
} else if (what & RA_RECURSED)
|
|
|
|
panic("Lock %s not recursed @ %s:%d\n",
|
|
|
|
rw->lock_object.lo_name, file, line);
|
|
|
|
}
|
2006-01-27 23:13:26 +00:00
|
|
|
#endif
|
|
|
|
break;
|
|
|
|
case RA_WLOCKED:
|
2007-06-26 21:31:56 +00:00
|
|
|
case RA_WLOCKED | RA_RECURSED:
|
|
|
|
case RA_WLOCKED | RA_NOTRECURSED:
|
2006-04-17 21:11:01 +00:00
|
|
|
if (rw_wowner(rw) != curthread)
|
2006-01-27 23:13:26 +00:00
|
|
|
panic("Lock %s not exclusively locked @ %s:%d\n",
|
2007-03-21 21:20:51 +00:00
|
|
|
rw->lock_object.lo_name, file, line);
|
2007-06-26 21:31:56 +00:00
|
|
|
if (rw_recursed(rw)) {
|
|
|
|
if (what & RA_NOTRECURSED)
|
|
|
|
panic("Lock %s recursed @ %s:%d\n",
|
|
|
|
rw->lock_object.lo_name, file, line);
|
|
|
|
} else if (what & RA_RECURSED)
|
|
|
|
panic("Lock %s not recursed @ %s:%d\n",
|
|
|
|
rw->lock_object.lo_name, file, line);
|
2006-01-27 23:13:26 +00:00
|
|
|
break;
|
|
|
|
case RA_UNLOCKED:
|
|
|
|
#ifdef WITNESS
|
2007-03-21 21:20:51 +00:00
|
|
|
witness_assert(&rw->lock_object, what, file, line);
|
2006-01-27 23:13:26 +00:00
|
|
|
#else
|
|
|
|
/*
|
|
|
|
* If we hold a write lock, fail. We can't reliably check
|
|
|
|
* to see if we hold a read lock or not.
|
|
|
|
*/
|
2006-04-17 21:11:01 +00:00
|
|
|
if (rw_wowner(rw) == curthread)
|
2006-01-27 23:13:26 +00:00
|
|
|
panic("Lock %s exclusively locked @ %s:%d\n",
|
2007-03-21 21:20:51 +00:00
|
|
|
rw->lock_object.lo_name, file, line);
|
2006-01-27 23:13:26 +00:00
|
|
|
#endif
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
|
|
|
|
line);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* INVARIANT_SUPPORT */
|
|
|
|
|
|
|
|
#ifdef DDB
|
|
|
|
void
|
2011-11-16 21:51:17 +00:00
|
|
|
db_show_rwlock(const struct lock_object *lock)
|
2006-01-27 23:13:26 +00:00
|
|
|
{
|
2011-11-16 21:51:17 +00:00
|
|
|
const struct rwlock *rw;
|
2006-01-27 23:13:26 +00:00
|
|
|
struct thread *td;
|
|
|
|
|
2011-11-16 21:51:17 +00:00
|
|
|
rw = (const struct rwlock *)lock;
|
2006-01-27 23:13:26 +00:00
|
|
|
|
|
|
|
db_printf(" state: ");
|
|
|
|
if (rw->rw_lock == RW_UNLOCKED)
|
|
|
|
db_printf("UNLOCKED\n");
|
2007-05-08 21:51:37 +00:00
|
|
|
else if (rw->rw_lock == RW_DESTROYED) {
|
|
|
|
db_printf("DESTROYED\n");
|
|
|
|
return;
|
|
|
|
} else if (rw->rw_lock & RW_LOCK_READ)
|
2007-03-13 16:51:27 +00:00
|
|
|
db_printf("RLOCK: %ju locks\n",
|
|
|
|
(uintmax_t)(RW_READERS(rw->rw_lock)));
|
2006-01-27 23:13:26 +00:00
|
|
|
else {
|
2006-04-17 21:11:01 +00:00
|
|
|
td = rw_wowner(rw);
|
2006-01-27 23:13:26 +00:00
|
|
|
db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
|
2007-11-14 06:21:24 +00:00
|
|
|
td->td_tid, td->td_proc->p_pid, td->td_name);
|
2007-06-26 21:31:56 +00:00
|
|
|
if (rw_recursed(rw))
|
|
|
|
db_printf(" recursed: %u\n", rw->rw_recurse);
|
2006-01-27 23:13:26 +00:00
|
|
|
}
|
|
|
|
db_printf(" waiters: ");
|
|
|
|
switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
|
|
|
|
case RW_LOCK_READ_WAITERS:
|
|
|
|
db_printf("readers\n");
|
|
|
|
break;
|
|
|
|
case RW_LOCK_WRITE_WAITERS:
|
|
|
|
db_printf("writers\n");
|
|
|
|
break;
|
|
|
|
case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
|
2007-03-12 20:10:29 +00:00
|
|
|
db_printf("readers and writers\n");
|
2006-01-27 23:13:26 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
db_printf("none\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|