/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}

static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		/*
		 * Find the right rm_priotracker structure for curthread.
		 * The guarantee about its uniqueness is given by the fact
		 * we already asserted the lock wasn't recursively acquired.
		 */
		critical_enter();
		td = curthread;
		pc = get_pcpu();
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is non-NULL when lock held in read mode"));
		critical_exit();
		rm_runlock(rm, tracker);
	}
	return (how);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}
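
/*
 * Illustrative note (added commentary, not from the original file): the
 * per-CPU queue manipulated by rm_tracker_add() and rm_tracker_remove() is a
 * circular, doubly linked list rooted at pc->pc_rm_queue.  New trackers are
 * pushed at the head, and only the forward (rmq_next) direction needs to stay
 * consistent at every step, since that is the only direction walked by
 * rm_cleanIPI() from interrupt context on the local CPU.
 */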

static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;
	pc = get_pcpu();

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags, xflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	if (opts & RM_NEW)
		liflags |= LO_NEW;
	if (opts & RM_DUPOK)
		liflags |= LO_DUPOK;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}
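
/*
 * Minimal usage sketch (added illustration, not from the original file; the
 * names "example_lock", "example_data", "tracker" and "snapshot" are made
 * up).  Readers pair each rm_rlock() with an rm_runlock() through a
 * caller-supplied, typically stack-allocated, struct rm_priotracker:
 *
 *	static struct rmlock example_lock;
 *	static int example_data;
 *
 *	rm_init(&example_lock, "example");
 *
 *	struct rm_priotracker tracker;
 *	int snapshot;
 *
 *	rm_rlock(&example_lock, &tracker);
 *	snapshot = example_data;	// read-side section
 *	rm_runlock(&example_lock, &tracker);
 *
 *	rm_wlock(&example_lock);
 *	example_data++;			// exclusive section
 *	rm_wunlock(&example_lock);
 *
 *	rm_destroy(&example_lock);
 */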

int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args;

	args = arg;
	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
}

static __noinline int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = get_pcpu();

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/*
	 * Check to see if the IPI granted us the lock after all.  The load of
	 * rmp_flags must happen after the tracker is removed from the list.
	 */
	atomic_interrupt_fence();
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = get_pcpu();
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	atomic_interrupt_fence();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	atomic_interrupt_fence();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (__predict_true(0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}

static __noinline void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))
		return;

	_rm_unlock_hard(td, tracker);
}

void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_ANDNOT(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendezvous_barrier,
		    rm_cleanIPI,
		    smp_no_rendezvous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	TD_LOCKS_DEC(curthread);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(get_pcpu(), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object,
		    LOP_NEWORDER | LOP_NOSLEEP, file, line, NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, LOP_NOSLEEP, file, line);
		TD_LOCKS_INC(curthread);
		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif

/*
 * Read-mostly sleepable locks.
 *
 * These primitives allow both readers and writers to sleep. However, neither
 * readers nor writers are tracked and subsequently there is no priority
 * propagation.
 *
 * They are intended to be used only when write-locking is almost never needed
 * (e.g., they can guard against unloading a kernel module) while read-locking
 * happens all the time.
 *
 * Concurrent writers take turns taking the lock while going off cpu. If this
 * is a concern for your use case, this is not the right primitive.
 *
 * Neither rms_rlock nor rms_runlock use thread fences. Instead interrupt
 * fences are inserted to ensure ordering with the code executed in the IPI
 * handler.
 *
 * No attempt is made to track which CPUs read locked at least once,
 * consequently write locking sends IPIs to all of them. This will become a
 * problem at some point. The easiest way to lessen it is to provide a bitmap.
 */
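
/*
 * Minimal usage sketch (added illustration, not from the original file; the
 * name "example_rms" is made up, and the write-side and unlock routines
 * rms_wlock(), rms_wunlock() and rms_runlock() are assumed from the rest of
 * the rms_* API declared alongside these functions in <sys/rmlock.h>).
 * Unlike struct rmlock there is no per-acquisition tracker to pass around,
 * and both sides may sleep:
 *
 *	static struct rmslock example_rms;
 *
 *	rms_init(&example_rms, "example");
 *
 *	rms_rlock(&example_rms);
 *	// ... read-side section ...
 *	rms_runlock(&example_rms);
 *
 *	rms_wlock(&example_rms);
 *	// ... exclusive section ...
 *	rms_wunlock(&example_rms);
 *
 *	rms_destroy(&example_rms);
 */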
|
|
|
|
|
2020-11-04 21:18:08 +00:00
|
|
|
#define RMS_NOOWNER ((void *)0x1)
|
|
|
|
#define RMS_TRANSIENT ((void *)0x2)
|
|
|
|
#define RMS_FLAGMASK 0xf
|
|
|
|
|
2020-11-07 16:57:53 +00:00
|
|
|
struct rmslock_pcpu {
|
|
|
|
int influx;
|
|
|
|
int readers;
|
|
|
|
};
|
|
|
|
|
|
|
|
_Static_assert(sizeof(struct rmslock_pcpu) == 8, "bad size");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Internal routines
|
|
|
|
*/
|
|
|
|
static struct rmslock_pcpu *
|
|
|
|
rms_int_pcpu(struct rmslock *rms)
|
|
|
|
{
|
|
|
|
|
|
|
|
CRITICAL_ASSERT(curthread);
|
|
|
|
return (zpcpu_get(rms->pcpu));
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct rmslock_pcpu *
|
|
|
|
rms_int_remote_pcpu(struct rmslock *rms, int cpu)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (zpcpu_get_cpu(rms->pcpu, cpu));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rms_int_influx_enter(struct rmslock *rms, struct rmslock_pcpu *pcpu)
|
|
|
|
{
|
|
|
|
|
|
|
|
CRITICAL_ASSERT(curthread);
|
|
|
|
MPASS(pcpu->influx == 0);
|
|
|
|
pcpu->influx = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rms_int_influx_exit(struct rmslock *rms, struct rmslock_pcpu *pcpu)
|
|
|
|
{
|
|
|
|
|
|
|
|
CRITICAL_ASSERT(curthread);
|
|
|
|
MPASS(pcpu->influx == 1);
|
|
|
|
pcpu->influx = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef INVARIANTS
static void
rms_int_debug_readers_inc(struct rmslock *rms)
{
	int old;

	old = atomic_fetchadd_int(&rms->debug_readers, 1);
	KASSERT(old >= 0, ("%s: bad readers count %d\n", __func__, old));
}

static void
rms_int_debug_readers_dec(struct rmslock *rms)
{
	int old;

	old = atomic_fetchadd_int(&rms->debug_readers, -1);
	KASSERT(old > 0, ("%s: bad readers count %d\n", __func__, old));
}
#else
static void
rms_int_debug_readers_inc(struct rmslock *rms)
{
}

static void
rms_int_debug_readers_dec(struct rmslock *rms)
{
}
#endif

static void
rms_int_readers_inc(struct rmslock *rms, struct rmslock_pcpu *pcpu)
{

	CRITICAL_ASSERT(curthread);
	rms_int_debug_readers_inc(rms);
	pcpu->readers++;
}

static void
rms_int_readers_dec(struct rmslock *rms, struct rmslock_pcpu *pcpu)
{

	CRITICAL_ASSERT(curthread);
	rms_int_debug_readers_dec(rms);
	pcpu->readers--;
}

/*
 * Public API
 */
void
rms_init(struct rmslock *rms, const char *name)
{

	rms->owner = RMS_NOOWNER;
	rms->writers = 0;
	rms->readers = 0;
	rms->debug_readers = 0;
	mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
	rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO);
}

void
rms_destroy(struct rmslock *rms)
{

	MPASS(rms->writers == 0);
	MPASS(rms->readers == 0);
	mtx_destroy(&rms->mtx);
	uma_zfree_pcpu(pcpu_zone_8, rms->pcpu);
}
/*
 * Slow path for rms_rlock(): a writer is active or pending. Back out of the
 * fast path, sleep until all writers are gone and then register as a reader
 * while holding the mutex.
 */
static void __noinline
rms_rlock_fallback(struct rmslock *rms)
{

	rms_int_influx_exit(rms, rms_int_pcpu(rms));
	critical_exit();

	mtx_lock(&rms->mtx);
	while (rms->writers > 0)
		msleep(&rms->readers, &rms->mtx, PUSER - 1, mtx_name(&rms->mtx), 0);
	critical_enter();
	rms_int_readers_inc(rms, rms_int_pcpu(rms));
	mtx_unlock(&rms->mtx);
	critical_exit();
}

void
rms_rlock(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	critical_enter();
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_rlock_fallback(rms);
		return;
	}
	atomic_interrupt_fence();
	rms_int_readers_inc(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);
	critical_exit();
}
int
rms_try_rlock(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;

	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	critical_enter();
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_int_influx_exit(rms, pcpu);
		critical_exit();
		return (0);
	}
	atomic_interrupt_fence();
	rms_int_readers_inc(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);
	critical_exit();
	return (1);
}
static void __noinline
rms_runlock_fallback(struct rmslock *rms)
{

	rms_int_influx_exit(rms, rms_int_pcpu(rms));
	critical_exit();

	mtx_lock(&rms->mtx);
	MPASS(rms->writers > 0);
	MPASS(rms->readers > 0);
	MPASS(rms->debug_readers == rms->readers);
	rms_int_debug_readers_dec(rms);
	rms->readers--;
	if (rms->readers == 0)
		wakeup_one(&rms->writers);
	mtx_unlock(&rms->mtx);
}

void
rms_runlock(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;

	critical_enter();
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_runlock_fallback(rms);
		return;
	}
	atomic_interrupt_fence();
	rms_int_readers_dec(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);
	critical_exit();
}
struct rmslock_ipi {
	struct rmslock *rms;
	struct smp_rendezvous_cpus_retry_arg srcra;
};

/*
 * The IPI handler. Folds the local per-CPU reader count into the central
 * rms->readers counter so the pending writer can wait for it to drain. If a
 * reader on this CPU is inside its fast-path window (influx), nothing is
 * folded and smp_rendezvous_cpus_done() is not called, so the rendezvous is
 * retried for this CPU.
 */
static void
rms_action_func(void *arg)
{
	struct rmslock_ipi *rmsipi;
	struct rmslock_pcpu *pcpu;
	struct rmslock *rms;

	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
	rms = rmsipi->rms;
	pcpu = rms_int_pcpu(rms);

	if (pcpu->influx)
		return;
	if (pcpu->readers != 0) {
		atomic_add_int(&rms->readers, pcpu->readers);
		pcpu->readers = 0;
	}
	smp_rendezvous_cpus_done(arg);
}

/*
 * Called for CPUs which have not completed the rendezvous: spin until the
 * remote CPU leaves its fast-path window.
 */
static void
rms_wait_func(void *arg, int cpu)
{
	struct rmslock_ipi *rmsipi;
	struct rmslock_pcpu *pcpu;
	struct rmslock *rms;

	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
	rms = rmsipi->rms;
	pcpu = rms_int_remote_pcpu(rms, cpu);

	while (atomic_load_int(&pcpu->influx))
		cpu_spinwait();
}
#ifdef INVARIANTS
static void
rms_assert_no_pcpu_readers(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;
	int cpu;

	CPU_FOREACH(cpu) {
		pcpu = rms_int_remote_pcpu(rms, cpu);
		if (pcpu->readers != 0) {
			panic("%s: got %d readers on cpu %d\n", __func__,
			    pcpu->readers, cpu);
		}
	}
}
#else
static void
rms_assert_no_pcpu_readers(struct rmslock *rms)
{
}
#endif
/*
 * Switch the lock to "writer pending" mode: with rms->writers already
 * non-zero, IPI all CPUs so that every per-CPU reader count is transferred
 * into rms->readers, which the writer can then wait on.
 */
static void
rms_wlock_switch(struct rmslock *rms)
{
	struct rmslock_ipi rmsipi;

	MPASS(rms->readers == 0);
	MPASS(rms->writers == 1);

	rmsipi.rms = rms;

	smp_rendezvous_cpus_retry(all_cpus,
	    smp_no_rendezvous_barrier,
	    rms_action_func,
	    smp_no_rendezvous_barrier,
	    rms_wait_func,
	    &rmsipi.srcra);
}
/*
 * Write-lock. Later writers queue behind the current owner by sleeping on
 * &rms->owner; rms_wunlock() hands the lock over to the next queued writer
 * by setting the owner to RMS_TRANSIENT.
 */
void
rms_wlock(struct rmslock *rms)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	mtx_lock(&rms->mtx);
	rms->writers++;
	if (rms->writers > 1) {
		msleep(&rms->owner, &rms->mtx, (PUSER - 1),
		    mtx_name(&rms->mtx), 0);
		MPASS(rms->readers == 0);
		KASSERT(rms->owner == RMS_TRANSIENT,
		    ("%s: unexpected owner value %p\n", __func__,
		    rms->owner));
		goto out_grab;
	}

	KASSERT(rms->owner == RMS_NOOWNER,
	    ("%s: unexpected owner value %p\n", __func__, rms->owner));

	rms_wlock_switch(rms);
	rms_assert_no_pcpu_readers(rms);

	if (rms->readers > 0) {
		msleep(&rms->writers, &rms->mtx, (PUSER - 1),
		    mtx_name(&rms->mtx), 0);
	}

out_grab:
	rms->owner = curthread;
	rms_assert_no_pcpu_readers(rms);
	mtx_unlock(&rms->mtx);
	MPASS(rms->readers == 0);
}
void
rms_wunlock(struct rmslock *rms)
{

	mtx_lock(&rms->mtx);
	KASSERT(rms->owner == curthread,
	    ("%s: unexpected owner value %p\n", __func__, rms->owner));
	MPASS(rms->writers >= 1);
	MPASS(rms->readers == 0);
	rms->writers--;
	if (rms->writers > 0) {
		wakeup_one(&rms->owner);
		rms->owner = RMS_TRANSIENT;
	} else {
		wakeup(&rms->readers);
		rms->owner = RMS_NOOWNER;
	}
	mtx_unlock(&rms->mtx);
}

void
rms_unlock(struct rmslock *rms)
{

	if (rms_wowned(rms))
		rms_wunlock(rms);
	else
		rms_runlock(rms);
}