callout(9): Allow spin lock use with callout_init_mtx().

Implement the lock_spin()/unlock_spin() lock class methods, moving the
assertion into _sleep() instead.  Change the assertions in callout(9) to
allow spin locks for both the regular and the C_DIRECT_EXEC cases.  For
C_DIRECT_EXEC callouts, spin locks are in fact the only locks allowed.
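
As an illustration (not part of this commit), here is a minimal sketch of
the pattern this change enables, using hypothetical foo_* names: a callout
protected by a spin mutex via callout_init_mtx() and scheduled with
C_DIRECT_EXEC, so the handler runs directly from the timer interrupt.

/* Hypothetical driver fragment; identifiers are made up. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

static struct mtx	foo_mtx;
static struct callout	foo_callout;

static void
foo_timeout(void *arg)
{
	/* Entered with foo_mtx held; with C_DIRECT_EXEC this runs from
	 * interrupt context, so only spin locks may be taken here. */
}

static void
foo_init(void)
{
	mtx_init(&foo_mtx, "foo spin", NULL, MTX_SPIN);
	callout_init_mtx(&foo_callout, &foo_mtx, 0);
	mtx_lock_spin(&foo_mtx);
	callout_reset_sbt(&foo_callout, SBT_1MS, 0, foo_timeout, NULL,
	    C_DIRECT_EXEC);
	mtx_unlock_spin(&foo_mtx);
}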

As a first use case, allow taskqueue_enqueue_timeout() to be used on fast
task queues.  This actually becomes more efficient, since C_DIRECT_EXEC
avoids extra context switches in callout(9).
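
Again as a hypothetical sketch (not from this commit), assuming the
system-wide fast task queue taskqueue_fast (created with
taskqueue_create_fast(), i.e. a spin queue), a timeout task can now be
scheduled on it:

/* Hypothetical example; identifiers other than the KPI are made up. */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>

static struct timeout_task foo_tt;

static void
foo_task(void *context, int pending)
{
	/* Deferred work, run by the fast task queue's threads. */
}

static void
foo_start(void)
{
	/* Before this change, enqueueing a timeout task on a spin
	 * ("fast") queue tripped the "Timeout for spin-queue" assertion;
	 * now the callout is simply scheduled with C_DIRECT_EXEC. */
	TIMEOUT_TASK_INIT(taskqueue_fast, &foo_tt, 0, foo_task, NULL);
	taskqueue_enqueue_timeout(taskqueue_fast, &foo_tt, hz / 10);
}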

MFC after:	2 weeks
Reviewed by:	hselasky
Differential Revision:	https://reviews.freebsd.org/D31778
commit 4730a8972b (parent 7af4475a6e)
Author:	Alexander Motin
Date:	2021-09-02 21:16:46 -04:00
6 changed files with 17 additions and 16 deletions


@@ -29,7 +29,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd December 13, 2019
+.Dd September 1, 2021
 .Dt CALLOUT 9
 .Os
 .Sh NAME
@@ -241,9 +241,6 @@ and the associated lock is released.
 This ensures that stopping or rescheduling the callout will abort any
 previously scheduled invocation.
 .Pp
-Only regular mutexes may be used with
-.Fn callout_init_mtx ;
-spin mutexes are not supported.
 A sleepable read-mostly lock
 .Po
 one initialized with the


@@ -28,7 +28,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd June 6, 2020
+.Dd September 1, 2021
 .Dt TASKQUEUE 9
 .Os
 .Sh NAME
@@ -237,9 +237,6 @@ and
 .Va flags ,
 as detailed in
 .Xr callout 9 .
-Only non-fast task queues can be used for
-.Va timeout_task
-scheduling.
 If the
 .Va ticks
 argument is negative, the already scheduled enqueueing is not re-scheduled.


@@ -215,7 +215,7 @@ void
 lock_spin(struct lock_object *lock, uintptr_t how)
 {
 
-	panic("spin locks can only use msleep_spin");
+	mtx_lock_spin((struct mtx *)lock);
 }
 
 uintptr_t
@@ -232,8 +232,12 @@ unlock_mtx(struct lock_object *lock)
 uintptr_t
 unlock_spin(struct lock_object *lock)
 {
+	struct mtx *m;
 
-	panic("spin locks can only use msleep_spin");
+	m = (struct mtx *)lock;
+	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
+	mtx_unlock_spin(m);
+	return (0);
 }
 
 #ifdef KDTRACE_HOOKS


@@ -188,6 +188,8 @@ _sleep(const void *ident, struct lock_object *lock, int priority,
 	DROP_GIANT();
 	if (lock != NULL && lock != &Giant.lock_object &&
 	    !(class->lc_flags & LC_SLEEPABLE)) {
+		KASSERT(!(class->lc_flags & LC_SPINLOCK),
+		    ("spin locks can only use msleep_spin"));
 		WITNESS_SAVE(lock, lock_witness);
 		lock_state = class->lc_unlock(lock);
 	} else


@@ -919,8 +919,9 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
 	} else {
 		direct = 0;
 	}
-	KASSERT(!direct || c->c_lock == NULL,
-	    ("%s: direct callout %p has lock", __func__, c));
+	KASSERT(!direct || c->c_lock == NULL ||
+	    (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK),
+	    ("%s: direct callout %p has non-spin lock", __func__, c));
 	cc = callout_lock(c);
 	/*
 	 * Don't allow migration if the user does not care.
@@ -1332,9 +1333,8 @@ _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
 	    ("callout_init_lock: bad flags %d", flags));
 	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
 	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
-	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
-	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
-	    __func__));
+	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
+	    ("%s: callout %p has sleepable lock", __func__, c));
 	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
 	c->c_cpu = cc_default_cpu;
 }


@@ -309,7 +309,6 @@ taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
 	TQ_LOCK(queue);
 	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
 	    ("Migrated queue"));
-	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
 	timeout_task->q = queue;
 	res = timeout_task->t.ta_pending;
 	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
@@ -329,6 +328,8 @@ taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
 		sbt = -sbt;	/* Ignore overflow. */
 	}
 	if (sbt > 0) {
+		if (queue->tq_spin)
+			flags |= C_DIRECT_EXEC;
 		callout_reset_sbt(&timeout_task->c, sbt, pr,
 		    taskqueue_timeout_func, timeout_task, flags);
 	}