Major callout subsystem cleanup and rewrite:
- Close a migration race where callout_reset() failed to set the
  CALLOUT_ACTIVE flag.
- Callout callback functions are now allowed to be protected by
  spinlocks.
- Switching the callout CPU number cannot always be done on a
  per-callout basis. See the updated timeout(9) manual page for more
  information.
- The timeout(9) manual page has been updated to reflect how all the
  functions inside the callout API are working. The manual page has
  been made function-oriented to make it easier to deduce how each of
  the functions making up the callout API works without having to
  first read the whole manual page. All functions are grouped into a
  handful of sections which should give a quick top-level overview of
  when the different functions should be used.
- The CALLOUT_SHAREDLOCK flag and its functionality have been removed
  to reduce the complexity in the callout code and to avoid problems
  with atomically stopping callouts via callout_stop(). If someone
  needs it, it can be re-added. From my quick grep there are no
  CALLOUT_SHAREDLOCK clients in the kernel.
- A new callout API function named "callout_drain_async()" has been
  added. See the updated timeout(9) manual page for a complete
  description.
- Update the callout clients in the "kern/" folder to use the callout
  API properly, like cv_timedwait(). Previously there was some custom
  sleepqueue code in the callout subsystem, which has been removed,
  because we now allow callouts to be protected by spinlocks. This
  allows us to tear down the callout like done with regular mutexes,
  and a "td_slpmutex" has been added to "struct thread" to atomically
  tear down the "td_slpcallout". Further, the "TDF_TIMOFAIL" and
  "SWT_SLEEPQTIMO" states can now be completely removed. Currently
  they are marked as available and will be cleaned up in a follow-up
  commit.
- Bump the __FreeBSD_version to indicate kernel modules need
  recompilation.
- There have been several reports that this patch "seems to squash a
  serious bug leading to a callout timeout and panic".

Kernel build testing:   all architectures were built
MFC after:              2 weeks
Differential Revision:  https://reviews.freebsd.org/D1438
Sponsored by:           Mellanox Technologies
Reviewed by:            jhb, adrian, sbruno and emaste
parent d7275b3f14
commit 1a26c3c047
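As background for the changes described above, here is a minimal sketch of the mutex-protected callout pattern that timeout(9) documents, including the atomic-stop behaviour this commit relies on. It is not taken from the commit itself; the mydev_* names and softc layout are hypothetical.

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

/* Hypothetical per-device state; not part of this commit. */
struct mydev_softc {
	struct mtx	sc_mtx;		/* protects sc_callout and device state */
	struct callout	sc_callout;
};

static void
mydev_timer(void *arg)
{
	struct mydev_softc *sc = arg;

	/* sc_mtx is held by the callout subsystem on entry. */
	/* ... periodic work ... */
	callout_reset(&sc->sc_callout, hz, mydev_timer, sc);	/* re-arm */
}

static void
mydev_attach(struct mydev_softc *sc)
{
	mtx_init(&sc->sc_mtx, "mydev", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_callout, hz, mydev_timer, sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
mydev_detach(struct mydev_softc *sc)
{
	/* With the associated lock held, callout_stop() cancels atomically... */
	mtx_lock(&sc->sc_mtx);
	callout_stop(&sc->sc_callout);
	mtx_unlock(&sc->sc_mtx);
	/* ...and callout_drain() waits out any handler already running. */
	callout_drain(&sc->sc_callout);
	mtx_destroy(&sc->sc_mtx);
}
```

Note the teardown order: stop under the lock, drain, and only then destroy the lock, matching the manual page's rule that a callout must be drained before its associated lock is destroyed.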
@@ -1570,6 +1570,7 @@ MLINKS+=timeout.9 callout.9 \
 timeout.9 callout_active.9 \
 timeout.9 callout_deactivate.9 \
 timeout.9 callout_drain.9 \
+timeout.9 callout_drain_async.9 \
 timeout.9 callout_handle_init.9 \
 timeout.9 callout_init.9 \
 timeout.9 callout_init_mtx.9 \
@@ -29,13 +29,14 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd October 8, 2014
+.Dd January 14, 2015
 .Dt TIMEOUT 9
 .Os
 .Sh NAME
 .Nm callout_active ,
 .Nm callout_deactivate ,
 .Nm callout_drain ,
+.Nm callout_drain_async ,
 .Nm callout_handle_init ,
 .Nm callout_init ,
 .Nm callout_init_mtx ,
@ -63,279 +64,232 @@
|
|||||||
.In sys/systm.h
|
.In sys/systm.h
|
||||||
.Bd -literal
|
.Bd -literal
|
||||||
typedef void timeout_t (void *);
|
typedef void timeout_t (void *);
|
||||||
|
typedef void callout_func_t (void *);
|
||||||
.Ed
|
.Ed
|
||||||
.Ft int
|
|
||||||
.Fn callout_active "struct callout *c"
|
|
||||||
.Ft void
|
|
||||||
.Fn callout_deactivate "struct callout *c"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_drain "struct callout *c"
|
|
||||||
.Ft void
|
|
||||||
.Fn callout_handle_init "struct callout_handle *handle"
|
|
||||||
.Bd -literal
|
|
||||||
struct callout_handle handle = CALLOUT_HANDLE_INITIALIZER(&handle);
|
|
||||||
.Ed
|
|
||||||
.Ft void
|
|
||||||
.Fn callout_init "struct callout *c" "int mpsafe"
|
|
||||||
.Ft void
|
|
||||||
.Fn callout_init_mtx "struct callout *c" "struct mtx *mtx" "int flags"
|
|
||||||
.Ft void
|
|
||||||
.Fn callout_init_rm "struct callout *c" "struct rmlock *rm" "int flags"
|
|
||||||
.Ft void
|
|
||||||
.Fn callout_init_rw "struct callout *c" "struct rwlock *rw" "int flags"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_pending "struct callout *c"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_reset "struct callout *c" "int ticks" "timeout_t *func" "void *arg"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_reset_curcpu "struct callout *c" "int ticks" "timeout_t *func" \
|
|
||||||
"void *arg"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_reset_on "struct callout *c" "int ticks" "timeout_t *func" \
|
|
||||||
"void *arg" "int cpu"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_reset_sbt "struct callout *c" "sbintime_t sbt" \
|
|
||||||
"sbintime_t pr" "timeout_t *func" "void *arg" "int flags"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_reset_sbt_curcpu "struct callout *c" "sbintime_t sbt" \
|
|
||||||
"sbintime_t pr" "timeout_t *func" "void *arg" "int flags"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_reset_sbt_on "struct callout *c" "sbintime_t sbt" \
|
|
||||||
"sbintime_t pr" "timeout_t *func" "void *arg" "int cpu" "int flags"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_schedule "struct callout *c" "int ticks"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_schedule_curcpu "struct callout *c" "int ticks"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_schedule_on "struct callout *c" "int ticks" "int cpu"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_schedule_sbt "struct callout *c" "sbintime_t sbt" \
|
|
||||||
"sbintime_t pr" "int flags"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_schedule_sbt_curcpu "struct callout *c" "sbintime_t sbt" \
|
|
||||||
"sbintime_t pr" "int flags"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_schedule_sbt_on "struct callout *c" "sbintime_t sbt" \
|
|
||||||
"sbintime_t pr" "int cpu" "int flags"
|
|
||||||
.Ft int
|
|
||||||
.Fn callout_stop "struct callout *c"
|
|
||||||
.Ft struct callout_handle
|
|
||||||
.Fn timeout "timeout_t *func" "void *arg" "int ticks"
|
|
||||||
.Ft void
|
|
||||||
.Fn untimeout "timeout_t *func" "void *arg" "struct callout_handle handle"
|
|
||||||
.Sh DESCRIPTION
|
.Sh DESCRIPTION
|
||||||
The
|
The
|
||||||
.Nm callout
|
.Nm callout
|
||||||
API is used to schedule a call to an arbitrary function at a specific
|
API is used to schedule a call to an arbitrary function at a specific
|
||||||
time in the future.
|
time in the future in a single-shot fashion.
|
||||||
Consumers of this API are required to allocate a callout structure
|
Consumers of this API are required to allocate a
|
||||||
.Pq struct callout
|
.Pq struct callout
|
||||||
for each pending function invocation.
|
structure for each pending function invocation.
|
||||||
This structure stores state about the pending function invocation including
|
|
||||||
the function to be called and the time at which the function should be invoked.
|
|
||||||
Pending function calls can be cancelled or rescheduled to a different time.
|
|
||||||
In addition,
|
|
||||||
a callout structure may be reused to schedule a new function call after a
|
|
||||||
scheduled call is completed.
|
|
||||||
.Pp
|
|
||||||
Callouts only provide a single-shot mode.
|
|
||||||
If a consumer requires a periodic timer,
|
|
||||||
it must explicitly reschedule each function call.
|
|
||||||
This is normally done by rescheduling the subsequent call within the called
|
|
||||||
function.
|
|
||||||
.Pp
|
|
||||||
Callout functions must not sleep.
|
|
||||||
They may not acquire sleepable locks,
|
|
||||||
wait on condition variables,
|
|
||||||
perform blocking allocation requests,
|
|
||||||
or invoke any other action that might sleep.
|
|
||||||
.Pp
|
|
||||||
Each callout structure must be initialized by
|
|
||||||
.Fn callout_init ,
|
|
||||||
.Fn callout_init_mtx ,
|
|
||||||
.Fn callout_init_rm ,
|
|
||||||
or
|
|
||||||
.Fn callout_init_rw
|
|
||||||
before it is passed to any of the other callout functions.
|
|
||||||
The
|
The
|
||||||
.Fn callout_init
|
.Pq struct callout
|
||||||
function initializes a callout structure in
|
structure stores the full state about any pending function call and
|
||||||
.Fa c
|
should be drained by a call to
|
||||||
that is not associated with a specific lock.
|
.Fn callout_drain
|
||||||
|
or
|
||||||
|
.Fn callout_drain_async
|
||||||
|
before freeing.
|
||||||
|
.Sh INITIALISATION
|
||||||
|
.Ft void
|
||||||
|
.Fn callout_handle_init "struct callout_handle *handle"
|
||||||
|
This function is deprecated and is used to prepare a
|
||||||
|
.Pq struct callout_handle
|
||||||
|
structure before it can be used the first time.
|
||||||
|
If this function is called on a pending timeout, the pending timeout
|
||||||
|
cannot be cancelled and the
|
||||||
|
.Fn untimeout
|
||||||
|
function will return as if there was no timeout pending.
|
||||||
|
.Pp
|
||||||
|
.Fn CALLOUT_HANDLE_INITIALIZER "&handle"
|
||||||
|
This macro is deprecated and can be used instead of
|
||||||
|
.Fn callout_handle_init
|
||||||
|
to assign the default state to the
|
||||||
|
.Pq struct callout_handle
|
||||||
|
structure when declaring static timeouts.
|
||||||
|
.Pp
|
||||||
|
.Ft void
|
||||||
|
.Fn callout_init "struct callout *c" "int mpsafe"
|
||||||
|
This function prepares a
|
||||||
|
.Pq struct callout
|
||||||
|
structure before it can be used.
|
||||||
|
This function should not be used when the callout is pending a timeout.
|
||||||
If the
|
If the
|
||||||
.Fa mpsafe
|
.Fa mpsafe
|
||||||
argument is zero,
|
argument is non-zero, the callback function will be running unlocked.
|
||||||
the callout structure is not considered to be
|
Else the Giant mutex will be locked before calling the callback function.
|
||||||
.Dq multi-processor safe ;
|
|
||||||
and the Giant lock will be acquired before calling the callout function
|
|
||||||
and released when the callout function returns.
|
|
||||||
.Pp
|
.Pp
|
||||||
|
.Ft void
|
||||||
|
.Fn callout_init_mtx "struct callout *c" "struct mtx *mtx" "int flags"
|
||||||
|
This function prepares a
|
||||||
|
.Pq struct callout
|
||||||
|
structure before it can be used.
|
||||||
|
This function should not be used when the callout is pending a timeout.
|
||||||
The
|
The
|
||||||
.Fn callout_init_mtx ,
|
.Fa mtx
|
||||||
.Fn callout_init_rm ,
|
argument should be non-zero and should specify a pointer to a valid
|
||||||
and
|
spinlock type of mutex or a valid regular non-sleepable mutex which
|
||||||
.Fn callout_init_rw
|
the callback subsystem should lock before calling the callback
|
||||||
functions initialize a callout structure in
|
function.
|
||||||
.Fa c
|
Valid
|
||||||
that is associated with a specific lock.
|
|
||||||
The lock is specified by the
|
|
||||||
.Fa mtx ,
|
|
||||||
.Fa rm ,
|
|
||||||
or
|
|
||||||
.Fa rw
|
|
||||||
parameter.
|
|
||||||
The associated lock must be held while stopping or rescheduling the
|
|
||||||
callout.
|
|
||||||
The callout subsystem acquires the associated lock before calling the
|
|
||||||
callout function and releases it after the function returns.
|
|
||||||
If the callout was cancelled while the callout subsystem waited for the
|
|
||||||
associated lock,
|
|
||||||
the callout function is not called,
|
|
||||||
and the associated lock is released.
|
|
||||||
This ensures that stopping or rescheduling the callout will abort any
|
|
||||||
previously scheduled invocation.
|
|
||||||
.Pp
|
|
||||||
Only regular mutexes may be used with
|
|
||||||
.Fn callout_init_mtx ;
|
|
||||||
spin mutexes are not supported.
|
|
||||||
A sleepable read-mostly lock
|
|
||||||
.Po
|
|
||||||
one initialized with the
|
|
||||||
.Dv RM_SLEEPABLE
|
|
||||||
flag
|
|
||||||
.Pc
|
|
||||||
may not be used with
|
|
||||||
.Fn callout_init_rm .
|
|
||||||
Similarly, other sleepable lock types such as
|
|
||||||
.Xr sx 9
|
|
||||||
and
|
|
||||||
.Xr lockmgr 9
|
|
||||||
cannot be used with callouts because sleeping is not permitted in
|
|
||||||
the callout subsystem.
|
|
||||||
.Pp
|
|
||||||
These
|
|
||||||
.Fa flags
|
.Fa flags
|
||||||
may be specified for
|
are:
|
||||||
.Fn callout_init_mtx ,
|
|
||||||
.Fn callout_init_rm ,
|
|
||||||
or
|
|
||||||
.Fn callout_init_rw :
|
|
||||||
.Bl -tag -width ".Dv CALLOUT_RETURNUNLOCKED"
|
.Bl -tag -width ".Dv CALLOUT_RETURNUNLOCKED"
|
||||||
.It Dv CALLOUT_RETURNUNLOCKED
|
.It Dv CALLOUT_RETURNUNLOCKED
|
||||||
The callout function will release the associated lock itself,
|
It is assumed that the callout function has released the specified
|
||||||
so the callout subsystem should not attempt to unlock it
|
mutex before returning.
|
||||||
after the callout function returns.
|
Else the callout subsystem will release the specified mutex after the
|
||||||
.It Dv CALLOUT_SHAREDLOCK
|
callout function has returned.
|
||||||
The lock is only acquired in read mode when running the callout handler.
|
|
||||||
This flag is ignored by
|
|
||||||
.Fn callout_init_mtx .
|
|
||||||
.El
|
.El
|
||||||
.Pp
|
.Pp
|
||||||
The function
|
.Ft void
|
||||||
.Fn callout_stop
|
.Fn callout_init_rm "struct callout *c" "struct rmlock *rm" "int flags"
|
||||||
cancels a callout
|
This function is the same like the
|
||||||
.Fa c
|
.Fn callout_init_mtx
|
||||||
if it is currently pending.
|
function except it accepts a read-mostly type of lock.
|
||||||
If the callout is pending, then
|
The read-mostly lock must not be initialised with the
|
||||||
.Fn callout_stop
|
.Dv RM_SLEEPABLE
|
||||||
returns a non-zero value.
|
flag.
|
||||||
If the callout is not set,
|
|
||||||
has already been serviced,
|
|
||||||
or is currently being serviced,
|
|
||||||
then zero will be returned.
|
|
||||||
If the callout has an associated lock,
|
|
||||||
then that lock must be held when this function is called.
|
|
||||||
.Pp
|
.Pp
|
||||||
The function
|
.Ft void
|
||||||
.Fn callout_drain
|
.Fn callout_init_rw "struct callout *c" "struct rwlock *rw" "int flags"
|
||||||
is identical to
|
This function is the same like the
|
||||||
.Fn callout_stop
|
.Fn callout_init_mtx
|
||||||
except that it will wait for the callout
|
function except it accepts a reader-writer type of lock.
|
||||||
.Fa c
|
.Sh SCHEDULING CALLOUTS
|
||||||
to complete if it is already in progress.
|
.Ft struct callout_handle
|
||||||
This function MUST NOT be called while holding any
|
.Fn timeout "timeout_t *func" "void *arg" "int ticks"
|
||||||
locks on which the callout might block, or deadlock will result.
|
This function is deprecated and schedules a call to the function given by the argument
|
||||||
Note that if the callout subsystem has already begun processing this
|
.Fa func
|
||||||
callout, then the callout function may be invoked before
|
to take place after
|
||||||
.Fn callout_drain
|
|
||||||
returns.
|
|
||||||
However, the callout subsystem does guarantee that the callout will be
|
|
||||||
fully stopped before
|
|
||||||
.Fn callout_drain
|
|
||||||
returns.
|
|
||||||
.Pp
|
|
||||||
The
|
|
||||||
.Fn callout_reset
|
|
||||||
and
|
|
||||||
.Fn callout_schedule
|
|
||||||
function families schedule a future function invocation for callout
|
|
||||||
.Fa c .
|
|
||||||
If
|
|
||||||
.Fa c
|
|
||||||
already has a pending callout,
|
|
||||||
it is cancelled before the new invocation is scheduled.
|
|
||||||
These functions return a non-zero value if a pending callout was cancelled
|
|
||||||
and zero if there was no pending callout.
|
|
||||||
If the callout has an associated lock,
|
|
||||||
then that lock must be held when any of these functions are called.
|
|
||||||
.Pp
|
|
||||||
The time at which the callout function will be invoked is determined by
|
|
||||||
either the
|
|
||||||
.Fa ticks
|
|
||||||
argument or the
|
|
||||||
.Fa sbt ,
|
|
||||||
.Fa pr ,
|
|
||||||
and
|
|
||||||
.Fa flags
|
|
||||||
arguments.
|
|
||||||
When
|
|
||||||
.Fa ticks
|
|
||||||
is used,
|
|
||||||
the callout is scheduled to execute after
|
|
||||||
.Fa ticks Ns No /hz
|
.Fa ticks Ns No /hz
|
||||||
seconds.
|
seconds.
|
||||||
Non-positive values of
|
Non-positive values of
|
||||||
.Fa ticks
|
.Fa ticks
|
||||||
are silently converted to the value
|
are silently converted to the value
|
||||||
.Sq 1 .
|
.Sq 1 .
|
||||||
.Pp
|
|
||||||
The
|
The
|
||||||
.Fa sbt ,
|
.Fa func
|
||||||
.Fa pr ,
|
argument should be a valid pointer to a function that takes a single
|
||||||
and
|
.Fa void *
|
||||||
.Fa flags
|
argument.
|
||||||
arguments provide more control over the scheduled time including
|
Upon invocation, the
|
||||||
support for higher resolution times,
|
.Fa func
|
||||||
specifying the precision of the scheduled time,
|
function will receive
|
||||||
and setting an absolute deadline instead of a relative timeout.
|
.Fa arg
|
||||||
The callout is scheduled to execute in a time window which begins at
|
as its only argument.
|
||||||
the time specified in
|
The Giant lock is locked when the
|
||||||
|
.Fa arg
|
||||||
|
function is invoked and should not be unlocked by this function.
|
||||||
|
The returned value from
|
||||||
|
.Fn timeout
|
||||||
|
is a
|
||||||
|
.Ft struct callout_handle
|
||||||
|
structure which can be used in conjunction with the
|
||||||
|
.Fn untimeout
|
||||||
|
function to request that a scheduled timeout be cancelled.
|
||||||
|
As handles are recycled by the system, it is possible, although unlikely,
|
||||||
|
that a handle from one invocation of
|
||||||
|
.Fn timeout
|
||||||
|
may match the handle of another invocation of
|
||||||
|
.Fn timeout
|
||||||
|
if both calls used the same function pointer and argument, and the first
|
||||||
|
timeout is expired or canceled before the second call.
|
||||||
|
Please ensure that the function and argument pointers are unique when using this function.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_reset "struct callout *c" "int ticks" "callout_func_t *func" "void *arg"
|
||||||
|
This function is used to schedule or re-schedule a callout.
|
||||||
|
This function at first stops the callout given by the
|
||||||
|
.Fa c
|
||||||
|
argument, if any.
|
||||||
|
Then it will start the callout given by the
|
||||||
|
.Fa c
|
||||||
|
argument.
|
||||||
|
The relative time until the timeout callback happens is given by the
|
||||||
|
.Fa ticks
|
||||||
|
argument.
|
||||||
|
The number of ticks in a second is defined by
|
||||||
|
.Dv hz
|
||||||
|
and can vary from system to system.
|
||||||
|
This function returns a non-zero value if the given callout was pending and
|
||||||
|
the callback function was prevented from being called.
|
||||||
|
Else a value of zero is returned.
|
||||||
|
If a lock is associated with the callout given by the
|
||||||
|
.Fa c
|
||||||
|
argument and it is exclusivly locked when this function is called this
|
||||||
|
function will always ensure that previous callback function, if any,
|
||||||
|
is never reached.
|
||||||
|
In other words the callout will be atomically restarted.
|
||||||
|
Else there is no such guarantee.
|
||||||
|
The callback function is given by the
|
||||||
|
.Fa func
|
||||||
|
argument and its function argument is given by the
|
||||||
|
.Fa arg
|
||||||
|
argument.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_reset_curcpu "struct callout *c" "int ticks" "callout_func_t *func" \
|
||||||
|
"void *arg"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset
|
||||||
|
function except the callback function given by the
|
||||||
|
.Fa func
|
||||||
|
argument will be executed on the same CPU which called this function.
|
||||||
|
A change in the CPU selection can happen if the callout has a lock
|
||||||
|
associated with it and is locked when this function is called.
|
||||||
|
A change in the CPU selection cannot happen if this function is
|
||||||
|
re-scheduled inside a callout function.
|
||||||
|
Else the callback function given by the
|
||||||
|
.Fa func
|
||||||
|
argument will be executed on the same CPU like previously done.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_reset_on "struct callout *c" "int ticks" "callout_func_t *func" \
|
||||||
|
"void *arg" "int cpu"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset
|
||||||
|
function except the callback function given by the
|
||||||
|
.Fa func
|
||||||
|
argument will be executed on the CPU given by the
|
||||||
|
.Fa cpu
|
||||||
|
argument.
|
||||||
|
A change in the CPU selection can happen if the callout has a lock
|
||||||
|
associated with it and is locked when this function is called.
|
||||||
|
A change in the CPU selection cannot happen if this function is
|
||||||
|
re-scheduled inside a callout function.
|
||||||
|
Else the callback function given by the
|
||||||
|
.Fa func
|
||||||
|
argument will be executed on the same CPU like previously done.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_reset_sbt "struct callout *c" "sbintime_t sbt" \
|
||||||
|
"sbintime_t pr" "callout_func_t *func" "void *arg" "int flags"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset
|
||||||
|
function except the relative or absolute time after which the timeout
|
||||||
|
callback should happen is given by the
|
||||||
.Fa sbt
|
.Fa sbt
|
||||||
and extends for the amount of time specified in
|
argument and extends for the amount of time specified in the
|
||||||
.Fa pr .
|
.Fa pr
|
||||||
If
|
argument.
|
||||||
|
This function is used when you need high precision timeouts.
|
||||||
|
If the
|
||||||
.Fa sbt
|
.Fa sbt
|
||||||
specifies a time in the past,
|
argument specifies a time in the past,
|
||||||
the window is adjusted to start at the current time.
|
the window is adjusted to start at the current time.
|
||||||
A non-zero value for
|
A non-zero value for
|
||||||
.Fa pr
|
.Fa pr
|
||||||
allows the callout subsystem to coalesce callouts scheduled close to each
|
allows the callout subsystem to coalesce callouts scheduled close to each
|
||||||
other into fewer timer interrupts,
|
other into fewer timer interrupts,
|
||||||
reducing processing overhead and power consumption.
|
reducing processing overhead and power consumption.
|
||||||
These
|
The
|
||||||
.Fa flags
|
.Fa flags
|
||||||
may be specified to adjust the interpretation of
|
argument may be non-zero to adjust the interpretation of the
|
||||||
.Fa sbt
|
.Fa sbt
|
||||||
and
|
and the
|
||||||
.Fa pr :
|
.Fa pr
|
||||||
|
arguments:
|
||||||
.Bl -tag -width ".Dv C_DIRECT_EXEC"
|
.Bl -tag -width ".Dv C_DIRECT_EXEC"
|
||||||
.It Dv C_ABSOLUTE
|
.It Dv C_ABSOLUTE
|
||||||
Handle the
|
Handle the
|
||||||
.Fa sbt
|
.Fa sbt
|
||||||
argument as an absolute time since boot.
|
argument as an absolute time since boot.
|
||||||
By default,
|
By default, the
|
||||||
.Fa sbt
|
.Fa sbt
|
||||||
is treated as a relative amount of time,
|
argument is treated like a relative amount of time,
|
||||||
similar to
|
similar to
|
||||||
.Fa ticks .
|
.Fa ticks .
|
||||||
.It Dv C_DIRECT_EXEC
|
.It Dv C_DIRECT_EXEC
|
||||||
@@ -347,7 +301,7 @@ Callout functions run in this context may use only spin mutexes for locking
 and should be as small as possible because they run with absolute priority.
 .It Fn C_PREL
 Specifies relative event time precision as binary logarithm of time interval
-divided by acceptable time deviation: 1 -- 1/2, 2 -- 1/4, etc.
+divided by acceptable time deviation: 1 -- 1/2, 2 -- 1/4 and so on.
 Note that the larger of
 .Fa pr
 or this value is used as the length of the time window.
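As a rough illustration of the sbintime-based scheduling and precision flags discussed in these hunks, a hedged sketch follows; the callout, handler and interval are hypothetical and not taken from this diff.

```c
/*
 * Schedule the (hypothetical) mydrv_tick handler roughly 50 ms from
 * now and allow it to fire anywhere inside a ~12.5 ms window:
 * C_PREL(2) requests a tolerance of 1/4 of the interval, so nearby
 * callouts can be coalesced into fewer timer interrupts.
 */
callout_reset_sbt(&sc->sc_callout, 50 * SBT_1MS, 0,
    mydrv_tick, sc, C_PREL(2));

/* Alternatively, pass an explicit precision instead of C_PREL(): */
callout_reset_sbt(&sc->sc_callout, 50 * SBT_1MS, SBT_1MS,
    mydrv_tick, sc, 0);
```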
@ -360,65 +314,215 @@ Align the timeouts to
|
|||||||
calls if possible.
|
calls if possible.
|
||||||
.El
|
.El
|
||||||
.Pp
|
.Pp
|
||||||
The
|
.Ft int
|
||||||
.Fn callout_reset
|
.Fn callout_reset_sbt_curcpu "struct callout *c" "sbintime_t sbt" \
|
||||||
functions accept a
|
"sbintime_t pr" "callout_func_t *func" "void *arg" "int flags"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset_sbt
|
||||||
|
function except the callback function given by the
|
||||||
.Fa func
|
.Fa func
|
||||||
argument which identifies the function to be called when the time expires.
|
argument will be executed on the same CPU which called this function.
|
||||||
It must be a pointer to a function that takes a single
|
A change in the CPU selection can happen if the callout has a lock
|
||||||
.Fa void *
|
associated with it and is locked when this function is called.
|
||||||
|
A change in the CPU selection cannot happen if this function is
|
||||||
|
re-scheduled inside a callout function.
|
||||||
|
Else the callback function given by the
|
||||||
|
.Fa func
|
||||||
|
argument will be executed on the same CPU like previously done.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_reset_sbt_on "struct callout *c" "sbintime_t sbt" \
|
||||||
|
"sbintime_t pr" "callout_func_t *func" "void *arg" "int cpu" "int flags"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset_sbt
|
||||||
|
function except the callback function given by the
|
||||||
|
.Fa func
|
||||||
|
argument will be executed on the CPU given by the
|
||||||
|
.Fa cpu
|
||||||
argument.
|
argument.
|
||||||
Upon invocation,
|
A change in the CPU selection can happen if the callout has a lock
|
||||||
|
associated with it and is locked when this function is called.
|
||||||
|
A change in the CPU selection cannot happen if this function is
|
||||||
|
re-scheduled inside a callout function.
|
||||||
|
Else the callback function given by the
|
||||||
.Fa func
|
.Fa func
|
||||||
will receive
|
argument will be executed on the same CPU like previously done.
|
||||||
.Fa arg
|
.Pp
|
||||||
as its only argument.
|
.Ft int
|
||||||
The
|
.Fn callout_schedule "struct callout *c" "int ticks"
|
||||||
.Fn callout_schedule
|
This function works the same like the
|
||||||
functions reuse the
|
|
||||||
.Fa func
|
|
||||||
and
|
|
||||||
.Fa arg
|
|
||||||
arguments from the previous callout.
|
|
||||||
Note that one of the
|
|
||||||
.Fn callout_reset
|
.Fn callout_reset
|
||||||
functions must always be called to initialize
|
function except it re-uses the callback function and the callback argument
|
||||||
|
already stored in the
|
||||||
|
.Pq struct callout
|
||||||
|
structure.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_schedule_curcpu "struct callout *c" "int ticks"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset_curcpu
|
||||||
|
function except it re-uses the callback function and the callback argument
|
||||||
|
already stored in the
|
||||||
|
.Pq struct callout
|
||||||
|
structure.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_schedule_on "struct callout *c" "int ticks" "int cpu"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset_on
|
||||||
|
function except it re-uses the callback function and the callback argument
|
||||||
|
already stored in the
|
||||||
|
.Pq struct callout
|
||||||
|
structure.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_schedule_sbt "struct callout *c" "sbintime_t sbt" \
|
||||||
|
"sbintime_t pr" "int flags"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset_sbt
|
||||||
|
function except it re-uses the callback function and the callback argument
|
||||||
|
already stored in the
|
||||||
|
.Pq struct callout
|
||||||
|
structure.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_schedule_sbt_curcpu "struct callout *c" "sbintime_t sbt" \
|
||||||
|
"sbintime_t pr" "int flags"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset_sbt_curcpu
|
||||||
|
function except it re-uses the callback function and the callback argument
|
||||||
|
already stored in the
|
||||||
|
.Pq struct callout
|
||||||
|
structure.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_schedule_sbt_on "struct callout *c" "sbintime_t sbt" \
|
||||||
|
"sbintime_t pr" "int cpu" "int flags"
|
||||||
|
This function works the same like the
|
||||||
|
.Fn callout_reset_sbt_on
|
||||||
|
function except it re-uses the callback function and the callback argument
|
||||||
|
already stored in the
|
||||||
|
.Pq struct callout
|
||||||
|
structure.
|
||||||
|
.Sh CHECKING THE STATE OF CALLOUTS
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_pending "struct callout *c"
|
||||||
|
This function returns non-zero if the callout pointed to by the
|
||||||
|
.Fa c
|
||||||
|
argument is pending for callback.
|
||||||
|
Else this function returns zero.
|
||||||
|
This function returns zero when inside the callout function if the
|
||||||
|
callout is not re-scheduled.
|
||||||
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_active "struct callout *c"
|
||||||
|
This function is deprecated and returns non-zero if the callout
|
||||||
|
pointed to by the
|
||||||
|
.Fa c
|
||||||
|
argument was scheduled in the past.
|
||||||
|
Else this function returns zero.
|
||||||
|
This function also returns zero after the
|
||||||
|
.Fn callout_deactivate
|
||||||
|
or the
|
||||||
|
.Fn callout_stop
|
||||||
|
or the
|
||||||
|
.Fn callout_drain
|
||||||
|
or the
|
||||||
|
.Fn callout_drain_async
|
||||||
|
function is called on the same callout as given by the
|
||||||
|
.Fa c
|
||||||
|
argument.
|
||||||
|
.Pp
|
||||||
|
.Ft void
|
||||||
|
.Fn callout_deactivate "struct callout *c"
|
||||||
|
This function is deprecated and ensures that subsequent calls to the
|
||||||
|
.Fn callout_activate
|
||||||
|
function returns zero until the callout is scheduled again.
|
||||||
|
.Sh STOPPING CALLOUTS
|
||||||
|
.Ft void
|
||||||
|
.Fn untimeout "timeout_t *func" "void *arg" "struct callout_handle handle"
|
||||||
|
This function is deprecated and cancels the timeout associated with the
|
||||||
|
.Fa handle
|
||||||
|
argument using the function pointed to by the
|
||||||
.Fa func
|
.Fa func
|
||||||
and
|
argument and having the
|
||||||
.Fa arg
|
.Fa arg
|
||||||
before one of the
|
arguments to validate the handle.
|
||||||
.Fn callout_schedule
|
If the handle does not correspond to a timeout with
|
||||||
functions can be used.
|
the function
|
||||||
|
.Fa func
|
||||||
|
taking the argument
|
||||||
|
.Fa arg
|
||||||
|
no action is taken. The
|
||||||
|
.Fa handle
|
||||||
|
must be initialised by a previous call to
|
||||||
|
.Fn timeout ,
|
||||||
|
.Fn callout_handle_init
|
||||||
|
or assigned the value of
|
||||||
|
.Fn CALLOUT_HANDLE_INITIALIZER "&handle"
|
||||||
|
before being passed to
|
||||||
|
.Fn untimeout .
|
||||||
|
The behavior of calling
|
||||||
|
.Fn untimeout
|
||||||
|
with an uninitialised handle
|
||||||
|
is undefined.
|
||||||
.Pp
|
.Pp
|
||||||
The callout subsystem provides a softclock thread for each CPU in the system.
|
.Ft int
|
||||||
Callouts are assigned to a single CPU and are executed by the softclock thread
|
.Fn callout_stop "struct callout *c"
|
||||||
for that CPU.
|
This function is used to stop a timeout function invocation associated with the callout pointed to by the
|
||||||
Initially,
|
.Fa c
|
||||||
callouts are assigned to CPU 0.
|
argument, in a non-blocking fashion.
|
||||||
The
|
This function can be called multiple times in a row with no side effects, even if the callout is already stopped. This function however should not be called before the callout has been initialised.
|
||||||
.Fn callout_reset_on ,
|
This function returns a non-zero value if the given callout was pending and
|
||||||
.Fn callout_reset_sbt_on ,
|
the callback function was prevented from being called.
|
||||||
.Fn callout_schedule_on
|
Else a value of zero is returned.
|
||||||
and
|
If a lock is associated with the callout given by the
|
||||||
.Fn callout_schedule_sbt_on
|
.Fa c
|
||||||
functions assign the callout to CPU
|
argument and it is exclusivly locked when this function is called, the
|
||||||
.Fa cpu .
|
.Fn callout_stop
|
||||||
The
|
function will always ensure that the callback function is never reached.
|
||||||
.Fn callout_reset_curcpu ,
|
In other words the callout will be atomically stopped.
|
||||||
.Fn callout_reset_sbt_curpu ,
|
Else there is no such guarantee.
|
||||||
.Fn callout_schedule_curcpu
|
.Sh DRAINING CALLOUTS
|
||||||
and
|
.Ft int
|
||||||
.Fn callout_schedule_sbt_curcpu
|
.Fn callout_drain "struct callout *c"
|
||||||
functions assign the callout to the current CPU.
|
This function works the same like the
|
||||||
The
|
.Fn callout_stop
|
||||||
.Fn callout_reset ,
|
function except it ensures that all callback functions have returned and there are no more references to the callout pointed to by the
|
||||||
.Fn callout_reset_sbt ,
|
.Fa c
|
||||||
.Fn callout_schedule
|
argument inside the callout subsystem before it returns.
|
||||||
and
|
Also this function ensures that the lock, if any, associated with the
|
||||||
.Fn callout_schedule_sbt
|
callout is no longer being used.
|
||||||
functions schedule the callout to execute in the softclock thread of the CPU
|
When this function returns, it is safe to free the callout structure pointed to by the
|
||||||
to which it is currently assigned.
|
.Fa c
|
||||||
|
argument.
|
||||||
.Pp
|
.Pp
|
||||||
|
.Ft int
|
||||||
|
.Fn callout_drain_async "struct callout *c" "callout_func_t *fn" "void *arg"
|
||||||
|
This function is non-blocking and works the same like the
|
||||||
|
.Fn callout_stop
|
||||||
|
function except if it returns non-zero it means the callback function pointed to by the
|
||||||
|
.Fa fn
|
||||||
|
argument will be called back with the
|
||||||
|
.Fa arg
|
||||||
|
argument when all references to the callout pointed to by the
|
||||||
|
.Fa c
|
||||||
|
argument are gone.
|
||||||
|
If this function returns zero, it is safe to free the callout structure pointed to by the
|
||||||
|
.Fa c
|
||||||
|
argument right away.
|
||||||
|
.Sh CALLOUT FUNCTION RESTRICTIONS
|
||||||
|
Callout functions must not sleep.
|
||||||
|
They may not acquire sleepable locks, wait on condition variables,
|
||||||
|
perform blocking allocation requests, or invoke any other action that
|
||||||
|
might sleep.
|
||||||
|
.Sh CALLOUT SUBSYSTEM INTERNALS
|
||||||
|
The callout subsystem has its own set of spinlocks to protect its internal state.
|
||||||
|
The callout subsystem provides a softclock thread for each CPU in the
|
||||||
|
system.
|
||||||
|
Callouts are assigned to a single CPU and are executed by the
|
||||||
|
softclock thread for that CPU.
|
||||||
|
Initially, callouts are assigned to CPU 0.
|
||||||
Softclock threads are not pinned to their respective CPUs by default.
|
Softclock threads are not pinned to their respective CPUs by default.
|
||||||
The softclock thread for CPU 0 can be pinned to CPU 0 by setting the
|
The softclock thread for CPU 0 can be pinned to CPU 0 by setting the
|
||||||
.Va kern.pin_default_swi
|
.Va kern.pin_default_swi
|
||||||
@ -427,50 +531,7 @@ Softclock threads for CPUs other than zero can be pinned to their
|
|||||||
respective CPUs by setting the
|
respective CPUs by setting the
|
||||||
.Va kern.pin_pcpu_swi
|
.Va kern.pin_pcpu_swi
|
||||||
loader tunable to a non-zero value.
|
loader tunable to a non-zero value.
|
||||||
.Pp
|
.Sh "AVOIDING RACE CONDITIONS"
|
||||||
The macros
|
|
||||||
.Fn callout_pending ,
|
|
||||||
.Fn callout_active
|
|
||||||
and
|
|
||||||
.Fn callout_deactivate
|
|
||||||
provide access to the current state of the callout.
|
|
||||||
The
|
|
||||||
.Fn callout_pending
|
|
||||||
macro checks whether a callout is
|
|
||||||
.Em pending ;
|
|
||||||
a callout is considered
|
|
||||||
.Em pending
|
|
||||||
when a timeout has been set but the time has not yet arrived.
|
|
||||||
Note that once the timeout time arrives and the callout subsystem
|
|
||||||
starts to process this callout,
|
|
||||||
.Fn callout_pending
|
|
||||||
will return
|
|
||||||
.Dv FALSE
|
|
||||||
even though the callout function may not have finished
|
|
||||||
.Pq or even begun
|
|
||||||
executing.
|
|
||||||
The
|
|
||||||
.Fn callout_active
|
|
||||||
macro checks whether a callout is marked as
|
|
||||||
.Em active ,
|
|
||||||
and the
|
|
||||||
.Fn callout_deactivate
|
|
||||||
macro clears the callout's
|
|
||||||
.Em active
|
|
||||||
flag.
|
|
||||||
The callout subsystem marks a callout as
|
|
||||||
.Em active
|
|
||||||
when a timeout is set and it clears the
|
|
||||||
.Em active
|
|
||||||
flag in
|
|
||||||
.Fn callout_stop
|
|
||||||
and
|
|
||||||
.Fn callout_drain ,
|
|
||||||
but it
|
|
||||||
.Em does not
|
|
||||||
clear it when a callout expires normally via the execution of the
|
|
||||||
callout function.
|
|
||||||
.Ss "Avoiding Race Conditions"
|
|
||||||
The callout subsystem invokes callout functions from its own thread
|
The callout subsystem invokes callout functions from its own thread
|
||||||
context.
|
context.
|
||||||
Without some kind of synchronization,
|
Without some kind of synchronization,
|
||||||
@@ -487,7 +548,7 @@ synchronization concerns.
 The first approach is preferred as it is the simplest:
 .Bl -enum -offset indent
 .It
-Callouts can be associated with a specific lock when they are initialized
+Callouts can be associated with a specific lock when they are initialised
 by
 .Fn callout_init_mtx ,
 .Fn callout_init_rm ,

@@ -508,7 +569,7 @@ or
 .Fn callout_schedule
 functions to provide this safety.
 .Pp
-A callout initialized via
+A callout initialised via
 .Fn callout_init
 with
 .Fa mpsafe

@@ -531,9 +592,8 @@ function families
 .Pc
 indicates whether or not the callout was removed.
 If it is known that the callout was set and the callout function has
-not yet executed, then a return value of
-.Dv FALSE
-indicates that the callout function is about to be called.
+not yet executed, then a return value of zero indicates that the
+callout function is about to be called.
 For example:
 .Bd -literal -offset indent
 if (sc->sc_flags & SCFLG_CALLOUT_RUNNING) {

@@ -589,16 +649,14 @@ The callout function should first check the
 .Em pending
 flag and return without action if
 .Fn callout_pending
-returns
-.Dv TRUE .
+returns non-zero.
 This indicates that the callout was rescheduled using
 .Fn callout_reset
 just before the callout function was invoked.
 If
 .Fn callout_active
-returns
-.Dv FALSE
-then the callout function should also return without action.
+returns zero then the callout function should also return without
+action.
 This indicates that the callout has been stopped.
 Finally, the callout function should call
 .Fn callout_deactivate
@ -668,129 +726,13 @@ a callout should always be drained prior to destroying its associated lock
|
|||||||
or releasing the storage for the callout structure.
|
or releasing the storage for the callout structure.
|
||||||
.Sh LEGACY API
|
.Sh LEGACY API
|
||||||
.Bf Sy
|
.Bf Sy
|
||||||
The functions below are a legacy API that will be removed in a future release.
|
The
|
||||||
|
.Fn timeout
|
||||||
|
and
|
||||||
|
.Fn untimeout
|
||||||
|
functions are a legacy API that will be removed in a future release.
|
||||||
New code should not use these routines.
|
New code should not use these routines.
|
||||||
.Ef
|
.Ef
|
||||||
.Pp
|
|
||||||
The function
|
|
||||||
.Fn timeout
|
|
||||||
schedules a call to the function given by the argument
|
|
||||||
.Fa func
|
|
||||||
to take place after
|
|
||||||
.Fa ticks Ns No /hz
|
|
||||||
seconds.
|
|
||||||
Non-positive values of
|
|
||||||
.Fa ticks
|
|
||||||
are silently converted to the value
|
|
||||||
.Sq 1 .
|
|
||||||
.Fa func
|
|
||||||
should be a pointer to a function that takes a
|
|
||||||
.Fa void *
|
|
||||||
argument.
|
|
||||||
Upon invocation,
|
|
||||||
.Fa func
|
|
||||||
will receive
|
|
||||||
.Fa arg
|
|
||||||
as its only argument.
|
|
||||||
The return value from
|
|
||||||
.Fn timeout
|
|
||||||
is a
|
|
||||||
.Ft struct callout_handle
|
|
||||||
which can be used in conjunction with the
|
|
||||||
.Fn untimeout
|
|
||||||
function to request that a scheduled timeout be canceled.
|
|
||||||
.Pp
|
|
||||||
The function
|
|
||||||
.Fn callout_handle_init
|
|
||||||
can be used to initialize a handle to a state which will cause
|
|
||||||
any calls to
|
|
||||||
.Fn untimeout
|
|
||||||
with that handle to return with no side
|
|
||||||
effects.
|
|
||||||
.Pp
|
|
||||||
Assigning a callout handle the value of
|
|
||||||
.Fn CALLOUT_HANDLE_INITIALIZER
|
|
||||||
performs the same function as
|
|
||||||
.Fn callout_handle_init
|
|
||||||
and is provided for use on statically declared or global callout handles.
|
|
||||||
.Pp
|
|
||||||
The function
|
|
||||||
.Fn untimeout
|
|
||||||
cancels the timeout associated with
|
|
||||||
.Fa handle
|
|
||||||
using the
|
|
||||||
.Fa func
|
|
||||||
and
|
|
||||||
.Fa arg
|
|
||||||
arguments to validate the handle.
|
|
||||||
If the handle does not correspond to a timeout with
|
|
||||||
the function
|
|
||||||
.Fa func
|
|
||||||
taking the argument
|
|
||||||
.Fa arg
|
|
||||||
no action is taken.
|
|
||||||
.Fa handle
|
|
||||||
must be initialized by a previous call to
|
|
||||||
.Fn timeout ,
|
|
||||||
.Fn callout_handle_init ,
|
|
||||||
or assigned the value of
|
|
||||||
.Fn CALLOUT_HANDLE_INITIALIZER "&handle"
|
|
||||||
before being passed to
|
|
||||||
.Fn untimeout .
|
|
||||||
The behavior of calling
|
|
||||||
.Fn untimeout
|
|
||||||
with an uninitialized handle
|
|
||||||
is undefined.
|
|
||||||
.Pp
|
|
||||||
As handles are recycled by the system, it is possible (although unlikely)
|
|
||||||
that a handle from one invocation of
|
|
||||||
.Fn timeout
|
|
||||||
may match the handle of another invocation of
|
|
||||||
.Fn timeout
|
|
||||||
if both calls used the same function pointer and argument, and the first
|
|
||||||
timeout is expired or canceled before the second call.
|
|
||||||
The timeout facility offers O(1) running time for
|
|
||||||
.Fn timeout
|
|
||||||
and
|
|
||||||
.Fn untimeout .
|
|
||||||
Timeouts are executed from
|
|
||||||
.Fn softclock
|
|
||||||
with the
|
|
||||||
.Va Giant
|
|
||||||
lock held.
|
|
||||||
Thus they are protected from re-entrancy.
|
|
||||||
.Sh RETURN VALUES
|
|
||||||
The
|
|
||||||
.Fn callout_active
|
|
||||||
macro returns the state of a callout's
|
|
||||||
.Em active
|
|
||||||
flag.
|
|
||||||
.Pp
|
|
||||||
The
|
|
||||||
.Fn callout_pending
|
|
||||||
macro returns the state of a callout's
|
|
||||||
.Em pending
|
|
||||||
flag.
|
|
||||||
.Pp
|
|
||||||
The
|
|
||||||
.Fn callout_reset
|
|
||||||
and
|
|
||||||
.Fn callout_schedule
|
|
||||||
function families return non-zero if the callout was pending before the new
|
|
||||||
function invocation was scheduled.
|
|
||||||
.Pp
|
|
||||||
The
|
|
||||||
.Fn callout_stop
|
|
||||||
and
|
|
||||||
.Fn callout_drain
|
|
||||||
functions return non-zero if the callout was still pending when it was
|
|
||||||
called or zero otherwise.
|
|
||||||
The
|
|
||||||
.Fn timeout
|
|
||||||
function returns a
|
|
||||||
.Ft struct callout_handle
|
|
||||||
that can be passed to
|
|
||||||
.Fn untimeout .
|
|
||||||
.Sh HISTORY
|
.Sh HISTORY
|
||||||
The current timeout and untimeout routines are based on the work of
|
The current timeout and untimeout routines are based on the work of
|
||||||
.An Adam M. Costello
|
.An Adam M. Costello
|
||||||
@ -815,4 +757,4 @@ The current implementation replaces the long standing
|
|||||||
.Bx
|
.Bx
|
||||||
linked list
|
linked list
|
||||||
callout mechanism which offered O(n) insertion and removal running time
|
callout mechanism which offered O(n) insertion and removal running time
|
||||||
but did not generate or require handles for untimeout operations.
|
and did not generate or require handles for untimeout operations.
|
||||||
|
@@ -504,7 +504,8 @@ proc0_init(void *dummy __unused)

 callout_init_mtx(&p->p_itcallout, &p->p_mtx, 0);
 callout_init_mtx(&p->p_limco, &p->p_mtx, 0);
-callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
+mtx_init(&td->td_slpmutex, "td_slpmutex", NULL, MTX_SPIN);
+callout_init_mtx(&td->td_slpcallout, &td->td_slpmutex, 0);

 /* Create credentials. */
 p->p_ucred = crget();

@@ -313,15 +313,13 @@ _cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt,
 DROP_GIANT();

 sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
+sleepq_release(cvp);
 sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
 if (lock != &Giant.lock_object) {
-if (class->lc_flags & LC_SLEEPABLE)
-sleepq_release(cvp);
 WITNESS_SAVE(lock, lock_witness);
 lock_state = class->lc_unlock(lock);
-if (class->lc_flags & LC_SLEEPABLE)
-sleepq_lock(cvp);
 }
+sleepq_lock(cvp);
 rval = sleepq_timedwait(cvp, 0);

 #ifdef KTRACE

@@ -383,15 +381,13 @@ _cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock,

 sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR |
 SLEEPQ_INTERRUPTIBLE, 0);
+sleepq_release(cvp);
 sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
 if (lock != &Giant.lock_object) {
-if (class->lc_flags & LC_SLEEPABLE)
-sleepq_release(cvp);
 WITNESS_SAVE(lock, lock_witness);
 lock_state = class->lc_unlock(lock);
-if (class->lc_flags & LC_SLEEPABLE)
-sleepq_lock(cvp);
 }
+sleepq_lock(cvp);
 rval = sleepq_timedwait_sig(cvp, 0);

 #ifdef KTRACE

@@ -210,9 +210,11 @@ sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
 GIANT_SAVE();
 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
 SLEEPQ_INTERRUPTIBLE : 0), queue);
-if ((flags & LK_TIMELOCK) && timo)
+if ((flags & LK_TIMELOCK) && timo) {
+sleepq_release(&lk->lock_object);
 sleepq_set_timeout(&lk->lock_object, timo);
+sleepq_lock(&lk->lock_object);
+}
 /*
 * Decisional switch for real sleeping.
 */

@@ -93,8 +93,6 @@ SCHED_STAT_DEFINE_VAR(turnstile,
 &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
 SCHED_STAT_DEFINE_VAR(sleepq,
 &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
-SCHED_STAT_DEFINE_VAR(sleepqtimo,
-&DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
 SCHED_STAT_DEFINE_VAR(relinquish,
 &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
 SCHED_STAT_DEFINE_VAR(needresched,

@@ -236,12 +236,16 @@ _sleep(void *ident, struct lock_object *lock, int priority,
 * return from cursig().
 */
 sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
-if (sbt != 0)
-sleepq_set_timeout_sbt(ident, sbt, pr, flags);
 if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
 sleepq_release(ident);
 WITNESS_SAVE(lock, lock_witness);
 lock_state = class->lc_unlock(lock);
+if (sbt != 0)
+sleepq_set_timeout_sbt(ident, sbt, pr, flags);
+sleepq_lock(ident);
+} else if (sbt != 0) {
+sleepq_release(ident);
+sleepq_set_timeout_sbt(ident, sbt, pr, flags);
 sleepq_lock(ident);
 }
 if (sbt != 0 && catch)

@@ -306,8 +310,11 @@ msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
 * We put ourselves on the sleep queue and start our timeout.
 */
 sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
-if (sbt != 0)
+if (sbt != 0) {
+sleepq_release(ident);
 sleepq_set_timeout_sbt(ident, sbt, pr, flags);
+sleepq_lock(ident);
+}

 /*
 * Can't call ktrace with any spin locks held so it can lock the

@@ -149,6 +149,9 @@ thread_ctor(void *mem, int size, void *arg, int flags)
 audit_thread_alloc(td);
 #endif
 umtx_thread_alloc(td);

+mtx_init(&td->td_slpmutex, "td_slpmutex", NULL, MTX_SPIN);
+callout_init_mtx(&td->td_slpcallout, &td->td_slpmutex, 0);
 return (0);
 }

@@ -162,6 +165,10 @@ thread_dtor(void *mem, int size, void *arg)

 td = (struct thread *)mem;

+/* make sure to drain any use of the "td->td_slpcallout" */
+callout_drain(&td->td_slpcallout);
+mtx_destroy(&td->td_slpmutex);
+
 #ifdef INVARIANTS
 /* Verify that this thread is in a safe state to free. */
 switch (td->td_state) {

@@ -544,7 +551,6 @@ thread_link(struct thread *td, struct proc *p)
 LIST_INIT(&td->td_lprof[0]);
 LIST_INIT(&td->td_lprof[1]);
 sigqueue_init(&td->td_sigqueue, p);
-callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
 TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
 p->p_numthreads++;
 }
File diff suppressed because it is too large.
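The suppressed file is presumably the rewritten callout code itself. Based purely on the callout_drain_async() description in the updated timeout(9) text above, a hedged sketch of the non-blocking teardown pattern it enables might look like the following; the mydev_* names and softc layout are invented for illustration.

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/malloc.h>

/* Hypothetical consumer; sc_callout was set up with callout_init_mtx(). */
struct mydev_softc {
	struct callout	sc_callout;
	/* ... other per-device state ... */
};

static void
mydev_free_cb(void *arg)
{
	struct mydev_softc *sc = arg;

	/* The last reference inside the callout subsystem is gone. */
	free(sc, M_DEVBUF);
}

static void
mydev_destroy(struct mydev_softc *sc)
{
	/* Non-blocking teardown, per the callout_drain_async() text above. */
	if (callout_drain_async(&sc->sc_callout, &mydev_free_cb, sc) == 0)
		free(sc, M_DEVBUF);	/* nothing pending; free immediately */
	/* else mydev_free_cb() frees "sc" once the callout is fully idle */
}
```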
@@ -152,7 +152,8 @@ static uma_zone_t sleepq_zone;
 */
 static int sleepq_catch_signals(void *wchan, int pri);
 static int sleepq_check_signals(void);
-static int sleepq_check_timeout(void);
+static int sleepq_check_timeout(struct thread *);
+static void sleepq_stop_timeout(struct thread *);
 #ifdef INVARIANTS
 static void sleepq_dtor(void *mem, int size, void *arg);
 #endif

@@ -373,17 +374,14 @@ void
 sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
 int flags)
 {
-struct sleepqueue_chain *sc;
 struct thread *td;

 td = curthread;
-sc = SC_LOOKUP(wchan);
-mtx_assert(&sc->sc_lock, MA_OWNED);
-MPASS(TD_ON_SLEEPQ(td));
-MPASS(td->td_sleepqueue == NULL);
-MPASS(wchan != NULL);
+mtx_lock_spin(&td->td_slpmutex);
 callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
 sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
+mtx_unlock_spin(&td->td_slpmutex);
 }

 /*

@@ -559,11 +557,8 @@ sleepq_switch(void *wchan, int pri)
 * Check to see if we timed out.
 */
 static int
-sleepq_check_timeout(void)
+sleepq_check_timeout(struct thread *td)
 {
-struct thread *td;
-
-td = curthread;
 THREAD_LOCK_ASSERT(td, MA_OWNED);

 /*

@@ -573,27 +568,20 @@ sleepq_check_timeout(void)
 td->td_flags &= ~TDF_TIMEOUT;
 return (EWOULDBLOCK);
 }
-
-/*
- * If TDF_TIMOFAIL is set, the timeout ran after we had
- * already been woken up.
- */
-if (td->td_flags & TDF_TIMOFAIL)
-td->td_flags &= ~TDF_TIMOFAIL;
-
-/*
- * If callout_stop() fails, then the timeout is running on
- * another CPU, so synchronize with it to avoid having it
- * accidentally wake up a subsequent sleep.
- */
-else if (callout_stop(&td->td_slpcallout) == 0) {
-td->td_flags |= TDF_TIMEOUT;
-TD_SET_SLEEPING(td);
-mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
-}
 return (0);
 }

+/*
+ * Atomically stop the timeout by using a mutex.
+ */
+static void
+sleepq_stop_timeout(struct thread *td)
+{
+mtx_lock_spin(&td->td_slpmutex);
+callout_stop(&td->td_slpcallout);
+mtx_unlock_spin(&td->td_slpmutex);
+}
+
 /*
 * Check to see if we were awoken by a signal.
 */

@@ -664,9 +652,11 @@ sleepq_timedwait(void *wchan, int pri)
 MPASS(!(td->td_flags & TDF_SINTR));
 thread_lock(td);
 sleepq_switch(wchan, pri);
-rval = sleepq_check_timeout();
+rval = sleepq_check_timeout(td);
 thread_unlock(td);

+sleepq_stop_timeout(td);
+
 return (rval);
 }

@@ -677,12 +667,18 @@ sleepq_timedwait(void *wchan, int pri)
 int
 sleepq_timedwait_sig(void *wchan, int pri)
 {
+struct thread *td;
 int rcatch, rvalt, rvals;

+td = curthread;
+
 rcatch = sleepq_catch_signals(wchan, pri);
-rvalt = sleepq_check_timeout();
+rvalt = sleepq_check_timeout(td);
 rvals = sleepq_check_signals();
-thread_unlock(curthread);
+thread_unlock(td);
+
+sleepq_stop_timeout(td);
+
 if (rcatch)
 return (rcatch);
 if (rvals)

@@ -889,64 +885,49 @@ sleepq_broadcast(void *wchan, int flags, int pri, int queue)
 static void
 sleepq_timeout(void *arg)
 {
-struct sleepqueue_chain *sc;
-struct sleepqueue *sq;
-struct thread *td;
-void *wchan;
-int wakeup_swapper;
+struct thread *td = arg;
+int wakeup_swapper = 0;

-td = arg;
-wakeup_swapper = 0;
 CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

-/*
- * First, see if the thread is asleep and get the wait channel if
- * it is.
- */
+/* Handle the three cases which can happen */
 thread_lock(td);
-if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
-wchan = td->td_wchan;
-sc = SC_LOOKUP(wchan);
-THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
-sq = sleepq_lookup(wchan);
-MPASS(sq != NULL);
-td->td_flags |= TDF_TIMEOUT;
-wakeup_swapper = sleepq_resume_thread(sq, td, 0);
-thread_unlock(td);
-if (wakeup_swapper)
-kick_proc0();
-return;
-}
-
-/*
- * If the thread is on the SLEEPQ but isn't sleeping yet, it
- * can either be on another CPU in between sleepq_add() and
- * one of the sleepq_*wait*() routines or it can be in
- * sleepq_catch_signals().
- */
 if (TD_ON_SLEEPQ(td)) {
-td->td_flags |= TDF_TIMEOUT;
-thread_unlock(td);
-return;
-}
+if (TD_IS_SLEEPING(td)) {
+struct sleepqueue_chain *sc;
+struct sleepqueue *sq;
+void *wchan;

 /*
-* Now check for the edge cases. First, if TDF_TIMEOUT is set,
-* then the other thread has already yielded to us, so clear
-* the flag and resume it. If TDF_TIMEOUT is not set, then the
-* we know that the other thread is not on a sleep queue, but it
-* hasn't resumed execution yet. In that case, set TDF_TIMOFAIL
-* to let it know that the timeout has already run and doesn't
-* need to be canceled.
-*/
-if (td->td_flags & TDF_TIMEOUT) {
+* Case I - thread is asleep and needs to be
+* awoken:
+*/
+wchan = td->td_wchan;
+sc = SC_LOOKUP(wchan);
+THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
+sq = sleepq_lookup(wchan);
+MPASS(sq != NULL);
+td->td_flags |= TDF_TIMEOUT;
|
||||||
MPASS(TD_IS_SLEEPING(td));
|
wakeup_swapper = sleepq_resume_thread(sq, td, 0);
|
||||||
td->td_flags &= ~TDF_TIMEOUT;
|
} else {
|
||||||
TD_CLR_SLEEPING(td);
|
/*
|
||||||
wakeup_swapper = setrunnable(td);
|
* Case II - cancel going to sleep by setting
|
||||||
} else
|
* the timeout flag because the target thread
|
||||||
td->td_flags |= TDF_TIMOFAIL;
|
* is not asleep yet. It can be on another CPU
|
||||||
|
* in between sleepq_add() and one of the
|
||||||
|
* sleepq_*wait*() routines or it can be in
|
||||||
|
* sleepq_catch_signals().
|
||||||
|
*/
|
||||||
|
td->td_flags |= TDF_TIMEOUT;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
/*
|
||||||
|
* Case III - thread is already woken up by a wakeup
|
||||||
|
* call and should not timeout. Nothing to do!
|
||||||
|
*/
|
||||||
|
}
|
||||||
thread_unlock(td);
|
thread_unlock(td);
|
||||||
if (wakeup_swapper)
|
if (wakeup_swapper)
|
||||||
kick_proc0();
|
kick_proc0();
|
||||||
|
@@ -105,7 +105,9 @@ _wait_for_timeout_common(struct completion *c, long timeout, int flags)
 		if (c->done)
 			break;
 		sleepq_add(c, NULL, "completion", flags, 0);
+		sleepq_release(c);
 		sleepq_set_timeout(c, end - ticks);
+		sleepq_lock(c);
 		if (flags & SLEEPQ_INTERRUPTIBLE) {
 			if (sleepq_timedwait_sig(c, 0) != 0)
 				return (-ERESTARTSYS);
@@ -46,19 +46,30 @@ LIST_HEAD(callout_list, callout);
 SLIST_HEAD(callout_slist, callout);
 TAILQ_HEAD(callout_tailq, callout);
 
+typedef void callout_func_t(void *);
+
+struct callout_args {
+	sbintime_t time;		/* absolute time for the event */
+	sbintime_t precision;		/* delta allowed wrt opt */
+	void	*arg;			/* function argument */
+	callout_func_t *func;		/* function to call */
+	int	flags;			/* flags passed to callout_reset() */
+	int	cpu;			/* CPU we're scheduled on */
+};
+
 struct callout {
 	union {
 		LIST_ENTRY(callout) le;
 		SLIST_ENTRY(callout) sle;
 		TAILQ_ENTRY(callout) tqe;
 	} c_links;
-	sbintime_t c_time;		/* ticks to the event */
+	sbintime_t c_time;		/* absolute time for the event */
 	sbintime_t c_precision;		/* delta allowed wrt opt */
 	void	*c_arg;			/* function argument */
-	void	(*c_func)(void *);	/* function to call */
-	struct lock_object *c_lock;	/* lock to handle */
+	callout_func_t *c_func;		/* function to call */
+	struct lock_object *c_lock;	/* callback lock */
 	int	c_flags;		/* state of this entry */
-	volatile int c_cpu;		/* CPU we're scheduled on */
+	int	c_cpu;			/* CPU we're scheduled on */
 };
 
 #endif
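
Note (added for illustration, not part of the commit): the callout_func_t typedef introduced above gives callout handlers and the API prototypes a single shared function type. A minimal sketch of a consumer follows; the my_softc, my_timer_cb and my_timer_start names are invented, and only the long-standing callout_init_mtx()/callout_reset() interfaces are assumed.

/*
 * Illustrative sketch only: a driver-style user of callout_func_t.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct my_softc {
	struct mtx	mtx;		/* lock handed to callout_init_mtx() */
	struct callout	timer;		/* periodic timer */
};

/* The handler can be declared through the typedef... */
static callout_func_t my_timer_cb;

/* ...and defined with the matching "void (void *)" signature. */
static void
my_timer_cb(void *arg)
{
	struct my_softc *sc = arg;

	mtx_assert(&sc->mtx, MA_OWNED);		/* held by the callout code */
	/* ... periodic work ... */
	callout_reset(&sc->timer, hz, my_timer_cb, sc);	/* rearm in ~1 second */
}

static void
my_timer_start(struct my_softc *sc)
{
	mtx_init(&sc->mtx, "my_timer", NULL, MTX_DEF);
	callout_init_mtx(&sc->timer, &sc->mtx, 0);
	mtx_lock(&sc->mtx);
	callout_reset(&sc->timer, hz, my_timer_cb, sc);
	mtx_unlock(&sc->mtx);
}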
@@ -45,10 +45,12 @@
 #define	CALLOUT_PENDING		0x0004 /* callout is waiting for timeout */
 #define	CALLOUT_MPSAFE		0x0008 /* callout handler is mp safe */
 #define	CALLOUT_RETURNUNLOCKED	0x0010 /* handler returns with mtx unlocked */
-#define	CALLOUT_SHAREDLOCK	0x0020 /* callout lock held in shared mode */
-#define	CALLOUT_DFRMIGRATION	0x0040 /* callout in deferred migration mode */
+#define	CALLOUT_UNUSED_5	0x0020 /* --available-- */
+#define	CALLOUT_DEFRESTART	0x0040 /* callout restart is deferred */
 #define	CALLOUT_PROCESSED	0x0080 /* callout in wheel or processing list? */
 #define	CALLOUT_DIRECT 		0x0100 /* allow exec from hw int context */
+#define	CALLOUT_SET_LC(x)	(((x) & 7) << 16) /* set lock class */
+#define	CALLOUT_GET_LC(x)	(((x) >> 16) & 7) /* get lock class */
 
 #define	C_DIRECT_EXEC		0x0001 /* direct execution of callout */
 #define	C_PRELBITS		7
@@ -65,7 +67,8 @@ struct callout_handle {
 #ifdef _KERNEL
 #define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
-#define	callout_drain(c)	_callout_stop_safe(c, 1)
+int	callout_drain(struct callout *);
+int	callout_drain_async(struct callout *, callout_func_t *, void *);
 void	callout_init(struct callout *, int);
 void	_callout_init_lock(struct callout *, struct lock_object *, int);
 #define	callout_init_mtx(c, mtx, flags) \
@@ -79,7 +82,7 @@ void _callout_init_lock(struct callout *, struct lock_object *, int);
 	    NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
 int	callout_reset_sbt_on(struct callout *, sbintime_t, sbintime_t,
-	    void (*)(void *), void *, int, int);
+	    callout_func_t *, void *, int, int);
 #define	callout_reset_sbt(c, sbt, pr, fn, arg, flags) \
 	callout_reset_sbt_on((c), (sbt), (pr), (fn), (arg), (c)->c_cpu, (flags))
 #define	callout_reset_sbt_curcpu(c, sbt, pr, fn, arg, flags) \
@@ -103,8 +106,7 @@ int	callout_schedule(struct callout *, int);
 int	callout_schedule_on(struct callout *, int, int);
 #define	callout_schedule_curcpu(c, on_tick) \
 	callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
-#define	callout_stop(c)		_callout_stop_safe(c, 0)
-int	_callout_stop_safe(struct callout *, int);
+int	callout_stop(struct callout *);
 void	callout_process(sbintime_t now);
 
 #endif
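
Note (added for illustration, not part of the commit): a rough sketch of how a teardown path might use the callout_drain_async() prototype declared above. The my_obj names are invented, and the assumed semantics -- the supplied function is called exactly once with the given argument after the callout has drained, while a zero return means no callback is pending and the object may be released immediately -- are documented authoritatively in the updated timeout(9) manual page, not here.

/*
 * Illustrative sketch only: asynchronous teardown of a callout-holding object.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/malloc.h>

struct my_obj {
	struct callout	timer;
	/* ... other state ... */
};

/* Assumed to run exactly once when the callout subsystem is done with "timer". */
static void
my_obj_free_cb(void *arg)
{
	struct my_obj *obj = arg;

	free(obj, M_DEVBUF);
}

static void
my_obj_destroy(struct my_obj *obj)
{
	/* callout_stop() is now a real function returning int. */
	(void)callout_stop(&obj->timer);

	/*
	 * Assumption: a non-zero return means my_obj_free_cb() will fire
	 * later and frees the object; zero means nothing is in flight and
	 * it is safe to free it right away.
	 */
	if (callout_drain_async(&obj->timer, &my_obj_free_cb, obj) == 0)
		free(obj, M_DEVBUF);
}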
@@ -58,7 +58,7 @@
  * in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1100053	/* Master, propagated to newvers */
+#define __FreeBSD_version 1100054	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
@@ -308,6 +308,7 @@ struct thread {
 	} td_uretoff;			/* (k) Syscall aux returns. */
 #define td_retval	td_uretoff.tdu_retval
 	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
+	struct mtx	td_slpmutex;	/* (h) Mutex for sleep callout */
 	struct trapframe *td_frame;	/* (k) */
 	struct vm_object *td_kstack_obj;/* (a) Kstack object. */
 	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */
@@ -364,7 +365,7 @@ do { \
 #define	TDF_ALLPROCSUSP	0x00000200 /* suspended by SINGLE_ALLPROC */
 #define	TDF_BOUNDARY	0x00000400 /* Thread suspended at user boundary */
 #define	TDF_ASTPENDING	0x00000800 /* Thread has some asynchronous events. */
-#define	TDF_TIMOFAIL	0x00001000 /* Timeout from sleep after we were awake. */
+#define	TDF_UNUSED12	0x00001000 /* --available-- */
 #define	TDF_SBDRY	0x00002000 /* Stop only on usermode boundary. */
 #define	TDF_UPIBLOCKED	0x00004000 /* Thread blocked on user PI mutex. */
 #define	TDF_NEEDSUSPCHK	0x00008000 /* Thread may need to suspend. */
@@ -704,7 +705,7 @@ struct proc {
 #define	SWT_OWEPREEMPT		2	/* Switching due to opepreempt. */
 #define	SWT_TURNSTILE		3	/* Turnstile contention. */
 #define	SWT_SLEEPQ		4	/* Sleepq wait. */
-#define	SWT_SLEEPQTIMO		5	/* Sleepq timeout wait. */
+#define	SWT_UNUSED5		5	/* --available-- */
 #define	SWT_RELINQUISH		6	/* yield call. */
 #define	SWT_NEEDRESCHED		7	/* NEEDRESCHED was set. */
 #define	SWT_IDLE		8	/* Switching from the idle thread. */