freebsd-nq/module/spl/spl-condvar.c
Brian Behlendorf 93ce2b4ca5 Update build system and packaging
Minimal changes required to integrate the SPL sources into the
ZFS repository build infrastructure and packaging.

Build system and packaging:
  * Renamed SPL_* autoconf m4 macros to ZFS_*.
  * Removed redundant SPL_* autoconf m4 macros.
  * Updated the RPM spec files to remove SPL package dependency.
  * The zfs package obsoletes the spl package, and the zfs-kmod
    package obsoletes the spl-kmod package.
  * The zfs-kmod-devel* packages were updated to add compatibility
    symlinks under /usr/src/spl-x.y.z until all dependent packages
    can be updated.  They will be removed in a future release.
  * Updated copy-builtin script for in-kernel builds.
  * Updated DKMS package to include the spl.ko.
  * Updated stale AUTHORS file to include all contributors.
  * Updated stale COPYRIGHT and included the SPL as an exception.
  * Renamed README.markdown to README.md
  * Renamed OPENSOLARIS.LICENSE to LICENSE.
  * Renamed DISCLAIMER to NOTICE.

Required code changes:
  * Removed redundant HAVE_SPL macro.
  * Removed _BOOT from nvpairs since it does not apply to Linux.
  * Initial header cleanup (removal of empty headers, refactoring).
  * Remove SPL repository clone/build from zimport.sh.
  * Use of DEFINE_RATELIMIT_STATE and DEFINE_SPINLOCK removed due
    to build issues when forcing C99 compilation.
  * Replaced legacy ACCESS_ONCE with READ_ONCE.
  * Include needed headers for `current` and `EXPORT_SYMBOL`.
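
For illustration, the READ_ONCE change follows the standard accessor
swap; this hunk is representative (a sketch), not copied verbatim from
the commit:

    -	m = ACCESS_ONCE(cvp->cv_mutex);
    +	m = READ_ONCE(cvp->cv_mutex);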

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Olaf Faaland <faaland1@llnl.gov>
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
TEST_ZIMPORT_SKIP="yes"
Closes #7556
2018-05-29 16:00:33 -07:00

/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>
#include <linux/compiler_compat.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
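
/*
 * Illustrative usage (a sketch, not part of this module): a condition
 * variable is initialized once, paired with a single kmutex_t for its
 * lifetime, and the predicate is always rechecked in a loop.  The names
 * example_lock, example_cv, and example_ready are hypothetical.
 *
 *	static kmutex_t example_lock;
 *	static kcondvar_t example_cv;
 *	static int example_ready;
 *
 *	cv_init(&example_cv, NULL, CV_DEFAULT, NULL);
 *	mutex_enter(&example_lock);
 *	while (!example_ready)
 *		cv_wait(&example_cv, &example_lock);
 *	mutex_exit(&example_lock);
 */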

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped after prepare_to_wait(); this ensures we
	 * are linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters; a different mutex could now be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing the cvp; otherwise we
	 * could deadlock with a thread holding the mutex and calling
	 * cv_destroy().
	 */
	mutex_enter(mp);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);

#if defined(HAVE_IO_SCHEDULE_TIMEOUT)
#define	spl_io_schedule_timeout(t)	io_schedule_timeout(t)
#else
static void
__cv_wakeup(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static long
spl_io_schedule_timeout(long time_left)
{
	long expire_time = jiffies + time_left;
	struct timer_list timer;

	init_timer(&timer);
	setup_timer(&timer, __cv_wakeup, (unsigned long)current);
	timer.expires = expire_time;
	add_timer(&timer);

	io_schedule();

	del_timer_sync(&timer);
	time_left = expire_time - jiffies;

	return (time_left < 0 ? 0 : time_left);
}
#endif
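
/*
 * Note: the fallback above is only compiled on kernels that lack
 * io_schedule_timeout().  It emulates that call by arming a one-shot
 * timer that wakes the current task, issuing a plain io_schedule(),
 * and then recomputing the remaining time from jiffies once the task
 * resumes.
 */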

/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* XXX - Does not handle jiffies wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped after prepare_to_wait(); this ensures we
	 * are linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		time_left = spl_io_schedule_timeout(time_left);
	else
		time_left = schedule_timeout(time_left);

	/* No more waiters; a different mutex could now be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing the cvp; otherwise we
	 * could deadlock with a thread holding the mutex and calling
	 * cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 1));
}
EXPORT_SYMBOL(__cv_timedwait_io);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_INTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
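
/*
 * Illustrative timed wait (a sketch; example_lock, example_cv, and
 * example_ready are hypothetical, and ddi_get_lbolt()/SEC_TO_TICK are
 * assumed to be the usual SPL helpers): wait up to one second for the
 * predicate, rechecking it after every wakeup and giving up on timeout.
 *
 *	mutex_enter(&example_lock);
 *	while (!example_ready) {
 *		if (cv_timedwait(&example_cv, &example_lock,
 *		    ddi_get_lbolt() + SEC_TO_TICK(1)) == -1)
 *			break;
 *	}
 *	mutex_exit(&example_lock);
 */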

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left;
	ktime_t ktime_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	time_left = expire_time - gethrtime();
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped after prepare_to_wait(); this ensures we
	 * are linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);

	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	ktime_left = ktime_set(0, time_left);
	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
	    HRTIMER_MODE_REL);

	/* No more waiters; a different mutex could now be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, state));
}

clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);
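
/*
 * Illustrative hires wait (a sketch; names are hypothetical and
 * MSEC2NSEC is assumed to be the usual SPL conversion macro): wait up
 * to 500 ms relative to now, with the expiration aligned to a 1000 ns
 * resolution.  Passing 0 for 'flag' makes 'tim' relative; setting
 * CALLOUT_FLAG_ABSOLUTE would treat it as an absolute gethrtime()
 * deadline instead.
 *
 *	(void) cv_timedwait_hires(&example_cv, &example_lock,
 *	    MSEC2NSEC(500), 1000, 0);
 */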

void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally, wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
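
/*
 * Illustrative producer side (a sketch; names are hypothetical):
 * update the predicate while holding the paired mutex, then wake one
 * waiter.  cv_broadcast() would wake all waiters instead.
 *
 *	mutex_enter(&example_lock);
 *	example_ready = 1;
 *	cv_signal(&example_cv);
 *	mutex_exit(&example_lock);
 */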