From 53bf4bb2cfca3f5e68b6ce23e80dd6856f9e04b2 Mon Sep 17 00:00:00 2001
From: Peter Wemm
Date: Tue, 25 Mar 1997 16:27:20 +0000
Subject: [PATCH] Import 4.4BSD-Lite2 onto CSRG branch

---
 sys/kern/kern_lock.c | 531 +++++++++++++++++++++++++++++++++++++++++++
 sys/sys/lock.h       | 180 +++++++++++++++
 2 files changed, 711 insertions(+)
 create mode 100644 sys/kern/kern_lock.c
 create mode 100644 sys/sys/lock.h

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
new file mode 100644
index 000000000000..ddc9185176c9
--- /dev/null
+++ b/sys/kern/kern_lock.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code contains ideas from software contributed to Berkeley by
+ * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
+ * System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <machine/cpu.h>
+
+/*
+ * Locking primitives implementation.
+ * Locks provide shared/exclusive synchronization.
+ */
+
+#ifdef DEBUG
+#define COUNT(p, x) if (p) (p)->p_locks += (x)
+#else
+#define COUNT(p, x)
+#endif
+
+#if NCPUS > 1
+
+/*
+ * For a multiprocessor system, try the spin lock first.
+ *
+ * This should be inline expanded below, but we cannot have #if
+ * inside a multiline define.
+ */
+int lock_wait_time = 100;
+#define PAUSE(lkp, wanted)						\
+	if (lock_wait_time > 0) {					\
+		int i;							\
+									\
+		simple_unlock(&lkp->lk_interlock);			\
+		for (i = lock_wait_time; i > 0; i--)			\
+			if (!(wanted))					\
+				break;					\
+		simple_lock(&lkp->lk_interlock);			\
+	}								\
+	if (!(wanted))							\
+		break;
+
+#else /* NCPUS == 1 */
+
+/*
+ * It is an error to spin on a uniprocessor as nothing will ever cause
+ * the simple lock to clear while we are executing.
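+ *
+ * (PAUSE therefore expands to nothing here, and the ACQUIRE macro
+ * below falls straight through to its tsleep() loop.)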
+ */
+#define PAUSE(lkp, wanted)
+
+#endif /* NCPUS == 1 */
+
+/*
+ * Acquire a resource.
+ */
+#define ACQUIRE(lkp, error, extflags, wanted)				\
+	PAUSE(lkp, wanted);						\
+	for (error = 0; wanted; ) {					\
+		(lkp)->lk_waitcount++;					\
+		simple_unlock(&(lkp)->lk_interlock);			\
+		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
+		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
+		simple_lock(&(lkp)->lk_interlock);			\
+		(lkp)->lk_waitcount--;					\
+		if (error)						\
+			break;						\
+		if ((extflags) & LK_SLEEPFAIL) {			\
+			error = ENOLCK;					\
+			break;						\
+		}							\
+	}
+
+/*
+ * Initialize a lock; required before use.
+ */
+void
+lockinit(lkp, prio, wmesg, timo, flags)
+	struct lock *lkp;
+	int prio;
+	char *wmesg;
+	int timo;
+	int flags;
+{
+
+	bzero(lkp, sizeof(struct lock));
+	simple_lock_init(&lkp->lk_interlock);
+	lkp->lk_flags = flags & LK_EXTFLG_MASK;
+	lkp->lk_prio = prio;
+	lkp->lk_timo = timo;
+	lkp->lk_wmesg = wmesg;
+	lkp->lk_lockholder = LK_NOPROC;
+}
+
+/*
+ * Determine the status of a lock.
+ */
+int
+lockstatus(lkp)
+	struct lock *lkp;
+{
+	int lock_type = 0;
+
+	simple_lock(&lkp->lk_interlock);
+	if (lkp->lk_exclusivecount != 0)
+		lock_type = LK_EXCLUSIVE;
+	else if (lkp->lk_sharecount != 0)
+		lock_type = LK_SHARED;
+	simple_unlock(&lkp->lk_interlock);
+	return (lock_type);
+}
+
+/*
+ * Set, change, or release a lock.
+ *
+ * Shared requests increment the shared count.  Exclusive requests set the
+ * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
+ * accepted shared locks and shared-to-exclusive upgrades to go away.
+ */
+int
+lockmgr(lkp, flags, interlkp, p)
+	__volatile struct lock *lkp;
+	u_int flags;
+	struct simplelock *interlkp;
+	struct proc *p;
+{
+	int error;
+	pid_t pid;
+	int extflags;
+
+	error = 0;
+	if (p)
+		pid = p->p_pid;
+	else
+		pid = LK_KERNPROC;
+	simple_lock(&lkp->lk_interlock);
+	if (flags & LK_INTERLOCK)
+		simple_unlock(interlkp);
+	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
+#ifdef DIAGNOSTIC
+	/*
+	 * Once a lock has drained, the LK_DRAINING flag is set and an
+	 * exclusive lock is returned.  The only valid operation thereafter
+	 * is a single release of that exclusive lock.  This final release
+	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag.  Any
+	 * further requests of any sort will result in a panic.  The bits
+	 * selected for these two flags are chosen so that they will be set
+	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
+	 * The final release is permitted to give a new lease on life to
+	 * the lock by specifying LK_REENABLE.
+	 */
+	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
+		if (lkp->lk_flags & LK_DRAINED)
+			panic("lockmgr: using decommissioned lock");
+		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
+		    lkp->lk_lockholder != pid)
+			panic("lockmgr: non-release on draining lock: %d\n",
+			    flags & LK_TYPE_MASK);
+		lkp->lk_flags &= ~LK_DRAINING;
+		if ((flags & LK_REENABLE) == 0)
+			lkp->lk_flags |= LK_DRAINED;
+	}
+#endif /* DIAGNOSTIC */
+
+	switch (flags & LK_TYPE_MASK) {
+
+	case LK_SHARED:
+		if (lkp->lk_lockholder != pid) {
+			/*
+			 * If just polling, check to see if we will block.
+			 */
+			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
+			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
+				error = EBUSY;
+				break;
+			}
+			/*
+			 * Wait for exclusive locks and upgrades to clear.
+			 */
+			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
+			if (error)
+				break;
+			lkp->lk_sharecount++;
+			COUNT(p, 1);
+			break;
+		}
+		/*
+		 * We hold an exclusive lock, so downgrade it to shared.
+		 * An alternative would be to fail with EDEADLK.
+		 */
+		lkp->lk_sharecount++;
+		COUNT(p, 1);
+		/* fall into downgrade */
+
+	case LK_DOWNGRADE:
+		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
+			panic("lockmgr: not holding exclusive lock");
+		lkp->lk_sharecount += lkp->lk_exclusivecount;
+		lkp->lk_exclusivecount = 0;
+		lkp->lk_flags &= ~LK_HAVE_EXCL;
+		lkp->lk_lockholder = LK_NOPROC;
+		if (lkp->lk_waitcount)
+			wakeup((void *)lkp);
+		break;
+
+	case LK_EXCLUPGRADE:
+		/*
+		 * If another process is ahead of us to get an upgrade,
+		 * then we want to fail rather than have an intervening
+		 * exclusive access.
+		 */
+		if (lkp->lk_flags & LK_WANT_UPGRADE) {
+			lkp->lk_sharecount--;
+			COUNT(p, -1);
+			error = EBUSY;
+			break;
+		}
+		/* fall into normal upgrade */
+
+	case LK_UPGRADE:
+		/*
+		 * Upgrade a shared lock to an exclusive one.  If another
+		 * shared lock has already requested an upgrade to an
+		 * exclusive lock, our shared lock is released and an
+		 * exclusive lock is requested (which will be granted
+		 * after the upgrade).  If we return an error, the lock
+		 * will always be unlocked.
+		 */
+		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
+			panic("lockmgr: upgrade exclusive lock");
+		lkp->lk_sharecount--;
+		COUNT(p, -1);
+		/*
+		 * If we are just polling, check to see if we will block.
+		 */
+		if ((extflags & LK_NOWAIT) &&
+		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
+		    lkp->lk_sharecount > 1)) {
+			error = EBUSY;
+			break;
+		}
+		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
+			/*
+			 * We are the first shared lock to request an upgrade,
+			 * so request the upgrade and wait for the shared
+			 * count to drop to zero, then take the exclusive lock.
+			 */
+			lkp->lk_flags |= LK_WANT_UPGRADE;
+			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
+			lkp->lk_flags &= ~LK_WANT_UPGRADE;
+			if (error)
+				break;
+			lkp->lk_flags |= LK_HAVE_EXCL;
+			lkp->lk_lockholder = pid;
+			if (lkp->lk_exclusivecount != 0)
+				panic("lockmgr: non-zero exclusive count");
+			lkp->lk_exclusivecount = 1;
+			COUNT(p, 1);
+			break;
+		}
+		/*
+		 * Someone else has requested the upgrade.  Release our
+		 * shared lock, awaken the upgrade requestor if we are the
+		 * last shared lock, then request an exclusive lock.
+		 */
+		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
+			wakeup((void *)lkp);
+		/* fall into exclusive request */
+
+	case LK_EXCLUSIVE:
+		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
+			/*
+			 * Recursive lock.
+			 */
+			if ((extflags & LK_CANRECURSE) == 0)
+				panic("lockmgr: locking against myself");
+			lkp->lk_exclusivecount++;
+			COUNT(p, 1);
+			break;
+		}
+		/*
+		 * If we are just polling, check to see if we will sleep.
+		 */
+		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
+		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+		    lkp->lk_sharecount != 0)) {
+			error = EBUSY;
+			break;
+		}
+		/*
+		 * Try to acquire the want_exclusive flag.
+		 */
+		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+		    (LK_HAVE_EXCL | LK_WANT_EXCL));
+		if (error)
+			break;
+		lkp->lk_flags |= LK_WANT_EXCL;
+		/*
+		 * Wait for shared locks and upgrades to finish.
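+		 * LK_WANT_EXCL is now set, so no new shared locks can be
+		 * granted while we sleep here.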
+		 */
+		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
+		    (lkp->lk_flags & LK_WANT_UPGRADE));
+		lkp->lk_flags &= ~LK_WANT_EXCL;
+		if (error)
+			break;
+		lkp->lk_flags |= LK_HAVE_EXCL;
+		lkp->lk_lockholder = pid;
+		if (lkp->lk_exclusivecount != 0)
+			panic("lockmgr: non-zero exclusive count");
+		lkp->lk_exclusivecount = 1;
+		COUNT(p, 1);
+		break;
+
+	case LK_RELEASE:
+		if (lkp->lk_exclusivecount != 0) {
+			if (pid != lkp->lk_lockholder)
+				panic("lockmgr: pid %d, not %s %d unlocking",
+				    pid, "exclusive lock holder",
+				    lkp->lk_lockholder);
+			lkp->lk_exclusivecount--;
+			COUNT(p, -1);
+			if (lkp->lk_exclusivecount == 0) {
+				lkp->lk_flags &= ~LK_HAVE_EXCL;
+				lkp->lk_lockholder = LK_NOPROC;
+			}
+		} else if (lkp->lk_sharecount != 0) {
+			lkp->lk_sharecount--;
+			COUNT(p, -1);
+		}
+		if (lkp->lk_waitcount)
+			wakeup((void *)lkp);
+		break;
+
+	case LK_DRAIN:
+		/*
+		 * Check that we do not already hold the lock, as it can
+		 * never drain if we do.  Unfortunately, we have no way to
+		 * check for holding a shared lock, but at least we can
+		 * check for an exclusive one.
+		 */
+		if (lkp->lk_lockholder == pid)
+			panic("lockmgr: draining against myself");
+		/*
+		 * If we are just polling, check to see if we will sleep.
+		 */
+		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
+		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
+			error = EBUSY;
+			break;
+		}
+		PAUSE(lkp, ((lkp->lk_flags &
+		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
+		for (error = 0; ((lkp->lk_flags &
+		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
+			lkp->lk_flags |= LK_WAITDRAIN;
+			simple_unlock(&lkp->lk_interlock);
+			if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
+			    lkp->lk_wmesg, lkp->lk_timo))
+				return (error);
+			if ((extflags) & LK_SLEEPFAIL)
+				return (ENOLCK);
+			simple_lock(&lkp->lk_interlock);
+		}
+		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
+		lkp->lk_lockholder = pid;
+		lkp->lk_exclusivecount = 1;
+		COUNT(p, 1);
+		break;
+
+	default:
+		simple_unlock(&lkp->lk_interlock);
+		panic("lockmgr: unknown locktype request %d",
+		    flags & LK_TYPE_MASK);
+		/* NOTREACHED */
+	}
+	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
+	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
+	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
+		lkp->lk_flags &= ~LK_WAITDRAIN;
+		wakeup((void *)&lkp->lk_flags);
+	}
+	simple_unlock(&lkp->lk_interlock);
+	return (error);
+}
+
+/*
+ * Print out information about the state of a lock.  Used by VOP_PRINT
+ * routines to display status of contained locks.
+ */
+void
+lockmgr_printinfo(lkp)
+	struct lock *lkp;
+{
+
+	if (lkp->lk_sharecount)
+		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
+		    lkp->lk_sharecount);
+	else if (lkp->lk_flags & LK_HAVE_EXCL)
+		printf(" lock type %s: EXCL (count %d) by pid %d",
+		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
+	if (lkp->lk_waitcount > 0)
+		printf(" with %d pending", lkp->lk_waitcount);
+}
+
+#if defined(DEBUG) && NCPUS == 1
+#include <sys/kernel.h>
+#include <vm/vm.h>
+#include <sys/sysctl.h>
+int lockpausetime = 0;
+struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
+int simplelockrecurse;
+/*
+ * Simple lock functions so that the debugger can see from whence
+ * they are being called.
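+ * (The simple_lock() and simple_unlock() macros in <sys/lock.h>
+ * pass __FILE__ and __LINE__ as the "id" and "l" arguments.)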
+ */
+void
+simple_lock_init(alp)
+	struct simplelock *alp;
+{
+
+	alp->lock_data = 0;
+}
+
+void
+_simple_lock(alp, id, l)
+	__volatile struct simplelock *alp;
+	const char *id;
+	int l;
+{
+
+	if (simplelockrecurse)
+		return;
+	if (alp->lock_data == 1) {
+		if (lockpausetime == -1)
+			panic("%s:%d: simple_lock: lock held", id, l);
+		printf("%s:%d: simple_lock: lock held\n", id, l);
+		if (lockpausetime == 1) {
+			BACKTRACE(curproc);
+		} else if (lockpausetime > 1) {
+			printf("%s:%d: simple_lock: lock held...", id, l);
+			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
+			    lockpausetime * hz);
+			printf(" continuing\n");
+		}
+	}
+	alp->lock_data = 1;
+	if (curproc)
+		curproc->p_simple_locks++;
+}
+
+int
+_simple_lock_try(alp, id, l)
+	__volatile struct simplelock *alp;
+	const char *id;
+	int l;
+{
+
+	if (alp->lock_data)
+		return (0);
+	if (simplelockrecurse)
+		return (1);
+	alp->lock_data = 1;
+	if (curproc)
+		curproc->p_simple_locks++;
+	return (1);
+}
+
+void
+_simple_unlock(alp, id, l)
+	__volatile struct simplelock *alp;
+	const char *id;
+	int l;
+{
+
+	if (simplelockrecurse)
+		return;
+	if (alp->lock_data == 0) {
+		if (lockpausetime == -1)
+			panic("%s:%d: simple_unlock: lock not held", id, l);
+		printf("%s:%d: simple_unlock: lock not held\n", id, l);
+		if (lockpausetime == 1) {
+			BACKTRACE(curproc);
+		} else if (lockpausetime > 1) {
+			printf("%s:%d: simple_unlock: lock not held...", id, l);
+			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
+			    lockpausetime * hz);
+			printf(" continuing\n");
+		}
+	}
+	alp->lock_data = 0;
+	if (curproc)
+		curproc->p_simple_locks--;
+}
+#endif /* DEBUG && NCPUS == 1 */

diff --git a/sys/sys/lock.h b/sys/sys/lock.h
new file mode 100644
index 000000000000..6adf53be6ded
--- /dev/null
+++ b/sys/sys/lock.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 1995
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code contains ideas from software contributed to Berkeley by
+ * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
+ * System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)lock.h	8.12 (Berkeley) 5/19/95
+ */
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+/*
+ * The general lock structure.  Provides for multiple shared locks,
+ * upgrading from shared to exclusive, and sleeping until the lock
+ * can be gained.  The simple locks are defined in <machine/param.h>.
+ */
+struct lock {
+	struct simplelock lk_interlock;	/* lock on remaining fields */
+	u_int	lk_flags;		/* see below */
+	int	lk_sharecount;		/* # of accepted shared locks */
+	int	lk_waitcount;		/* # of processes sleeping for lock */
+	short	lk_exclusivecount;	/* # of recursive exclusive locks */
+	short	lk_prio;		/* priority at which to sleep */
+	char	*lk_wmesg;		/* resource sleeping (for tsleep) */
+	int	lk_timo;		/* maximum sleep time (for tsleep) */
+	pid_t	lk_lockholder;		/* pid of exclusive lock holder */
+};
+/*
+ * Lock request types:
+ *   LK_SHARED - get one of many possible shared locks.  If a process
+ *	holding an exclusive lock requests a shared lock, the exclusive
+ *	lock(s) will be downgraded to shared locks.
+ *   LK_EXCLUSIVE - stop further shared locks; when they are cleared,
+ *	grant a pending upgrade if it exists, then grant an exclusive
+ *	lock.  Only one exclusive lock may exist at a time, except that
+ *	a process holding an exclusive lock may get additional exclusive
+ *	locks if it explicitly sets the LK_CANRECURSE flag in the lock
+ *	request, or if the LK_CANRECURSE flag was set when the lock was
+ *	initialized.
+ *   LK_UPGRADE - the process must hold a shared lock that it wants to
+ *	have upgraded to an exclusive lock.  Other processes may get
+ *	exclusive access to the resource between the time that the upgrade
+ *	is requested and the time that it is granted.
+ *   LK_EXCLUPGRADE - the process must hold a shared lock that it wants to
+ *	have upgraded to an exclusive lock.  If the request succeeds, no
+ *	other processes will have gotten exclusive access to the resource
+ *	between the time that the upgrade is requested and the time that
+ *	it is granted.  However, if another process has already requested
+ *	an upgrade, the request will fail (see error returns below).
+ *   LK_DOWNGRADE - the process must hold an exclusive lock that it wants
+ *	to have downgraded to a shared lock.  If the process holds multiple
+ *	(recursive) exclusive locks, they will all be downgraded to shared
+ *	locks.
+ *   LK_RELEASE - release one instance of a lock.
+ *   LK_DRAIN - wait for all activity on the lock to end, then mark it
+ *	decommissioned.  This feature is used before freeing a lock that
+ *	is part of a piece of memory that is about to be freed.
+ *
+ * These are flags that are passed to the lockmgr routine.
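+ *
+ * For example, a typical shared request and release pair, with "lk"
+ * an initialized struct lock and "p" the requesting process, is:
+ *
+ *	error = lockmgr(&lk, LK_SHARED, NULL, p);
+ *	...
+ *	error = lockmgr(&lk, LK_RELEASE, NULL, p);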
+ */
+#define LK_TYPE_MASK	0x0000000f	/* type of lock sought */
+#define LK_SHARED	0x00000001	/* shared lock */
+#define LK_EXCLUSIVE	0x00000002	/* exclusive lock */
+#define LK_UPGRADE	0x00000003	/* shared-to-exclusive upgrade */
+#define LK_EXCLUPGRADE	0x00000004	/* first shared-to-exclusive upgrade */
+#define LK_DOWNGRADE	0x00000005	/* exclusive-to-shared downgrade */
+#define LK_RELEASE	0x00000006	/* release any type of lock */
+#define LK_DRAIN	0x00000007	/* wait for all lock activity to end */
+/*
+ * External lock flags.
+ *
+ * The first three flags may be set in lockinit to set their mode permanently,
+ * or passed in as arguments to the lock manager.  The LK_REENABLE flag may be
+ * set only at the release of a lock obtained by drain.
+ */
+#define LK_EXTFLG_MASK	0x00000070	/* mask of external flags */
+#define LK_NOWAIT	0x00000010	/* do not sleep to await lock */
+#define LK_SLEEPFAIL	0x00000020	/* sleep, then return failure */
+#define LK_CANRECURSE	0x00000040	/* allow recursive exclusive lock */
+#define LK_REENABLE	0x00000080	/* lock is to be reenabled after drain */
+/*
+ * Internal lock flags.
+ *
+ * These flags are used internally to the lock manager.
+ */
+#define LK_WANT_UPGRADE	0x00000100	/* waiting for share-to-excl upgrade */
+#define LK_WANT_EXCL	0x00000200	/* exclusive lock sought */
+#define LK_HAVE_EXCL	0x00000400	/* exclusive lock obtained */
+#define LK_WAITDRAIN	0x00000800	/* process waiting for lock to drain */
+#define LK_DRAINING	0x00004000	/* lock is being drained */
+#define LK_DRAINED	0x00008000	/* lock has been decommissioned */
+/*
+ * Control flags
+ *
+ * Non-persistent external flags.
+ */
+#define LK_INTERLOCK	0x00010000	/* unlock passed simple lock after
+					   getting lk_interlock */
+#define LK_RETRY	0x00020000	/* vn_lock: retry until locked */
+
+/*
+ * Lock return status.
+ *
+ * Successfully obtained locks return 0.  Locks will always succeed
+ * unless one of the following is true:
+ *	LK_EXCLUPGRADE is requested and some other process has already
+ *	    requested a lock upgrade (returns EBUSY).
+ *	LK_NOWAIT is set and a sleep would be required (returns EBUSY).
+ *	LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
+ *	PCATCH is set in the lock priority and a signal arrives (returns
+ *	    either EINTR or ERESTART if the system call is to be restarted).
+ *	A non-null lock timeout expires (returns EWOULDBLOCK).
+ * A failed lock attempt always returns a non-zero error value.  No lock
+ * is held after an error return (in particular, a failed LK_UPGRADE
+ * or LK_EXCLUPGRADE will have released its shared access lock).
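+ *
+ * Callers are therefore expected to check the return value, e.g.:
+ *
+ *	if (error = lockmgr(&lk, LK_EXCLUSIVE, NULL, p))
+ *		return (error);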
+ */
+
+/*
+ * Special pid values: LK_KERNPROC is used for locks held on behalf of
+ * the kernel; LK_NOPROC indicates that no process holds the exclusive
+ * lock.
+ */
+#define LK_KERNPROC ((pid_t) -2)
+#define LK_NOPROC ((pid_t) -1)
+
+struct proc;
+
+void	lockinit __P((struct lock *, int prio, char *wmesg, int timo,
+	    int flags));
+int	lockmgr __P((__volatile struct lock *, u_int flags,
+	    struct simplelock *, struct proc *p));
+int	lockstatus __P((struct lock *));
+
+#ifdef DEBUG
+void _simple_unlock __P((__volatile struct simplelock *alp, const char *, int));
+#define	simple_unlock(alp) _simple_unlock(alp, __FILE__, __LINE__)
+int _simple_lock_try __P((__volatile struct simplelock *alp, const char *, int));
+#define	simple_lock_try(alp) _simple_lock_try(alp, __FILE__, __LINE__)
+void _simple_lock __P((__volatile struct simplelock *alp, const char *, int));
+#define	simple_lock(alp) _simple_lock(alp, __FILE__, __LINE__)
+void simple_lock_init __P((struct simplelock *alp));
+#else /* !DEBUG */
+#if NCPUS == 1 /* no multiprocessor locking is necessary */
+#define	simple_lock_init(alp)
+#define	simple_lock(alp)
+#define	simple_lock_try(alp)	(1)	/* always succeeds */
+#define	simple_unlock(alp)
+#endif /* NCPUS == 1 */
+#endif /* !DEBUG */
+
+#endif /* !_LOCK_H_ */
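
Usage note (illustrative only, not part of the imported files): the LK_DRAIN
comment above describes draining a lock before freeing the memory that
contains it.  A sketch of that pattern, in the same K&R style, might look as
follows; "struct foo", "foo_destroy", and the M_TEMP malloc type here are
hypothetical stand-ins for a real consumer:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/malloc.h>
	#include <sys/proc.h>

	struct foo {
		struct lock	f_lock;		/* embedded lock */
		/* ... other fields ... */
	};

	/*
	 * Tear down a foo.  LK_DRAIN waits for every holder and sleeper
	 * to go away and leaves us holding an exclusive lock, after
	 * which the memory containing the lock may safely be freed.
	 */
	int
	foo_destroy(fp, p)
		struct foo *fp;
		struct proc *p;
	{
		int error;

		if (error = lockmgr(&fp->f_lock, LK_DRAIN, NULL, p))
			return (error);
		free(fp, M_TEMP);
		return (0);
	}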