/*-
 * Copyright (c) 1997, 1998
 *	Nan Yang Computer Services Limited.  All rights reserved.
 *
 * Parts copyright (c) 1997, 1998 Cybernet Corporation, NetMAX project.
 *
 * Written by Greg Lehey
 *
 * This software is distributed under the so-called ``Berkeley
 * License'':
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Nan Yang Computer
 *	Services Limited.
 * 4. Neither the name of the Company nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * This software is provided ``as is'', and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose are disclaimed.
 * In no event shall the company or contributors be liable for any
 * direct, indirect, incidental, special, exemplary, or consequential
 * damages (including, but not limited to, procurement of substitute
 * goods or services; loss of use, data, or profits; or business
 * interruption) however caused and on any theory of liability, whether
 * in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this software, even if
 * advised of the possibility of such damage.
 *
 * $Id: vinumlock.c,v 1.17 2003/02/02 05:07:51 grog Exp $
 * $FreeBSD$
 */

#include <dev/vinum/vinumhdr.h>
#include <dev/vinum/request.h>

/* Lock a drive, wait if it's in use */
#ifdef VINUMDEBUG
int
lockdrive(struct drive *drive, char *file, int line)
#else
int
lockdrive(struct drive *drive)
#endif
{
    int error;

    /* XXX get rid of drive->flags |= VF_LOCKING; */
    if ((drive->flags & VF_LOCKED)			    /* it's locked */
	&& (drive->pid == curproc->p_pid)) {		    /* by us! */
#ifdef VINUMDEBUG
	log(LOG_WARNING,
	    "vinum lockdrive: already locking %s from %s:%d, called from %s:%d\n",
	    drive->label.name,
	    drive->lockfilename,
	    drive->lockline,
	    basename(file),
	    line);
#else
	log(LOG_WARNING,
	    "vinum lockdrive: already locking %s\n",
	    drive->label.name);
#endif
	return 0;
    }
    while ((drive->flags & VF_LOCKED) != 0) {
	/*
	 * There are problems sleeping on a unique identifier,
	 * since the drive structure can move, and the unlock
	 * function can be called after killing the drive.
	 * Solve this by waiting on this function; the number
	 * of conflicts is negligible.
	 */
	if ((error = tsleep(&lockdrive,
		    PRIBIO,
		    "vindrv",
		    0)) != 0)
	    return error;
    }
    drive->flags |= VF_LOCKED;
    drive->pid = curproc->p_pid;	/* it's a panic error if curproc is null */
#ifdef VINUMDEBUG
    bcopy(basename(file), drive->lockfilename, 15);
    drive->lockfilename[15] = '\0';	/* truncate if necessary */
    drive->lockline = line;
#endif
    return 0;
}

/* Unlock a drive and let the next one at it */
void
unlockdrive(struct drive *drive)
{
    drive->flags &= ~VF_LOCKED;
    /* we don't reset pid: it's of hysterical interest */
    wakeup(&lockdrive);
}
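
/*
 * Minimal usage sketch (an editor's illustration, not code from the
 * driver; under VINUMDEBUG the caller also supplies file and line):
 *
 *	int error;
 *
 *	if ((error = lockdrive(drive)) != 0)
 *	    return error;		    (tsleep was interrupted)
 *	... operate on the drive exclusively ...
 *	unlockdrive(drive);
 *
 * All contenders sleep on the single channel &lockdrive; unlockdrive()
 * wakes them all, and each re-tests VF_LOCKED at the top of the loop.
 */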

/* Lock a stripe of a plex, wait if it's in use */
struct rangelock *
lockrange(daddr_t stripe, struct buf *bp, struct plex *plex)
{
    struct rangelock *lock;
    struct rangelock *pos;	/* position of first free lock */
    int foundlocks;		/* number of locks found */

    /*
     * We could get by without counting the number
     * of locks we find, but we have a linear search
     * through a table which in most cases will be
     * empty.  It's faster to stop when we've found
     * all the locks that are there.  This is also
     * the reason why we put pos at the beginning
     * instead of the end, though it requires an
     * extra test.
     */
    pos = NULL;
    foundlocks = 0;

    /*
     * We can't use 0 as a valid address, so
     * increment all addresses by 1.
     */
    stripe++;
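    /*
     * A free table entry is marked by stripe == 0 (see the scan below
     * and unlockrange()), so the increment keeps a lock on stripe 0
     * distinguishable from an empty slot.
     */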
    mtx_lock(&plex->lockmtx);

    /* Wait here if the table is full */
    while (plex->usedlocks == PLEX_LOCKS)	    /* all in use */
	msleep(&plex->usedlocks, &plex->lockmtx, PRIBIO, "vlock", 0);

#ifdef DIAGNOSTIC
    if (plex->usedlocks >= PLEX_LOCKS)
	panic("lockrange: Too many locks in use");
#endif

    lock = plex->lock;				    /* pointer in lock table */
    if (plex->usedlocks > 0)			    /* something locked, */
	/* Search the lock table for our stripe */
	for (; lock < &plex->lock[PLEX_LOCKS]
	    && foundlocks < plex->usedlocks;
	    lock++) {
	    if (lock->stripe) {			    /* in use */
		foundlocks++;			    /* found another one in use */
		if ((lock->stripe == stripe)	    /* it's our stripe */
		    && (lock->bp != bp)) {	    /* but not our request */
#ifdef VINUMDEBUG
		    if (debug & DEBUG_LOCKREQS) {
			struct rangelockinfo lockinfo;

			lockinfo.stripe = stripe;
			lockinfo.bp = bp;
			lockinfo.plexno = plex->plexno;
			logrq(loginfo_lockwait, (union rqinfou) &lockinfo, bp);
		    }
#endif
		    plex->lockwaits++;		    /* waited one more time */
		    msleep(lock, &plex->lockmtx, PRIBIO, "vrlock", 0);
		    /*
		     * The table may have changed while we slept, so
		     * rescan from the top: the loop's lock++ brings
		     * us back to &plex->lock[0].
		     */
		    lock = &plex->lock[-1];	    /* start again */
		    foundlocks = 0;
		    pos = NULL;
		}
	    } else if (pos == NULL)		    /* still looking for somewhere? */
		pos = lock;			    /* a place to put this one */
	}

    /*
     * This untidy-looking code ensures that we'll
     * always end up pointing to the first free lock
     * entry, thus minimizing the number of
     * iterations necessary.
     */
    if (pos == NULL)				    /* didn't find one on the way, */
	pos = lock;				    /* use the one we're pointing to */

    /*
     * The address range is free, and we're pointing
     * to the first unused entry.  Make it ours.
     */
    pos->stripe = stripe;
    pos->bp = bp;
    plex->usedlocks++;				    /* one more lock */
    mtx_unlock(&plex->lockmtx);
#ifdef VINUMDEBUG
    if (debug & DEBUG_LOCKREQS) {
	struct rangelockinfo lockinfo;

	lockinfo.stripe = stripe;
	lockinfo.bp = bp;
	lockinfo.plexno = plex->plexno;
	logrq(loginfo_lock, (union rqinfou) &lockinfo, bp);
    }
#endif
    return pos;
}
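
/*
 * Minimal usage sketch (an editor's illustration; in the driver the
 * callers live in the request code, which supplies stripe and bp):
 *
 *	struct rangelock *lock;
 *
 *	lock = lockrange(stripe, bp, &PLEX[plexno]);
 *	... perform the I/O that covers this stripe ...
 *	unlockrange(plexno, lock);
 *
 * lockrange() sleeps until no other request holds the stripe, then
 * claims the first free table entry; unlockrange() frees the entry
 * and wakes any request sleeping on it.
 */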

/* Unlock a stripe of a plex and let the next one at it */
void
unlockrange(int plexno, struct rangelock *lock)
{
    struct plex *plex;

    plex = &PLEX[plexno];
#ifdef DIAGNOSTIC
    if (lock < &plex->lock[0] || lock >= &plex->lock[PLEX_LOCKS])
	panic("vinum: rangelock %p on plex %d invalid, not between %p and %p",
	    lock,
	    plexno,
	    &plex->lock[0],
	    &plex->lock[PLEX_LOCKS]);
#endif
#ifdef VINUMDEBUG
    if (debug & DEBUG_LOCKREQS) {
	struct rangelockinfo lockinfo;

	lockinfo.stripe = lock->stripe;
	lockinfo.bp = lock->bp;
	lockinfo.plexno = plex->plexno;
	logrq(loginfo_unlock, (union rqinfou) &lockinfo, lock->bp);
    }
#endif
    lock->stripe = 0;				    /* no longer used */
    plex->usedlocks--;				    /* one less lock */
    if (plex->usedlocks == PLEX_LOCKS - 1)	    /* we were full, */
	wakeup(&plex->usedlocks);		    /* get a waiter if one's there */
    wakeup((void *) lock);
}

/* Get a lock for the global config, wait if it's not available */
int
lock_config(void)
{
    int error;

    while ((vinum_conf.flags & VF_LOCKED) != 0) {
	vinum_conf.flags |= VF_LOCKING;
	if ((error = tsleep(&vinum_conf, PRIBIO, "vincfg", 0)) != 0)
	    return error;
    }
    vinum_conf.flags |= VF_LOCKED;
    return 0;
}

/* Unlock and wake up any waiters */
void
unlock_config(void)
{
    vinum_conf.flags &= ~VF_LOCKED;
    if ((vinum_conf.flags & VF_LOCKING) != 0) {
	vinum_conf.flags &= ~VF_LOCKING;
	wakeup(&vinum_conf);
    }
}
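
/*
 * Minimal usage sketch (an editor's illustration): configuration
 * updaters bracket their changes with the pair above.
 *
 *	int error;
 *
 *	if ((error = lock_config()) != 0)
 *	    return error;
 *	... modify vinum_conf ...
 *	unlock_config();
 *
 * VF_LOCKING records that somebody is waiting, so unlock_config()
 * only calls wakeup() when there is a sleeper to wake.
 */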

/* Local Variables: */
/* fill-column: 50 */
/* End: */