- Handle the buffer lock waiters count directly in the buffer cache
  instead of relying on the lockmgr support [1]:
  * bump the waiters only if the interlock is held
  * let brelvp() return the waiters count
  * rely on brelvp() instead of BUF_LOCKWAITERS() to check the number
    of waiters (see the sketch after this list)
- Remove the namespace pollution recently introduced by lockmgr.h
  including lock.h: include lock.h directly in the consumers and make
  it mandatory for using lockmgr.
- Modify flags accepted by lockinit():
  * introduce LK_NOPROFILE, which disables lock profiling for the
    specified lockmgr
  * introduce LK_QUIET, which disables KTR tracing for the specified
    lockmgr [2]
  * disallow passing LK_SLEEPFAIL and LK_NOWAIT there, so that they
    can only be used on a per-instance basis
- Remove BUF_LOCKWAITERS() and lockwaiters() as they are no longer
  used
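
For reference, a minimal sketch of the resulting consumer-side pattern
in getnewbuf(), mirroring the hunks below; the local waiters count
returned by brelvp() replaces the former BUF_LOCKWAITERS() query:

	int waiters = 0;

	/* Disassociating the buffer now also reports how many threads
	 * are sleeping on its lock. */
	if (bp->b_vp)
		waiters = brelvp(bp);
	...
	/* Notify lock waiters of the identity change by freeing the
	 * buffer. */
	if (qindex == QUEUE_CLEAN && waiters > 0) {
		bp->b_flags |= B_INVAL;
		bfreekva(bp);
		brelse(bp);
	}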

This patch breaks the KPI, so __FreeBSD_version will be bumped and the
manpages updated in further commits.  Additionally, the changes to
'struct buf' break the ABI as well.

[2] Currently there is no KTR tracing in the lockmgr, but it will be
added soon.

[1] Submitted by:	kib
Tested by:	pho, Andrea Barberio <insomniac at slackware dot it>
Author:	Attilio Rao
Date:	2008-03-01 19:47:50 +00:00
Parent:	fffba935e4
Commit:	7fbfba7bf8
Notes:	svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=176708
13 changed files with 90 additions and 72 deletions


@@ -68,6 +68,7 @@ __FBSDID("$FreeBSD$");
#include <sys/endian.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#if defined(DIAGNOSTIC) && defined(__i386__) && defined(__FreeBSD__)
#include <machine/cpu.h>


@@ -34,7 +34,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/clock.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/malloc.h>
#include <sys/clock.h>


@@ -546,11 +546,14 @@ lockinit(lkp, prio, wmesg, timo, flags)
{
int iflags;
KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0,
("%s: Invalid flags passed with mask 0x%x", __func__,
flags & LK_EXTFLG_MASK));
CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
"timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_NOWITNESS | LK_NODUP);
lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_FUNC_MASK);
lkp->lk_sharecount = 0;
lkp->lk_waitcount = 0;
lkp->lk_exclusivecount = 0;
@@ -561,8 +564,12 @@ lockinit(lkp, prio, wmesg, timo, flags)
iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
if (!(flags & LK_NODUP))
iflags |= LO_DUPOK;
if (flags & LK_NOPROFILE)
iflags |= LO_NOPROFILE;
if (!(flags & LK_NOWITNESS))
iflags |= LO_WITNESS;
if (flags & LK_QUIET)
iflags |= LO_QUIET;
#ifdef DEBUG_LOCKS
stack_zero(&lkp->lk_stack);
#endif
@@ -643,23 +650,6 @@ lockstatus(lkp)
return (lock_type);
}
/*
* Determine the number of waiters on a lock.
*/
int
lockwaiters(lkp)
struct lock *lkp;
{
int count;
KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
("%s: %p lockmgr is destroyed", __func__, lkp));
mtx_lock(lkp->lk_interlock);
count = lkp->lk_waitcount;
mtx_unlock(lkp->lk_interlock);
return (count);
}
/*
* Print out information about state of a lock. Used by VOP_PRINT
* routines to display status about contained locks.


@@ -557,6 +557,7 @@ bufinit(void)
bp->b_qindex = QUEUE_EMPTY;
bp->b_vflags = 0;
bp->b_xflags = 0;
bp->b_waiters = 0;
LIST_INIT(&bp->b_dep);
BUF_LOCKINIT(bp);
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
@@ -1195,7 +1196,7 @@ brelse(struct buf *bp)
if (bp->b_bufsize)
allocbuf(bp, 0);
if (bp->b_vp)
brelvp(bp);
(void) brelvp(bp);
}
}
@@ -1337,7 +1338,7 @@ brelse(struct buf *bp)
if (bp->b_bufsize != 0)
allocbuf(bp, 0);
if (bp->b_vp != NULL)
brelvp(bp);
(void) brelvp(bp);
}
if (BUF_LOCKRECURSED(bp)) {
@@ -1401,7 +1402,7 @@ brelse(struct buf *bp)
if (bp->b_flags & B_DELWRI)
bundirty(bp);
if (bp->b_vp)
brelvp(bp);
(void) brelvp(bp);
}
/*
@@ -1569,7 +1570,7 @@ vfs_vmio_release(struct buf *bp)
bp->b_npages = 0;
bp->b_flags &= ~B_VMIO;
if (bp->b_vp)
brelvp(bp);
(void) brelvp(bp);
}
/*
@@ -1706,6 +1707,7 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
struct buf *nbp;
int defrag = 0;
int nqindex;
int waiters = 0;
static int flushingbufs;
/*
@@ -1844,7 +1846,7 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
vfs_vmio_release(bp);
}
if (bp->b_vp)
brelvp(bp);
waiters = brelvp(bp);
}
/*
@@ -1913,7 +1915,7 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
* Notify any waiters for the buffer lock about
* identity change by freeing the buffer.
*/
if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp) > 0) {
if (qindex == QUEUE_CLEAN && waiters > 0) {
bp->b_flags |= B_INVAL;
bfreekva(bp);
brelse(bp);


@@ -1525,11 +1525,12 @@ bgetvp(struct vnode *vp, struct buf *bp)
/*
* Disassociate a buffer from a vnode.
*/
void
int
brelvp(struct buf *bp)
{
struct bufobj *bo;
struct vnode *vp;
int waiters;
CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
@@ -1554,7 +1555,10 @@ brelvp(struct buf *bp)
bp->b_flags &= ~B_NEEDSGIANT;
bp->b_vp = NULL;
bp->b_bufobj = NULL;
waiters = bp->b_waiters;
vdropl(vp);
return (waiters);
}
/*


@@ -132,6 +132,7 @@ struct ncp_conn_stat {
#ifdef _KERNEL
#ifndef LK_SHARED
#include <sys/lock.h>
#include <sys/lockmgr.h>
#endif


@@ -164,6 +164,7 @@ struct smb_share_info {
#ifdef _KERNEL
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <netsmb/smb_subr.h>


@@ -32,6 +32,7 @@
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/fnv_hash.h>
#include <sys/proc.h>


@@ -139,6 +139,7 @@ struct buf {
void *b_fsprivate2;
void *b_fsprivate3;
int b_pin_count;
int b_waiters; /* (V) waiters counter */
};
#define b_object b_bufobj->bo_object
@@ -266,15 +267,41 @@ extern const char *buf_wmesg; /* Default buffer lock message */
*
* Get a lock sleeping non-interruptably until it becomes available.
*/
#define BUF_LOCK(bp, locktype, interlock) \
(lockmgr(&(bp)->b_lock, (locktype), (interlock)))
static __inline int
BUF_LOCK(struct buf *bp, int locktype, struct mtx *interlock);
static __inline int
BUF_LOCK(struct buf *bp, int locktype, struct mtx *interlock)
{
int res;
if (locktype & LK_INTERLOCK)
bp->b_waiters++;
res = lockmgr(&bp->b_lock, locktype, interlock);
if (locktype & LK_INTERLOCK)
bp->b_waiters--;
return (res);
}
/*
* Get a lock sleeping with specified interruptably and timeout.
*/
#define BUF_TIMELOCK(bp, locktype, interlock, wmesg, catch, timo) \
(lockmgr_args(&(bp)->b_lock, (locktype) | LK_TIMELOCK, \
(interlock), (wmesg), (PRIBIO + 4) | (catch), (timo)))
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, struct mtx *interlock,
const char *wmesg, int catch, int timo);
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, struct mtx *interlock,
const char *wmesg, int catch, int timo)
{
int res;
if (locktype & LK_INTERLOCK)
bp->b_waiters++;
res = lockmgr_args(&bp->b_lock, locktype | LK_TIMELOCK, interlock,
wmesg, (PRIBIO + 4) | catch, timo);
if (locktype & LK_INTERLOCK)
bp->b_waiters--;
return (res);
}
/*
* Release a lock. Only the acquiring process may free the lock unless
@@ -351,16 +378,6 @@ BUF_KERNPROC(struct buf *bp)
}
#endif
/*
* Find out the number of waiters on a lock.
*/
static __inline int BUF_LOCKWAITERS(struct buf *);
static __inline int
BUF_LOCKWAITERS(struct buf *bp)
{
return (lockwaiters(&bp->b_lock));
}
#endif /* _KERNEL */
struct buf_queue_head {
@@ -516,7 +533,7 @@ void vfs_unbusy_pages(struct buf *);
int vmapbuf(struct buf *);
void vunmapbuf(struct buf *);
void relpbuf(struct buf *, int *);
void brelvp(struct buf *);
int brelvp(struct buf *);
void bgetvp(struct vnode *, struct buf *);
void pbgetbo(struct bufobj *bo, struct buf *bp);
void pbgetvp(struct vnode *, struct buf *);
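
A hypothetical caller sketch for the new inline wrappers: when
LK_INTERLOCK is passed, the caller holds the interlock and the wrapper
bumps b_waiters for the duration of the lock attempt, dropping it again
once lockmgr() returns (the vnode interlock and wmesg below are only
examples):

	struct mtx *interlock = VI_MTX(bp->b_vp);	/* example interlock */
	int error;

	mtx_lock(interlock);
	/* ... revalidate bp under the interlock ... */

	/* b_waiters is bumped while lockmgr() may sleep; the interlock
	 * is dropped by lockmgr() because LK_INTERLOCK is set. */
	error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
	    interlock, "bufwait", 0, 0);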


@@ -55,6 +55,7 @@
struct bufobj;
struct buf_ops;
struct thread;
extern struct buf_ops buf_ops_bio;


@@ -41,7 +41,7 @@
#include <sys/stack.h> /* XXX */
#endif
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/_lock.h>
struct mtx;
@@ -111,7 +111,7 @@ struct lock {
* These may be set in lock_init to set their mode permanently,
* or passed in as arguments to the lock manager.
*/
#define LK_EXTFLG_MASK 0x00000ff0 /* mask of external flags */
#define LK_EXTFLG_MASK 0x0000fff0 /* mask of external flags */
#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */
#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */
@@ -119,30 +119,17 @@ struct lock {
#define LK_TIMELOCK 0x00000100 /* use lk_timo, else no timeout */
#define LK_NOWITNESS 0x00000200 /* disable WITNESS */
#define LK_NODUP 0x00000400 /* enable duplication logging */
#define LK_NOPROFILE 0x00000800 /* disable lock profiling */
#define LK_QUIET 0x00001000 /* disable lock operations tracking */
#define LK_FUNC_MASK (LK_NODUP | LK_NOPROFILE | LK_NOWITNESS | LK_QUIET)
/*
* Nonpersistent external flags.
*/
#define LK_RETRY 0x00001000 /* vn_lock: retry until locked */
#define LK_INTERLOCK 0x00002000 /*
#define LK_RETRY 0x00010000 /* vn_lock: retry until locked */
#define LK_INTERLOCK 0x00020000 /*
* unlock passed mutex after getting
* lk_interlock
*/
/*
* Internal lock flags.
*
* These flags are used internally to the lock manager.
*/
#define LK_WANT_UPGRADE 0x00010000 /* waiting for share-to-excl upgrade */
#define LK_WANT_EXCL 0x00020000 /* exclusive lock sought */
#define LK_HAVE_EXCL 0x00040000 /* exclusive lock obtained */
#define LK_WAITDRAIN 0x00080000 /* process waiting for lock to drain */
#define LK_DRAINING 0x00100000 /* lock is being drained */
#define LK_DESTROYED 0x00200000 /* lock is destroyed */
/*
* Internal state flags corresponding to lk_sharecount, and lk_waitcount
*/
#define LK_SHARE_NONZERO 0x01000000
#define LK_WAIT_NONZERO 0x02000000
/*
* Default values for lockmgr_args().
@@ -151,6 +138,27 @@ struct lock {
#define LK_PRIO_DEFAULT (-1)
#define LK_TIMO_DEFAULT (0)
/*
* Internal lock flags.
*
* These flags are used internally to the lock manager.
*/
#define LK_WANT_UPGRADE 0x00100000 /* waiting for share-to-excl upgrade */
#define LK_WANT_EXCL 0x00200000 /* exclusive lock sought */
#define LK_HAVE_EXCL 0x00400000 /* exclusive lock obtained */
#define LK_WAITDRAIN 0x00800000 /* process waiting for lock to drain */
#define LK_DRAINING 0x01000000 /* lock is being drained */
#define LK_DESTROYED 0x02000000 /* lock is destroyed */
/*
* Internal state flags corresponding to lk_sharecount, and lk_waitcount
*/
#define LK_SHARE_NONZERO 0x10000000
#define LK_WAIT_NONZERO 0x20000000
#ifndef LOCK_FILE
#error "LOCK_FILE not defined, include <sys/lock.h> before <sys/lockmgr.h>"
#endif
/*
* Assertion flags.
*/
@@ -203,7 +211,6 @@ void _lockmgr_assert(struct lock *, int what, const char *, int);
void _lockmgr_disown(struct lock *, const char *, int);
void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *);
int lockwaiters(struct lock *);
#define lockmgr(lock, flags, mtx) \
_lockmgr_args((lock), (flags), (mtx), LK_WMESG_DEFAULT, \


@@ -36,8 +36,8 @@
#include <sys/ucred.h>
#include <sys/queue.h>
#ifdef _KERNEL
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#endif


@@ -33,17 +33,10 @@
#ifndef _SYS_VNODE_H_
#define _SYS_VNODE_H_
/*
* XXX - compatability until lockmgr() goes away or all the #includes are
* updated.
*/
#include <sys/lockmgr.h>
#include <sys/bufobj.h>
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/lock.h>
#include <sys/_mutex.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/selinfo.h>
#include <sys/uio.h>