Move the head of the byte-level advisory lock list from the
filesystem-specific vnode data to the struct vnode. Provide default
implementations for vop_advlock and vop_advlockasync. Purge the locks
on vnode reclamation using lf_purgelocks(). The default implementation
is augmented for nfs and smbfs. In nfs_advlock, push Giant inside
nfs_dolock.
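
For context: vop_advlock and vop_advlockasync back the fcntl(2)
advisory-locking path. A minimal userland sketch of the request whose
kernel side is reworked here (hypothetical helper name, error handling
elided):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Take an exclusive advisory lock over the whole file. */
	int
	lock_whole_file(int fd)
	{
		struct flock fl;

		memset(&fl, 0, sizeof(fl));
		fl.l_type = F_WRLCK;
		fl.l_whence = SEEK_SET;
		fl.l_start = 0;
		fl.l_len = 0;		/* zero length extends to EOF */
		/*
		 * F_SETLKW enters the kernel through VOP_ADVLOCK() and,
		 * after this change, reaches lf_advlock() with the list
		 * head taken from vp->v_lockf.
		 */
		return (fcntl(fd, F_SETLKW, &fl));
	}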

Before the change, vop_advlock and vop_advlockasync took an unlocked
vnode and dereferenced the fs-private inode data, racing with vnode
reclamation due to forced unmount. Now, vop_getattr under the shared
vnode lock is used to obtain the inode size, and later, in
lf_advlockasync, after locking the vnode interlock, the VI_DOOMED flag
is checked to prevent an operation on a doomed vnode.
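
A condensed sketch of the hazard and of the new flow (the old pattern
is taken from the per-filesystem code removed below, shown here with
the ufs names; the vattr/td declarations are elided):

	/* Old, racy: vnode unlocked, fs-private data may be reclaimed. */
	struct inode *ip = VTOI(ap->a_vp);
	error = lf_advlock(ap, &ip->i_lockf, ip->i_size);

	/*
	 * New (vop_stdadvlock): the size is read under the shared vnode
	 * lock; lf_advlockasync() later rechecks VI_DOOMED under the
	 * vnode interlock before touching vp->v_lockf.
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
	VOP_UNLOCK(vp, 0);
	if (error == 0)
		error = lf_advlock(ap, &vp->v_lockf, vattr.va_size);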

The implementation of lf_purgelocks() was submitted by dfr.

Reported by:	kris
Tested by:	kris, pho
Discussed with:	jeff, dfr
MFC after:	2 weeks
Author:	Konstantin Belousov
Date:	2008-04-16 11:33:32 +00:00
Commit:	eab626f110 (parent 92c4ddb268)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=178243

24 changed files with 227 additions and 225 deletions


@@ -162,7 +162,6 @@ typedef struct znode {
uint32_t z_sync_cnt; /* synchronous open count */
kmutex_t z_acl_lock; /* acl data lock */
list_node_t z_link_node; /* all znodes in fs link */
struct lockf *z_lockf; /* Head of byte-level lock list. */
/*
* These are dmu managed fields.
*/


@@ -3533,43 +3533,6 @@ zfs_freebsd_pathconf(ap)
return (error);
}
/*
* Advisory record locking support
*/
static int
zfs_freebsd_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
caddr_t a_id;
int a_op;
struct flock *a_fl;
int a_flags;
} */ *ap;
{
znode_t *zp = VTOZ(ap->a_vp);
return (lf_advlock(ap, &(zp->z_lockf), zp->z_phys->zp_size));
}
/*
* Advisory record locking support
*/
static int
zfs_freebsd_advlockasync(ap)
struct vop_advlockasync_args /* {
struct vnode *a_vp;
caddr_t a_id;
int a_op;
struct flock *a_fl;
int a_flags;
struct task *a_task;
} */ *ap;
{
znode_t *zp = VTOZ(ap->a_vp);
return (lf_advlockasync(ap, &(zp->z_lockf), zp->z_phys->zp_size));
}
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
@@ -3602,8 +3565,6 @@ struct vop_vector zfs_vnodeops = {
.vop_write = zfs_freebsd_write,
.vop_remove = zfs_freebsd_remove,
.vop_rename = zfs_freebsd_rename,
.vop_advlock = zfs_freebsd_advlock,
.vop_advlockasync = zfs_freebsd_advlockasync,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_bmap = VOP_EOPNOTSUPP,
.vop_fid = zfs_freebsd_fid,


@@ -136,7 +136,6 @@ zfs_znode_cache_constructor(void *buf, void *cdrarg, int kmflags)
zp->z_dbuf_held = 0;
zp->z_dirlocks = 0;
zp->z_lockf = NULL;
return (0);
}


@@ -159,7 +159,6 @@ struct denode {
u_long de_FileSize; /* size of file in bytes */
struct fatcache de_fc[FC_SIZE]; /* fat cache */
u_quad_t de_modrev; /* Revision level for lease. */
struct lockf *de_lockf; /* lockf */
u_int64_t de_inode; /* Inode number (really byte offset of direntry) */
};


@@ -82,8 +82,6 @@
/*
* Prototypes for MSDOSFS vnode operations
*/
static vop_advlock_t msdosfs_advlock;
static vop_advlockasync_t msdosfs_advlockasync;
static vop_create_t msdosfs_create;
static vop_mknod_t msdosfs_mknod;
static vop_open_t msdosfs_open;
@@ -1948,37 +1946,6 @@ msdosfs_pathconf(ap)
/* NOTREACHED */
}
static int
msdosfs_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
u_char a_id;
int a_op;
struct flock *a_fl;
int a_flags;
} */ *ap;
{
struct denode *dep = VTODE(ap->a_vp);
return (lf_advlock(ap, &dep->de_lockf, dep->de_FileSize));
}
static int
msdosfs_advlockasync(ap)
struct vop_advlockasync_args /* {
struct vnode *a_vp;
u_char a_id;
int a_op;
struct flock *a_fl;
int a_flags;
struct task *a_task;
} */ *ap;
{
struct denode *dep = VTODE(ap->a_vp);
return (lf_advlockasync(ap, &dep->de_lockf, dep->de_FileSize));
}
static int
msdosfs_vptofh(ap)
struct vop_vptofh_args /* {
@@ -2003,8 +1970,6 @@ struct vop_vector msdosfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_access = msdosfs_access,
.vop_advlock = msdosfs_advlock,
.vop_advlockasync = msdosfs_advlockasync,
.vop_bmap = msdosfs_bmap,
.vop_cachedlookup = msdosfs_lookup,
.vop_open = msdosfs_open,


@@ -66,7 +66,6 @@ struct smbnode {
u_char * n_name;
struct smbfs_fctx * n_dirseq; /* ff context */
long n_dirofs; /* last ff offset */
struct lockf * n_lockf; /* Locking records of file */
LIST_ENTRY(smbnode) n_hash;
};


@@ -1008,7 +1008,7 @@ smbfs_advlock(ap)
default:
return EINVAL;
}
error = lf_advlock(ap, &np->n_lockf, size);
error = lf_advlock(ap, &vp->v_lockf, size);
if (error)
break;
lkop = SMB_LOCK_EXCL;
@@ -1017,16 +1017,16 @@ smbfs_advlock(ap)
int oldtype = fl->l_type;
fl->l_type = F_UNLCK;
ap->a_op = F_UNLCK;
lf_advlock(ap, &np->n_lockf, size);
lf_advlock(ap, &vp->v_lockf, size);
fl->l_type = oldtype;
}
break;
case F_UNLCK:
lf_advlock(ap, &np->n_lockf, size);
lf_advlock(ap, &vp->v_lockf, size);
error = smbfs_smb_lock(np, SMB_LOCK_RELEASE, id, start, end, &scred);
break;
case F_GETLK:
error = lf_advlock(ap, &np->n_lockf, size);
error = lf_advlock(ap, &vp->v_lockf, size);
break;
default:
return EINVAL;


@@ -219,9 +219,6 @@ struct tmpfs_node {
struct timespec tn_birthtime;
unsigned long tn_gen;
/* Head of byte-level lock list (used by tmpfs_advlock). */
struct lockf * tn_lockf;
/* As there is a single vnode for each active file within the
* system, care has to be taken to avoid allocating more than one
* vnode per file. In order to do this, a bidirectional association


@@ -151,7 +151,6 @@ tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
node->tn_status = 0;
node->tn_flags = 0;
node->tn_links = 0;
node->tn_lockf = NULL;
node->tn_vnode = NULL;
node->tn_vpstate = 0;


@@ -1429,36 +1429,6 @@ tmpfs_pathconf(struct vop_pathconf_args *v)
return error;
}
/* --------------------------------------------------------------------- */
static int
tmpfs_advlock(struct vop_advlock_args *v)
{
struct vnode *vp = v->a_vp;
struct tmpfs_node *node;
node = VP_TO_TMPFS_NODE(vp);
return lf_advlock(v, &node->tn_lockf, node->tn_size);
}
/* --------------------------------------------------------------------- */
static int
tmpfs_advlockasync(struct vop_advlockasync_args *v)
{
struct vnode *vp = v->a_vp;
struct tmpfs_node *node;
node = VP_TO_TMPFS_NODE(vp);
return lf_advlockasync(v, &node->tn_lockf, node->tn_size);
}
/* --------------------------------------------------------------------- */
static int
tmpfs_vptofh(struct vop_vptofh_args *ap)
{
@@ -1506,8 +1476,6 @@ struct vop_vector tmpfs_vnodeop_entries = {
.vop_reclaim = tmpfs_reclaim,
.vop_print = tmpfs_print,
.vop_pathconf = tmpfs_pathconf,
.vop_advlock = tmpfs_advlock,
.vop_advlockasync = tmpfs_advlockasync,
.vop_vptofh = tmpfs_vptofh,
.vop_bmap = VOP_EOPNOTSUPP,
};


@@ -83,7 +83,6 @@
static int ext2_makeinode(int mode, struct vnode *, struct vnode **, struct componentname *);
static vop_access_t ext2_access;
static vop_advlock_t ext2_advlock;
static int ext2_chmod(struct vnode *, int, struct ucred *, struct thread *);
static int ext2_chown(struct vnode *, uid_t, gid_t, struct ucred *,
struct thread *);
@@ -119,7 +118,6 @@ static void filt_ext2detach(struct knote *kn);
struct vop_vector ext2_vnodeops = {
.vop_default = &default_vnodeops,
.vop_access = ext2_access,
.vop_advlock = ext2_advlock,
.vop_bmap = ext2_bmap,
.vop_cachedlookup = ext2_lookup,
.vop_close = ext2_close,
@@ -1520,24 +1518,6 @@ ext2_pathconf(ap)
/* NOTREACHED */
}
/*
* Advisory record locking support
*/
static int
ext2_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
caddr_t a_id;
int a_op;
struct flock *a_fl;
int a_flags;
} */ *ap;
{
struct inode *ip = VTOI(ap->a_vp);
return (lf_advlock(ap, &(ip->i_lockf), ip->i_size));
}
/*
* Vnode pointer to File handle
*/


@@ -68,7 +68,6 @@ struct inode {
struct ext2_sb_info *i_e2fs; /* EXT2FS */
u_quad_t i_modrev; /* Revision level for NFS lease. */
struct lockf *i_lockf;/* Head of byte-level lock list. */
/*
* Side effects; used during directory lookup.
*/


@@ -1231,13 +1231,13 @@ _xfs_advlock(
#ifdef notyet
switch (ap->a_op) {
case F_SETLK:
error = lf_advlock(ap, &np->n_lockf, size);
error = lf_advlock(ap, &vp->v_lockf, size);
break;
case F_UNLCK:
lf_advlock(ap, &np->n_lockf, size);
lf_advlock(ap, &vp->v_lockf, size);
break;
case F_GETLK:
error = lf_advlock(ap, &np->n_lockf, size);
error = lf_advlock(ap, &vp->v_lockf, size);
break;
default:
return (EINVAL);


@@ -570,6 +570,11 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
* the vnode interlock.
*/
VI_LOCK(vp);
if (vp->v_iflag & VI_DOOMED) {
VI_UNLOCK(vp);
lf_free_lock(lock);
return (ENOENT);
}
/*
* Allocate a state structure if necessary.
@@ -595,6 +600,16 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
* trying to allocate memory.
*/
VI_LOCK(vp);
if (vp->v_iflag & VI_DOOMED) {
VI_UNLOCK(vp);
sx_xlock(&lf_lock_states_lock);
LIST_REMOVE(ls, ls_link);
sx_xunlock(&lf_lock_states_lock);
sx_destroy(&ls->ls_lock);
free(ls, M_LOCKF);
lf_free_lock(lock);
return (ENOENT);
}
if ((*statep) == NULL) {
state = *statep = ls;
VI_UNLOCK(vp);
@@ -687,6 +702,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
VI_LOCK(vp);
state->ls_threads--;
wakeup(state);
if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
KASSERT(LIST_EMPTY(&state->ls_pending),
("freeing state with pending locks"));
@@ -722,6 +738,77 @@ lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
return (lf_advlockasync(&a, statep, size));
}
void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
struct lockf *state;
struct lockf_entry *lock, *nlock;
/*
* For this to work correctly, the caller must ensure that no
* other threads enter the locking system for this vnode,
* e.g. by checking VI_DOOMED. We wake up any threads that are
* sleeping waiting for locks on this vnode and then free all
* the remaining locks.
*/
VI_LOCK(vp);
state = *statep;
if (state) {
state->ls_threads++;
VI_UNLOCK(vp);
sx_xlock(&state->ls_lock);
sx_xlock(&lf_owner_graph_lock);
LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
LIST_REMOVE(lock, lf_link);
lf_remove_outgoing(lock);
lf_remove_incoming(lock);
/*
* If it's an async lock, we can just free it
* here; otherwise we let the sleeping thread
* free it.
*/
if (lock->lf_async_task) {
lf_free_lock(lock);
} else {
lock->lf_flags |= F_INTR;
wakeup(lock);
}
}
sx_xunlock(&lf_owner_graph_lock);
sx_xunlock(&state->ls_lock);
/*
* Wait for all other threads, sleeping and otherwise,
* to leave.
*/
VI_LOCK(vp);
while (state->ls_threads > 1)
msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
*statep = 0;
VI_UNLOCK(vp);
/*
* We can just free all the active locks since they
* will have no dependencies (we removed them all
* above). We don't need to bother locking since we
* are the last thread using this state structure.
*/
LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
LIST_REMOVE(lock, lf_link);
lf_free_lock(lock);
}
sx_xlock(&lf_lock_states_lock);
LIST_REMOVE(state, ls_link);
sx_xunlock(&lf_lock_states_lock);
sx_destroy(&state->ls_lock);
free(state, M_LOCKF);
} else {
VI_UNLOCK(vp);
}
}
/*
* Return non-zero if locks 'x' and 'y' overlap.
*/
@@ -1346,7 +1433,10 @@ lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
* remove our lock graph edges) and/or by another
* process releasing a lock (in which case our edges
* have already been removed and we have been moved to
* the active list).
* the active list). We may also have been woken by
* lf_purgelocks, which we report to the caller as
* EINTR. In that case, lf_purgelocks will have
* removed our lock graph edges.
*
* Note that it is possible to receive a signal after
* we were successfully woken (and moved to the active
@@ -1358,6 +1448,11 @@ lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
* may now have incoming edges from some newer lock
* which is waiting behind us in the queue.
*/
if (lock->lf_flags & F_INTR) {
error = EINTR;
lf_free_lock(lock);
goto out;
}
if (LIST_EMPTY(&lock->lf_outedges)) {
error = 0;
} else {


@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
@@ -75,7 +76,8 @@ struct vop_vector default_vnodeops = {
.vop_default = NULL,
.vop_bypass = VOP_EOPNOTSUPP,
.vop_advlock = VOP_EINVAL,
.vop_advlock = vop_stdadvlock,
.vop_advlockasync = vop_stdadvlockasync,
.vop_bmap = vop_stdbmap,
.vop_close = VOP_NULL,
.vop_fsync = VOP_NULL,
@@ -200,6 +202,43 @@ vop_nostrategy (struct vop_strategy_args *ap)
return (EOPNOTSUPP);
}
/*
* Advisory record locking support
*/
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
struct vnode *vp = ap->a_vp;
struct thread *td = curthread;
struct vattr vattr;
int error;
vn_lock(vp, LK_SHARED | LK_RETRY);
error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
VOP_UNLOCK(vp, 0);
if (error)
return (error);
return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}
int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
struct vnode *vp = ap->a_vp;
struct thread *td = curthread;
struct vattr vattr;
int error;
vn_lock(vp, LK_SHARED | LK_RETRY);
error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
VOP_UNLOCK(vp, 0);
if (error)
return (error);
return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}
/*
* vop_stdpathconf:
*


@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
@@ -2523,6 +2524,10 @@ vgonel(struct vnode *vp)
vn_finished_secondary_write(mp);
VNASSERT(vp->v_object == NULL, vp,
("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
/*
* Clear the advisory locks and wake up waiting threads.
*/
lf_purgelocks(vp, &(vp->v_lockf));
/*
* Delete from old mount point vnode list.
*/


@@ -2763,14 +2763,22 @@ nfs4_flush(struct vnode *vp, int waitfor, struct thread *td,
static int
nfs4_advlock(struct vop_advlock_args *ap)
{
struct vnode *vp = ap->a_vp;
u_quad_t size;
int error;
return (EPERM);
if ((VFSTONFS(ap->a_vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
struct nfsnode *np = VTONFS(ap->a_vp);
return (lf_advlock(ap, &(np->n_lockf), np->n_size));
}
return (nfs_dolock(ap));
error = vn_lock(vp, LK_SHARED);
if (error)
return (error);
if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
size = VTONFS(vp)->n_size;
VOP_UNLOCK(vp, 0);
error = lf_advlock(ap, &(vp->v_lockf), size);
} else
error = nfs_dolock(ap);
return (error);
}
/*
@@ -2779,14 +2787,24 @@ nfs4_advlock(struct vop_advlock_args *ap)
static int
nfs4_advlockasync(struct vop_advlockasync_args *ap)
{
struct vnode *vp = ap->a_vp;
u_quad_t size;
int error;
return (EPERM);
if ((VFSTONFS(ap->a_vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
struct nfsnode *np = VTONFS(ap->a_vp);
return (lf_advlockasync(ap, &(np->n_lockf), np->n_size));
error = vn_lock(vp, LK_SHARED);
if (error)
return (error);
if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
size = VTONFS(vp)->n_size;
VOP_UNLOCK(vp, 0);
error = lf_advlockasync(ap, &(vp->v_lockf), size);
} else {
VOP_UNLOCK(vp, 0);
error = EOPNOTSUPP;
}
return (EOPNOTSUPP);
return (error);
}
/*


@@ -226,6 +226,9 @@ MODULE_VERSION(nfslock, 1);
/*
* nfs_advlock --
* NFS advisory byte-level locks.
*
* The vnode shall be (shared) locked on entry; it is
* unconditionally unlocked on return.
*/
int
nfs_dolock(struct vop_advlock_args *ap)
@@ -243,6 +246,15 @@ nfs_dolock(struct vop_advlock_args *ap)
vp = ap->a_vp;
fl = ap->a_fl;
ASSERT_VOP_LOCKED(vp, "nfs_dolock");
bcopy(VFSTONFS(vp->v_mount)->nm_nam, &msg.lm_addr,
min(sizeof msg.lm_addr, VFSTONFS(vp->v_mount)->nm_nam->sa_len));
msg.lm_fh_len = NFS_ISV3(vp) ? VTONFS(vp)->n_fhsize : NFSX_V2FH;
bcopy(VTONFS(vp)->n_fhp, msg.lm_fh, msg.lm_fh_len);
msg.lm_nfsv3 = NFS_ISV3(vp);
VOP_UNLOCK(vp, 0);
/*
* the NLM protocol doesn't allow the server to return an error
* on ranges, so we do it.
@@ -263,6 +275,8 @@
*/
msg.lm_version = LOCKD_MSG_VERSION;
msg.lm_msg_ident.pid = p->p_pid;
mtx_lock(&Giant);
/*
* if there is no nfsowner table yet, allocate one.
*/
@@ -278,21 +292,16 @@ nfs_dolock(struct vop_advlock_args *ap)
msg.lm_fl = *fl;
msg.lm_wait = ap->a_flags & F_WAIT;
msg.lm_getlk = ap->a_op == F_GETLK;
bcopy(VFSTONFS(vp->v_mount)->nm_nam, &msg.lm_addr,
min(sizeof msg.lm_addr, VFSTONFS(vp->v_mount)->nm_nam->sa_len));
msg.lm_fh_len = NFS_ISV3(vp) ? VTONFS(vp)->n_fhsize : NFSX_V2FH;
bcopy(VTONFS(vp)->n_fhp, msg.lm_fh, msg.lm_fh_len);
msg.lm_nfsv3 = NFS_ISV3(vp);
cru2x(td->td_ucred, &msg.lm_cred);
for (;;) {
error = nfslock_send(&msg);
if (error)
return (error);
goto out;
/* Unlocks succeed immediately. */
if (fl->l_type == F_UNLCK)
return (error);
goto out;
/*
* Retry after 20 seconds if we haven't gotten a response yet.
@@ -333,7 +342,8 @@ nfs_dolock(struct vop_advlock_args *ap)
error = p->p_nlminfo->retcode;
break;
}
out:
mtx_unlock(&Giant);
return (error);
}


@@ -3038,18 +3038,19 @@ nfs_flush(struct vnode *vp, int waitfor, struct thread *td,
static int
nfs_advlock(struct vop_advlock_args *ap)
{
struct vnode *vp = ap->a_vp;
u_quad_t size;
int error;
mtx_lock(&Giant);
if ((VFSTONFS(ap->a_vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
struct nfsnode *np = VTONFS(ap->a_vp);
error = lf_advlock(ap, &(np->n_lockf), np->n_size);
goto out;
}
error = nfs_dolock(ap);
out:
mtx_unlock(&Giant);
error = vn_lock(vp, LK_SHARED);
if (error)
return (error);
if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
size = VTONFS(vp)->n_size;
VOP_UNLOCK(vp, 0);
error = lf_advlock(ap, &(vp->v_lockf), size);
} else
error = nfs_dolock(ap);
return (error);
}
@@ -3059,18 +3060,21 @@ nfs_advlock(struct vop_advlock_args *ap)
static int
nfs_advlockasync(struct vop_advlockasync_args *ap)
{
struct vnode *vp = ap->a_vp;
u_quad_t size;
int error;
mtx_lock(&Giant);
if ((VFSTONFS(ap->a_vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
struct nfsnode *np = VTONFS(ap->a_vp);
error = lf_advlockasync(ap, &(np->n_lockf), np->n_size);
goto out;
error = vn_lock(vp, LK_SHARED);
if (error)
return (error);
if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
size = VTONFS(vp)->n_size;
VOP_UNLOCK(vp, 0);
error = lf_advlockasync(ap, &(vp->v_lockf), size);
} else {
VOP_UNLOCK(vp, 0);
error = EOPNOTSUPP;
}
error = EOPNOTSUPP;
out:
mtx_unlock(&Giant);
return (error);
}


@@ -113,7 +113,6 @@ struct nfsnode {
nfsfh_t *n_fhp; /* NFS File Handle */
struct vnode *n_vnode; /* associated vnode */
struct vnode *n_dvp; /* parent vnode */
struct lockf *n_lockf; /* Locking record of file */
int n_error; /* Save write error value */
union {
struct timespec nf_atim; /* Special file times */


@@ -82,6 +82,11 @@ struct lockf_entry {
};
LIST_HEAD(lockf_entry_list, lockf_entry);
/*
* Extra lf_flags bits used by the implementation
*/
#define F_INTR 0x8000 /* lock was interrupted by lf_purgelocks */
/*
* Filesystem private node structures should include space for a
* pointer to a struct lockf_state. This pointer is used by the lock
@@ -115,6 +120,7 @@ LIST_HEAD(lockf_list, lockf);
int lf_advlock(struct vop_advlock_args *, struct lockf **, u_quad_t);
int lf_advlockasync(struct vop_advlockasync_args *, struct lockf **, u_quad_t);
void lf_purgelocks(struct vnode *vp, struct lockf **statep);
int lf_countlocks(int sysid);
void lf_clearremotesys(int sysid);


@@ -168,6 +168,7 @@ struct vnode {
*/
struct vpollinfo *v_pollinfo; /* G Poll events, p for *v_pi */
struct label *v_label; /* MAC label for vnode */
struct lockf *v_lockf; /* Byte-level lock list */
};
#endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
@@ -652,6 +653,8 @@ int vop_stdlock(struct vop_lock1_args *);
int vop_stdputpages(struct vop_putpages_args *);
int vop_stdunlock(struct vop_unlock_args *);
int vop_nopoll(struct vop_poll_args *);
int vop_stdadvlock(struct vop_advlock_args *ap);
int vop_stdadvlockasync(struct vop_advlockasync_args *ap);
int vop_stdpathconf(struct vop_pathconf_args *);
int vop_stdpoll(struct vop_poll_args *);
int vop_stdvptofh(struct vop_vptofh_args *ap);


@@ -75,7 +75,6 @@ struct inode {
struct fs *i_fs; /* Associated filesystem superblock. */
struct dquot *i_dquot[MAXQUOTAS]; /* Dquot structures. */
u_quad_t i_modrev; /* Revision level for NFS lease. */
struct lockf *i_lockf;/* Head of byte-level lock list. */
/*
* Side effects; used during directory lookup.
*/


@@ -91,8 +91,6 @@ __FBSDID("$FreeBSD$");
#include <ufs/ffs/ffs_extern.h>
static vop_access_t ufs_access;
static vop_advlock_t ufs_advlock;
static vop_advlockasync_t ufs_advlockasync;
static int ufs_chmod(struct vnode *, int, struct ucred *, struct thread *);
static int ufs_chown(struct vnode *, uid_t, gid_t, struct ucred *, struct thread *);
static vop_close_t ufs_close;
@@ -2164,43 +2162,6 @@ ufs_pathconf(ap)
return (error);
}
/*
* Advisory record locking support
*/
static int
ufs_advlock(ap)
struct vop_advlock_args /* {
struct vnode *a_vp;
caddr_t a_id;
int a_op;
struct flock *a_fl;
int a_flags;
} */ *ap;
{
struct inode *ip = VTOI(ap->a_vp);
return (lf_advlock(ap, &(ip->i_lockf), ip->i_size));
}
/*
* Advisory record locking support
*/
static int
ufs_advlockasync(ap)
struct vop_advlockasync_args /* {
struct vnode *a_vp;
caddr_t a_id;
int a_op;
struct flock *a_fl;
int a_flags;
struct task *a_task;
} */ *ap;
{
struct inode *ip = VTOI(ap->a_vp);
return (lf_advlockasync(ap, &(ip->i_lockf), ip->i_size));
}
/*
* Initialize the vnode associated with a new inode, handle aliased
* vnodes.
@@ -2468,8 +2429,6 @@ struct vop_vector ufs_vnodeops = {
.vop_reallocblks = VOP_PANIC,
.vop_write = VOP_PANIC,
.vop_access = ufs_access,
.vop_advlock = ufs_advlock,
.vop_advlockasync = ufs_advlockasync,
.vop_bmap = ufs_bmap,
.vop_cachedlookup = ufs_lookup,
.vop_close = ufs_close,