Decontextualize the vfs_busy(), vfs_unbusy() and vfs_mount_alloc() functions: drop their struct thread * argument, and have vfs_mount_alloc() take the struct ucred * it needs explicitly.

Manpages are updated accordingly.

Tested by:	Diego Sardina <siarodx at gmail dot com>
attilio 2008-08-31 14:26:08 +00:00
parent 2d23f13f7f
commit e2ca413d09
13 changed files with 59 additions and 70 deletions
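In short, the interface change (see the sys/mount.h hunk below) removes the struct thread argument from vfs_busy() and vfs_unbusy(), while vfs_mount_alloc() now takes the credentials it needs explicitly:

	/* Before: callers had to supply a thread context. */
	int	vfs_busy(struct mount *, int, struct mtx *, struct thread *);
	void	vfs_unbusy(struct mount *, struct thread *);
	struct mount *vfs_mount_alloc(struct vnode *, struct vfsconf *,
		    const char *, struct thread *);

	/* After: decontextualized; credentials are passed explicitly. */
	int	vfs_busy(struct mount *, int, struct mtx *);
	void	vfs_unbusy(struct mount *);
	struct mount *vfs_mount_alloc(struct vnode *, struct vfsconf *,
		    const char *, struct ucred *);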

View File

@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 8, 2001
.Dd August 29, 2008
.Dt VFS_BUSY 9
.Os
.Sh NAME
@ -36,7 +36,7 @@
.In sys/param.h
.In sys/mount.h
.Ft int
.Fn vfs_busy "struct mount *mp" "int flags" "struct mtx *interlkp" "struct thread *td"
.Fn vfs_busy "struct mount *mp" "int flags" "struct mtx *interlkp"
.Sh DESCRIPTION
The
.Fn vfs_busy
@ -77,8 +77,6 @@ the mount point is being unmounted and
.Dv LK_NOWAIT
is not set then
interlock must be valid locked mutex.
.It Fa td
The thread responsible for this call.
.El
.Sh LOCKS
If
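As a usage note for the updated interface: the common caller pattern after this change, with mountlist_mtx as the interlock, matches the loops updated in vfs_subr.c, vfs_syscalls.c and the softdep code below. The sketch is illustrative only; the wrapper function name and the per-mount work are placeholders.

	static void
	example_foreach_mount(void)
	{
		struct mount *mp, *nmp;

		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx)) {
				/* Interlock is still held on failure; skip. */
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			/* mountlist_mtx was dropped inside vfs_busy(). */
			/* ... operate on the busied mount point here ... */
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
	}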

View File

@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 8, 2001
.Dd August 29, 2008
.Dt VFS_UNBUSY 9
.Os
.Sh NAME
@ -36,7 +36,7 @@
.In sys/param.h
.In sys/mount.h
.Ft void
.Fn vfs_unbusy "struct mount *mp" "struct thread *td"
.Fn vfs_unbusy "struct mount *mp"
.Sh DESCRIPTION
The
.Fn vfs_unbusy
@ -50,8 +50,6 @@ Its arguments are:
.Bl -tag -width ".Fa mp"
.It Fa mp
The mount point to unbusy (unlock).
.It Fa td
The thread responsible for this call.
.El
.Sh LOCKS
.Va mnt_lock
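Where no interlock is involved, the pairing is a simple blocking busy/unbusy bracket, as in the updated quotactl() path later in this diff. A minimal sketch; the wrapper name is hypothetical and error handling beyond the busy call is elided:

	static int
	example_busied_quotactl(struct mount *mp, int cmd, int id, void *arg,
	    struct thread *td)
	{
		int error;

		error = vfs_busy(mp, 0, NULL);
		if (error != 0)
			return (error);
		error = VFS_QUOTACTL(mp, cmd, id, arg, td);
		vfs_unbusy(mp);
		return (error);
	}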

View File

@ -193,7 +193,7 @@ domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
* Allocate and initialize the filesystem.
*/
vn_lock(vp, LK_SHARED | LK_RETRY);
mp = vfs_mount_alloc(vp, vfsp, fspath, td);
mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
VOP_UNLOCK(vp, 0);
mp->mnt_optnew = NULL;
@ -263,7 +263,7 @@ domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
VOP_UNLOCK(vp, 0);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
error = vfs_allocate_syncvnode(mp);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
if (error)
vrele(vp);
else
@ -273,7 +273,7 @@ domount(kthread_t *td, vnode_t *vp, const char *fstype, char *fspath,
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
VOP_UNLOCK(vp, 0);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
vfs_mount_destroy(mp);
}
return (error);

View File

@ -2843,7 +2843,7 @@ g_journal_switch_wait(struct g_journal_softc *sc)
}
static void
g_journal_do_switch(struct g_class *classp, struct thread *td)
g_journal_do_switch(struct g_class *classp)
{
struct g_journal_softc *sc;
const struct g_journal_desc *desc;
@ -2879,7 +2879,7 @@ g_journal_do_switch(struct g_class *classp, struct thread *td)
desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
if (desc == NULL)
continue;
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx))
continue;
/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
@ -2973,7 +2973,7 @@ g_journal_do_switch(struct g_class *classp, struct thread *td)
vfs_write_resume(mp);
next:
mtx_lock(&mountlist_mtx);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
}
mtx_unlock(&mountlist_mtx);
@ -3011,7 +3011,6 @@ g_journal_do_switch(struct g_class *classp, struct thread *td)
static void
g_journal_switcher(void *arg)
{
struct thread *td = curthread;
struct g_class *mp;
struct bintime bt;
int error;
@ -3033,7 +3032,7 @@ g_journal_switcher(void *arg)
g_journal_cache_limit);
}
GJ_TIMER_START(1, &bt);
g_journal_do_switch(mp, td);
g_journal_do_switch(mp);
GJ_TIMER_STOP(1, &bt, "Entire switch time");
if (g_journal_sync_requested > 0) {
g_journal_sync_requested = 0;

View File

@ -171,10 +171,10 @@ ext2_mount(mp, td)
flags = WRITECLOSE;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
if (vfs_busy(mp, LK_NOWAIT, 0, td))
if (vfs_busy(mp, LK_NOWAIT, 0))
return (EBUSY);
error = ext2_flushfiles(mp, flags, td);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
if (!error && fs->s_wasvalid) {
fs->s_es->s_state |= EXT2_VALID_FS;
ext2_sbupdate(ump, MNT_WAIT);

View File

@ -688,7 +688,7 @@ lookup(struct nameidata *ndp)
*/
while (dp->v_type == VDIR && (mp = dp->v_mountedhere) &&
(cnp->cn_flags & NOCROSSMOUNT) == 0) {
if (vfs_busy(mp, 0, 0, td))
if (vfs_busy(mp, 0, 0))
continue;
vput(dp);
VFS_UNLOCK_GIANT(vfslocked);
@ -702,7 +702,7 @@ lookup(struct nameidata *ndp)
vref(vp_crossmp);
ndp->ni_dvp = vp_crossmp;
error = VFS_ROOT(mp, compute_cn_lkflags(mp, cnp->cn_lkflags), &tdp, td);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT))
panic("vp_crossmp exclusively locked or reclaimed");
if (error) {

View File

@ -472,8 +472,8 @@ mount_fini(void *mem, int size)
* Allocate and initialize the mount point struct.
*/
struct mount *
vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp,
const char *fspath, struct thread *td)
vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
struct ucred *cred)
{
struct mount *mp;
@ -483,7 +483,7 @@ vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp,
TAILQ_INIT(&mp->mnt_nvnodelist);
mp->mnt_nvnodelistsize = 0;
mp->mnt_ref = 0;
(void) vfs_busy(mp, LK_NOWAIT, 0, td);
(void) vfs_busy(mp, LK_NOWAIT, 0);
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_vfc = vfsp;
vfsp->vfc_refcount++; /* XXX Unlocked */
@ -491,13 +491,13 @@ vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp,
mp->mnt_gen++;
strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
mp->mnt_vnodecovered = vp;
mp->mnt_cred = crdup(td->td_ucred);
mp->mnt_stat.f_owner = td->td_ucred->cr_uid;
mp->mnt_cred = crdup(cred);
mp->mnt_stat.f_owner = cred->cr_uid;
strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
mp->mnt_iosize_max = DFLTPHYS;
#ifdef MAC
mac_mount_init(mp);
mac_mount_create(td->td_ucred, mp);
mac_mount_create(cred, mp);
#endif
arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
return (mp);
@ -932,7 +932,7 @@ vfs_domount(
vput(vp);
return (error);
}
if (vfs_busy(mp, LK_NOWAIT, 0, td)) {
if (vfs_busy(mp, LK_NOWAIT, 0)) {
vput(vp);
return (EBUSY);
}
@ -940,7 +940,7 @@ vfs_domount(
if ((vp->v_iflag & VI_MOUNT) != 0 ||
vp->v_mountedhere != NULL) {
VI_UNLOCK(vp);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
vput(vp);
return (EBUSY);
}
@ -993,7 +993,7 @@ vfs_domount(
/*
* Allocate and initialize the filesystem.
*/
mp = vfs_mount_alloc(vp, vfsp, fspath, td);
mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
VOP_UNLOCK(vp, 0);
/* XXXMAC: pass to vfs_mount_alloc? */
@ -1059,7 +1059,7 @@ vfs_domount(
vrele(mp->mnt_syncer);
mp->mnt_syncer = NULL;
}
vfs_unbusy(mp, td);
vfs_unbusy(mp);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
@ -1095,14 +1095,14 @@ vfs_domount(
VOP_UNLOCK(vp, 0);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
error = vfs_allocate_syncvnode(mp);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
if (error)
vrele(vp);
} else {
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
vfs_mount_destroy(mp);
vput(vp);
}
@ -1514,7 +1514,7 @@ devfs_first(void)
if (vfsp == NULL)
return;
mp = vfs_mount_alloc(NULLVP, vfsp, "/dev", td);
mp = vfs_mount_alloc(NULLVP, vfsp, "/dev", td->td_ucred);
error = VFS_MOUNT(mp, td);
KASSERT(error == 0, ("VFS_MOUNT(devfs) failed %d", error));
@ -1589,7 +1589,7 @@ devfs_fixup(struct thread *td)
mtx_unlock(&mountlist_mtx);
VOP_UNLOCK(vp, 0);
vput(dvp);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
/* Unlink the no longer needed /dev/dev -> / symlink */
kern_unlink(td, "/dev/dev", UIO_SYSSPACE);

View File

@ -338,8 +338,7 @@ SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
* unmounting. Interlock is not released on failure.
*/
int
vfs_busy(struct mount *mp, int flags, struct mtx *interlkp,
struct thread *td)
vfs_busy(struct mount *mp, int flags, struct mtx *interlkp)
{
int lkflags;
@ -379,7 +378,7 @@ vfs_busy(struct mount *mp, int flags, struct mtx *interlkp,
* Free a busy filesystem.
*/
void
vfs_unbusy(struct mount *mp, struct thread *td)
vfs_unbusy(struct mount *mp)
{
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
@ -573,7 +572,6 @@ vattr_null(struct vattr *vap)
static int
vlrureclaim(struct mount *mp)
{
struct thread *td;
struct vnode *vp;
int done;
int trigger;
@ -592,7 +590,6 @@ vlrureclaim(struct mount *mp)
usevnodes = 1;
trigger = cnt.v_page_count * 2 / usevnodes;
done = 0;
td = curthread;
vn_start_write(NULL, &mp, V_WAIT);
MNT_ILOCK(mp);
count = mp->mnt_nvnodelistsize / 10 + 1;
@ -727,7 +724,6 @@ vnlru_proc(void)
struct mount *mp, *nmp;
int done;
struct proc *p = vnlruproc;
struct thread *td = curthread;
EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
SHUTDOWN_PRI_FIRST);
@ -751,7 +747,7 @@ vnlru_proc(void)
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
int vfsunlocked;
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@ -765,7 +761,7 @@ vnlru_proc(void)
mtx_lock(&Giant);
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
}
mtx_unlock(&mountlist_mtx);
if (done == 0) {
@ -2988,7 +2984,6 @@ static int
sysctl_vnode(SYSCTL_HANDLER_ARGS)
{
struct xvnode *xvn;
struct thread *td = req->td;
struct mount *mp;
struct vnode *vp;
int error, len, n;
@ -3009,7 +3004,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
n = 0;
mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx))
continue;
MNT_ILOCK(mp);
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
@ -3060,7 +3055,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
}
MNT_IUNLOCK(mp);
mtx_lock(&mountlist_mtx);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
if (n == len)
break;
}
@ -3337,7 +3332,6 @@ sync_fsync(struct vop_fsync_args *ap)
{
struct vnode *syncvp = ap->a_vp;
struct mount *mp = syncvp->v_mount;
struct thread *td = ap->a_td;
int error;
struct bufobj *bo;
@ -3360,12 +3354,12 @@ sync_fsync(struct vop_fsync_args *ap)
* not already on the sync list.
*/
mtx_lock(&mountlist_mtx);
if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx) != 0) {
mtx_unlock(&mountlist_mtx);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
vfs_unbusy(mp, td);
vfs_unbusy(mp);
return (0);
}
MNT_ILOCK(mp);
@ -3373,14 +3367,14 @@ sync_fsync(struct vop_fsync_args *ap)
mp->mnt_kern_flag &= ~MNTK_ASYNC;
MNT_IUNLOCK(mp);
vfs_msync(mp, MNT_NOWAIT);
error = VFS_SYNC(mp, MNT_LAZY, td);
error = VFS_SYNC(mp, MNT_LAZY, ap->a_td);
MNT_ILOCK(mp);
mp->mnt_noasync--;
if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
mp->mnt_kern_flag |= MNTK_ASYNC;
MNT_IUNLOCK(mp);
vn_finished_write(mp);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
return (error);
}

View File

@ -124,7 +124,7 @@ sync(td, uap)
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@ -148,7 +148,7 @@ sync(td, uap)
VFS_UNLOCK_GIANT(vfslocked);
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
}
mtx_unlock(&mountlist_mtx);
return (0);
@ -197,14 +197,14 @@ quotactl(td, uap)
vfslocked = NDHASGIANT(&nd);
NDFREE(&nd, NDF_ONLY_PNBUF);
mp = nd.ni_vp->v_mount;
if ((error = vfs_busy(mp, 0, NULL, td))) {
if ((error = vfs_busy(mp, 0, NULL))) {
vrele(nd.ni_vp);
VFS_UNLOCK_GIANT(vfslocked);
return (error);
}
vrele(nd.ni_vp);
error = VFS_QUOTACTL(mp, uap->cmd, uap->uid, uap->arg, td);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
VFS_UNLOCK_GIANT(vfslocked);
return (error);
}
@ -479,7 +479,7 @@ kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
continue;
}
#endif
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@ -504,7 +504,7 @@ kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
VFS_UNLOCK_GIANT(vfslocked);
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
continue;
}
if (priv_check(td, PRIV_VFS_GENERATION)) {
@ -518,7 +518,7 @@ kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
else /* if (bufseg == UIO_USERSPACE) */ {
error = copyout(sp, sfsp, sizeof(*sp));
if (error) {
vfs_unbusy(mp, td);
vfs_unbusy(mp);
VFS_UNLOCK_GIANT(vfslocked);
return (error);
}
@ -529,7 +529,7 @@ kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
count++;
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
}
mtx_unlock(&mountlist_mtx);
if (sfsp && count > maxcount)
@ -741,11 +741,11 @@ fchdir(td, uap)
error = change_dir(vp, td);
while (!error && (mp = vp->v_mountedhere) != NULL) {
int tvfslocked;
if (vfs_busy(mp, 0, 0, td))
if (vfs_busy(mp, 0, 0))
continue;
tvfslocked = VFS_LOCK_GIANT(mp);
error = VFS_ROOT(mp, LK_EXCLUSIVE, &tdp, td);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
if (error) {
VFS_UNLOCK_GIANT(tvfslocked);
break;

View File

@ -258,12 +258,12 @@ nfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td)
#ifndef nolint
sfp = NULL;
#endif
error = vfs_busy(mp, LK_NOWAIT, NULL, td);
error = vfs_busy(mp, LK_NOWAIT, NULL);
if (error)
return (error);
error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
if (error) {
vfs_unbusy(mp, td);
vfs_unbusy(mp);
return (error);
}
vp = NFSTOV(np);
@ -313,7 +313,7 @@ nfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td)
m_freem(mrep);
nfsmout:
vput(vp);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
return (error);
}

View File

@ -680,7 +680,7 @@ int vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
int vfs_setpublicfs /* set publicly exported fs */
(struct mount *, struct netexport *, struct export_args *);
void vfs_msync(struct mount *, int);
int vfs_busy(struct mount *, int, struct mtx *, struct thread *);
int vfs_busy(struct mount *, int, struct mtx *);
int vfs_export /* process mount export info */
(struct mount *, struct export_args *);
int vfs_allocate_syncvnode(struct mount *);
@ -694,9 +694,9 @@ void vfs_mountedfrom(struct mount *, const char *from);
void vfs_ref(struct mount *);
void vfs_rel(struct mount *);
struct mount *vfs_mount_alloc(struct vnode *, struct vfsconf *, const char *,
struct thread *);
struct ucred *);
int vfs_suser(struct mount *, struct thread *);
void vfs_unbusy(struct mount *, struct thread *);
void vfs_unbusy(struct mount *);
void vfs_unmountall(void);
extern TAILQ_HEAD(mntlist, mount) mountlist; /* mounted filesystem list */
extern struct mtx mountlist_mtx;
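For vfs_mount_alloc() callers the migration is mechanical. As in the domount() and devfs_first() hunks above, a call such as

	mp = vfs_mount_alloc(vp, vfsp, fspath, td);

becomes

	mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);

and code paths that have no meaningful curthread can now pass a saved struct ucred pointer instead.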

View File

@ -742,7 +742,7 @@ softdep_flush(void)
nmp = TAILQ_NEXT(mp, mnt_list);
if ((mp->mnt_flag & MNT_SOFTDEP) == 0)
continue;
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx))
continue;
vfslocked = VFS_LOCK_GIANT(mp);
softdep_process_worklist(mp, 0);
@ -752,7 +752,7 @@ softdep_flush(void)
VFS_UNLOCK_GIANT(vfslocked);
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, td);
vfs_unbusy(mp);
}
mtx_unlock(&mountlist_mtx);
if (remaining)

View File

@ -118,7 +118,7 @@ ufs_quotactl(mp, cmds, id, arg, td)
if ((u_int)type >= MAXQUOTAS)
return (EINVAL);
if (vfs_busy(mp, LK_NOWAIT, 0, td))
if (vfs_busy(mp, LK_NOWAIT, 0))
return (0);
switch (cmd) {
@ -150,7 +150,7 @@ ufs_quotactl(mp, cmds, id, arg, td)
error = EINVAL;
break;
}
vfs_unbusy(mp, td);
vfs_unbusy(mp);
return (error);
#endif
}