Import 4.4BSD-Lite2 onto the vendor branch, note that in the kernel, all

files are off the vendor branch, so this should not change anything.

A "U" marker generally means that the file was not changed in between
the 4.4Lite and Lite-2 releases, and does not need a merge.  "C" generally
means that there was a change.
[two new auxiliary files in miscfs/union]
This commit is contained in:
peter 1996-03-11 19:29:25 +00:00
parent 65ef93bb55
commit 1714e39dd6
38 changed files with 3424 additions and 1701 deletions

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)dead_vnops.c 8.1 (Berkeley) 6/10/93
* @(#)dead_vnops.c 8.3 (Berkeley) 5/14/95
*/
#include <sys/param.h>
@ -73,11 +73,11 @@ int dead_select __P((struct vop_select_args *));
#define dead_inactive ((int (*) __P((struct vop_inactive_args *)))nullop)
#define dead_reclaim ((int (*) __P((struct vop_reclaim_args *)))nullop)
int dead_lock __P((struct vop_lock_args *));
#define dead_unlock ((int (*) __P((struct vop_unlock_args *)))nullop)
#define dead_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
int dead_bmap __P((struct vop_bmap_args *));
int dead_strategy __P((struct vop_strategy_args *));
int dead_print __P((struct vop_print_args *));
#define dead_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
#define dead_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
#define dead_pathconf ((int (*) __P((struct vop_pathconf_args *)))dead_ebadf)
#define dead_advlock ((int (*) __P((struct vop_advlock_args *)))dead_ebadf)
#define dead_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))dead_badop)
@ -93,20 +93,20 @@ struct vnodeopv_entry_desc dead_vnodeop_entries[] = {
{ &vop_lookup_desc, dead_lookup }, /* lookup */
{ &vop_create_desc, dead_create }, /* create */
{ &vop_mknod_desc, dead_mknod }, /* mknod */
{ &vop_open_desc, dead_open }, /* open */
{ &vop_open_desc, dead_open }, /* open */
{ &vop_close_desc, dead_close }, /* close */
{ &vop_access_desc, dead_access }, /* access */
{ &vop_getattr_desc, dead_getattr }, /* getattr */
{ &vop_setattr_desc, dead_setattr }, /* setattr */
{ &vop_read_desc, dead_read }, /* read */
{ &vop_read_desc, dead_read }, /* read */
{ &vop_write_desc, dead_write }, /* write */
{ &vop_ioctl_desc, dead_ioctl }, /* ioctl */
{ &vop_select_desc, dead_select }, /* select */
{ &vop_mmap_desc, dead_mmap }, /* mmap */
{ &vop_mmap_desc, dead_mmap }, /* mmap */
{ &vop_fsync_desc, dead_fsync }, /* fsync */
{ &vop_seek_desc, dead_seek }, /* seek */
{ &vop_seek_desc, dead_seek }, /* seek */
{ &vop_remove_desc, dead_remove }, /* remove */
{ &vop_link_desc, dead_link }, /* link */
{ &vop_link_desc, dead_link }, /* link */
{ &vop_rename_desc, dead_rename }, /* rename */
{ &vop_mkdir_desc, dead_mkdir }, /* mkdir */
{ &vop_rmdir_desc, dead_rmdir }, /* rmdir */
@ -116,9 +116,9 @@ struct vnodeopv_entry_desc dead_vnodeop_entries[] = {
{ &vop_abortop_desc, dead_abortop }, /* abortop */
{ &vop_inactive_desc, dead_inactive }, /* inactive */
{ &vop_reclaim_desc, dead_reclaim }, /* reclaim */
{ &vop_lock_desc, dead_lock }, /* lock */
{ &vop_lock_desc, dead_lock }, /* lock */
{ &vop_unlock_desc, dead_unlock }, /* unlock */
{ &vop_bmap_desc, dead_bmap }, /* bmap */
{ &vop_bmap_desc, dead_bmap }, /* bmap */
{ &vop_strategy_desc, dead_strategy }, /* strategy */
{ &vop_print_desc, dead_print }, /* print */
{ &vop_islocked_desc, dead_islocked }, /* islocked */
@ -184,9 +184,9 @@ dead_read(ap)
if (chkvnlock(ap->a_vp))
panic("dead_read: lock");
/*
* Return EOF for character devices, EIO for others
* Return EOF for tty devices, EIO for others
*/
if (ap->a_vp->v_type != VCHR)
if ((ap->a_vp->v_flag & VISTTY) == 0)
return (EIO);
return (0);
}
@ -269,12 +269,23 @@ dead_strategy(ap)
dead_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
if (!chkvnlock(ap->a_vp))
/*
* Since we are not using the lock manager, we must clear
* the interlock here.
*/
if (ap->a_flags & LK_INTERLOCK) {
simple_unlock(&vp->v_interlock);
ap->a_flags &= ~LK_INTERLOCK;
}
if (!chkvnlock(vp))
return (0);
return (VCALL(ap->a_vp, VOFFSET(vop_lock), ap));
return (VCALL(vp, VOFFSET(vop_lock), ap));
}
/*
@ -327,15 +338,6 @@ dead_badop()
/* NOTREACHED */
}
/*
 * Empty vnode null operation: does nothing and reports success.
 * Backs dead-filesystem vnode operations that need no work.
 */
dead_nullop()
{

	return (0);
}
/*
* We have to wait during times when the vnode is
* in a state of change.

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)fdesc.h 8.5 (Berkeley) 1/21/94
* @(#)fdesc.h 8.8 (Berkeley) 4/3/95
*
* $Id: fdesc.h,v 1.8 1993/04/06 15:28:33 jsp Exp $
*/
@ -61,8 +61,7 @@ typedef enum {
} fdntype;
struct fdescnode {
struct fdescnode *fd_forw; /* Hash chain */
struct fdescnode *fd_back;
LIST_ENTRY(fdescnode) fd_hash; /* Hash list */
struct vnode *fd_vnode; /* Back ptr to vnode */
fdntype fd_type; /* Type of this node */
unsigned fd_fd; /* Fd to be dup'ed */
@ -74,7 +73,7 @@ struct fdescnode {
#define VTOFDESC(vp) ((struct fdescnode *)(vp)->v_data)
extern dev_t devctty;
extern int fdesc_init __P((void));
extern int fdesc_init __P((struct vfsconf *));
extern int fdesc_root __P((struct mount *, struct vnode **));
extern int fdesc_allocvp __P((fdntype, int, struct mount *, struct vnode **));
extern int (**fdesc_vnodeop_p)();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1992, 1993
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)fdesc_vfsops.c 8.4 (Berkeley) 1/21/94
* @(#)fdesc_vfsops.c 8.10 (Berkeley) 5/14/95
*
* $Id: fdesc_vfsops.c,v 1.9 1993/04/06 15:28:33 jsp Exp $
*/
@ -89,7 +89,7 @@ fdesc_mount(mp, path, data, ndp, p)
/* XXX -- don't mark as local to work around fts() problems */
/*mp->mnt_flag |= MNT_LOCAL;*/
mp->mnt_data = (qaddr_t) fmp;
getnewfsid(mp, MOUNT_FDESC);
vfs_getnewfsid(mp);
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
@ -115,15 +115,10 @@ fdesc_unmount(mp, mntflags, p)
{
int error;
int flags = 0;
extern int doforce;
struct vnode *rootvp = VFSTOFDESC(mp)->f_root;
if (mntflags & MNT_FORCE) {
/* fdesc can never be rootfs so don't check for it */
if (!doforce)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
/*
* Clear out buffer cache. I don't think we
@ -157,6 +152,7 @@ fdesc_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct proc *p = curproc; /* XXX */
struct vnode *vp;
/*
@ -164,23 +160,11 @@ fdesc_root(mp, vpp)
*/
vp = VFSTOFDESC(mp)->f_root;
VREF(vp);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
*vpp = vp;
return (0);
}
/*
 * Quota operations are not supported on an fdesc mount;
 * always fails with EOPNOTSUPP.  All arguments are ignored.
 */
int
fdesc_quotactl(mp, cmd, uid, arg, p)
	struct mount *mp;	/* unused */
	int cmd;		/* unused */
	uid_t uid;		/* unused */
	caddr_t arg;		/* unused */
	struct proc *p;		/* unused */
{

	return (EOPNOTSUPP);
}
int
fdesc_statfs(mp, sbp, p)
struct mount *mp;
@ -214,7 +198,6 @@ fdesc_statfs(mp, sbp, p)
if (fdp->fd_nfiles < lim)
freefd += (lim - fdp->fd_nfiles);
sbp->f_type = MOUNT_FDESC;
sbp->f_flags = 0;
sbp->f_bsize = DEV_BSIZE;
sbp->f_iosize = DEV_BSIZE;
@ -224,6 +207,7 @@ fdesc_statfs(mp, sbp, p)
sbp->f_files = lim + 1; /* Allow for "." */
sbp->f_ffree = freefd; /* See comments above */
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
@ -240,38 +224,15 @@ fdesc_sync(mp, waitfor)
return (0);
}
/*
 * Fdesc flat namespace (inode-number) lookup.
 * Currently unsupported: always returns EOPNOTSUPP
 * and never produces a vnode in *vpp.
 */
int
fdesc_vget(mp, ino, vpp)
	struct mount *mp;	/* unused */
	ino_t ino;		/* unused */
	struct vnode **vpp;	/* never written */
{

	return (EOPNOTSUPP);
}
/*
 * File-handle to vnode translation is not supported for fdesc;
 * always fails with EOPNOTSUPP.
 */
int
fdesc_fhtovp(mp, fhp, setgen, vpp)
	struct mount *mp;	/* unused */
	struct fid *fhp;	/* unused */
	int setgen;		/* unused */
	struct vnode **vpp;	/* never written */
{

	return (EOPNOTSUPP);
}
/*
 * Vnode to file-handle translation is not supported for fdesc;
 * always fails with EOPNOTSUPP.
 */
int
fdesc_vptofh(vp, fhp)
	struct vnode *vp;	/* unused */
	struct fid *fhp;	/* never written */
{

	return (EOPNOTSUPP);
}
#define fdesc_fhtovp ((int (*) __P((struct mount *, struct fid *, \
struct mbuf *, struct vnode **, int *, struct ucred **)))eopnotsupp)
#define fdesc_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \
struct proc *)))eopnotsupp)
#define fdesc_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
#define fdesc_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \
eopnotsupp)
#define fdesc_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp)
struct vfsops fdesc_vfsops = {
fdesc_mount,
@ -285,4 +246,5 @@ struct vfsops fdesc_vfsops = {
fdesc_fhtovp,
fdesc_vptofh,
fdesc_init,
fdesc_sysctl,
};

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)fdesc_vnops.c 8.9 (Berkeley) 1/21/94
* @(#)fdesc_vnops.c 8.17 (Berkeley) 5/22/95
*
* $Id: fdesc_vnops.c,v 1.12 1993/04/06 16:17:17 jsp Exp $
*/
@ -72,41 +72,22 @@ dev_t devctty;
FD_STDIN, FD_STDOUT, FD_STDERR must be a sequence n, n+1, n+2
#endif
#define NFDCACHE 3
#define FD_NHASH(ix) ((ix) & NFDCACHE)
#define NFDCACHE 4
/*
* Cache head
*/
struct fdcache {
struct fdescnode *fc_forw;
struct fdescnode *fc_back;
};
static struct fdcache fdcache[NFDCACHE];
#define FD_NHASH(ix) \
(&fdhashtbl[(ix) & fdhash])
LIST_HEAD(fdhashhead, fdescnode) *fdhashtbl;
u_long fdhash;
/*
* Initialise cache headers
*/
fdesc_init()
fdesc_init(vfsp)
struct vfsconf *vfsp;
{
struct fdcache *fc;
devctty = makedev(nchrdev, 0);
for (fc = fdcache; fc < fdcache + NFDCACHE; fc++)
fc->fc_forw = fc->fc_back = (struct fdescnode *) fc;
}
/*
* Compute hash list for given target vnode
*/
static struct fdcache *
fdesc_hash(ix)
int ix;
{
return (&fdcache[FD_NHASH(ix)]);
fdhashtbl = hashinit(NFDCACHE, M_CACHE, &fdhash);
}
int
@ -116,15 +97,16 @@ fdesc_allocvp(ftype, ix, mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct fdcache *fc;
struct proc *p = curproc; /* XXX */
struct fdhashhead *fc;
struct fdescnode *fd;
int error = 0;
fc = FD_NHASH(ix);
loop:
fc = fdesc_hash(ix);
for (fd = fc->fc_forw; fd != (struct fdescnode *) fc; fd = fd->fd_forw) {
for (fd = fc->lh_first; fd != 0; fd = fd->fd_hash.le_next) {
if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
if (vget(fd->fd_vnode, 0))
if (vget(fd->fd_vnode, 0, p))
goto loop;
*vpp = fd->fd_vnode;
return (error);
@ -152,8 +134,7 @@ fdesc_allocvp(ftype, ix, mp, vpp)
fd->fd_fd = -1;
fd->fd_link = 0;
fd->fd_ix = ix;
fc = fdesc_hash(ix);
insque(fd, fc);
LIST_INSERT_HEAD(fc, fd, fd_hash);
out:;
fdcache_lock &= ~FDL_LOCKED;
@ -180,25 +161,23 @@ fdesc_lookup(ap)
{
struct vnode **vpp = ap->a_vpp;
struct vnode *dvp = ap->a_dvp;
char *pname;
struct proc *p;
int nfiles;
struct componentname *cnp = ap->a_cnp;
char *pname = cnp->cn_nameptr;
struct proc *p = cnp->cn_proc;
int nfiles = p->p_fd->fd_nfiles;
unsigned fd;
int error;
struct vnode *fvp;
char *ln;
pname = ap->a_cnp->cn_nameptr;
if (ap->a_cnp->cn_namelen == 1 && *pname == '.') {
VOP_UNLOCK(dvp, 0, p);
if (cnp->cn_namelen == 1 && *pname == '.') {
*vpp = dvp;
VREF(dvp);
VOP_LOCK(dvp);
vn_lock(dvp, LK_SHARED | LK_RETRY, p);
return (0);
}
p = ap->a_cnp->cn_proc;
nfiles = p->p_fd->fd_nfiles;
switch (VTOFDESC(dvp)->fd_type) {
default:
case Flink:
@ -208,17 +187,17 @@ fdesc_lookup(ap)
goto bad;
case Froot:
if (ap->a_cnp->cn_namelen == 2 && bcmp(pname, "fd", 2) == 0) {
if (cnp->cn_namelen == 2 && bcmp(pname, "fd", 2) == 0) {
error = fdesc_allocvp(Fdevfd, FD_DEVFD, dvp->v_mount, &fvp);
if (error)
goto bad;
*vpp = fvp;
fvp->v_type = VDIR;
VOP_LOCK(fvp);
vn_lock(fvp, LK_SHARED | LK_RETRY, p);
return (0);
}
if (ap->a_cnp->cn_namelen == 3 && bcmp(pname, "tty", 3) == 0) {
if (cnp->cn_namelen == 3 && bcmp(pname, "tty", 3) == 0) {
struct vnode *ttyvp = cttyvp(p);
if (ttyvp == NULL) {
error = ENXIO;
@ -229,12 +208,12 @@ fdesc_lookup(ap)
goto bad;
*vpp = fvp;
fvp->v_type = VFIFO;
VOP_LOCK(fvp);
vn_lock(fvp, LK_SHARED | LK_RETRY, p);
return (0);
}
ln = 0;
switch (ap->a_cnp->cn_namelen) {
switch (cnp->cn_namelen) {
case 5:
if (bcmp(pname, "stdin", 5) == 0) {
ln = "fd/0";
@ -260,7 +239,7 @@ fdesc_lookup(ap)
VTOFDESC(fvp)->fd_link = ln;
*vpp = fvp;
fvp->v_type = VLNK;
VOP_LOCK(fvp);
vn_lock(fvp, LK_SHARED | LK_RETRY, p);
return (0);
} else {
error = ENOENT;
@ -270,9 +249,10 @@ fdesc_lookup(ap)
/* FALL THROUGH */
case Fdevfd:
if (ap->a_cnp->cn_namelen == 2 && bcmp(pname, "..", 2) == 0) {
error = fdesc_root(dvp->v_mount, vpp);
return (error);
if (cnp->cn_namelen == 2 && bcmp(pname, "..", 2) == 0) {
if (error = fdesc_root(dvp->v_mount, vpp))
goto bad;
return (0);
}
fd = 0;
@ -296,11 +276,13 @@ fdesc_lookup(ap)
if (error)
goto bad;
VTOFDESC(fvp)->fd_fd = fd;
vn_lock(fvp, LK_SHARED | LK_RETRY, p);
*vpp = fvp;
return (0);
}
bad:;
vn_lock(dvp, LK_SHARED | LK_RETRY, p);
*vpp = NULL;
return (error);
}
@ -359,10 +341,10 @@ fdesc_attr(fd, vap, cred, p)
error = VOP_GETATTR((struct vnode *) fp->f_data, vap, cred, p);
if (error == 0 && vap->va_type == VDIR) {
/*
* don't allow directories to show up because
* that causes loops in the namespace.
* directories can cause loops in the namespace,
* so turn off the 'x' bits to avoid trouble.
*/
vap->va_type = VFIFO;
vap->va_mode &= ~((VEXEC)|(VEXEC>>3)|(VEXEC>>6));
}
break;
@ -547,6 +529,9 @@ fdesc_readdir(ap)
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
u_long *a_cookies;
int a_ncookies;
} */ *ap;
{
struct uio *uio = ap->a_uio;
@ -554,6 +539,13 @@ fdesc_readdir(ap)
int i;
int error;
/*
* We don't allow exporting fdesc mounts, and currently local
* requests do not need cookies.
*/
if (ap->a_ncookies)
panic("fdesc_readdir: not hungry");
switch (VTOFDESC(ap->a_vp)->fd_type) {
case Fctty:
return (0);
@ -770,6 +762,7 @@ int
fdesc_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@ -778,6 +771,7 @@ fdesc_inactive(ap)
* Clear out the v_type field to avoid
* nasty things happening in vgone().
*/
VOP_UNLOCK(vp, 0, ap->a_p);
vp->v_type = VNON;
return (0);
}
@ -789,8 +783,9 @@ fdesc_reclaim(ap)
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct fdescnode *fd = VTOFDESC(vp);
remque(VTOFDESC(vp));
LIST_REMOVE(fd, fd_hash);
FREE(vp->v_data, M_TEMP);
vp->v_data = 0;
@ -861,16 +856,6 @@ fdesc_vfree(ap)
return (0);
}
/*
 * /dev/fd vnode unsupported operation: generic stub used to back
 * any vnode operation fdesc does not implement.  Always fails
 * with EOPNOTSUPP.
 */
int
fdesc_enotsupp()
{

	return (EOPNOTSUPP);
}
/*
* /dev/fd "should never get here" operation
*/
@ -882,48 +867,39 @@ fdesc_badop()
/* NOTREACHED */
}
/*
 * /dev/fd vnode null operation: does nothing and reports success.
 * Backs vnode operations that require no work on fdesc vnodes.
 */
int
fdesc_nullop()
{

	return (0);
}
#define fdesc_create ((int (*) __P((struct vop_create_args *)))fdesc_enotsupp)
#define fdesc_mknod ((int (*) __P((struct vop_mknod_args *)))fdesc_enotsupp)
#define fdesc_create ((int (*) __P((struct vop_create_args *)))eopnotsupp)
#define fdesc_mknod ((int (*) __P((struct vop_mknod_args *)))eopnotsupp)
#define fdesc_close ((int (*) __P((struct vop_close_args *)))nullop)
#define fdesc_access ((int (*) __P((struct vop_access_args *)))nullop)
#define fdesc_mmap ((int (*) __P((struct vop_mmap_args *)))fdesc_enotsupp)
#define fdesc_mmap ((int (*) __P((struct vop_mmap_args *)))eopnotsupp)
#define fdesc_revoke vop_revoke
#define fdesc_fsync ((int (*) __P((struct vop_fsync_args *)))nullop)
#define fdesc_seek ((int (*) __P((struct vop_seek_args *)))nullop)
#define fdesc_remove ((int (*) __P((struct vop_remove_args *)))fdesc_enotsupp)
#define fdesc_link ((int (*) __P((struct vop_link_args *)))fdesc_enotsupp)
#define fdesc_rename ((int (*) __P((struct vop_rename_args *)))fdesc_enotsupp)
#define fdesc_mkdir ((int (*) __P((struct vop_mkdir_args *)))fdesc_enotsupp)
#define fdesc_rmdir ((int (*) __P((struct vop_rmdir_args *)))fdesc_enotsupp)
#define fdesc_symlink ((int (*) __P((struct vop_symlink_args *)))fdesc_enotsupp)
#define fdesc_remove ((int (*) __P((struct vop_remove_args *)))eopnotsupp)
#define fdesc_link ((int (*) __P((struct vop_link_args *)))eopnotsupp)
#define fdesc_rename ((int (*) __P((struct vop_rename_args *)))eopnotsupp)
#define fdesc_mkdir ((int (*) __P((struct vop_mkdir_args *)))eopnotsupp)
#define fdesc_rmdir ((int (*) __P((struct vop_rmdir_args *)))eopnotsupp)
#define fdesc_symlink ((int (*) __P((struct vop_symlink_args *)))eopnotsupp)
#define fdesc_abortop ((int (*) __P((struct vop_abortop_args *)))nullop)
#define fdesc_lock ((int (*) __P((struct vop_lock_args *)))nullop)
#define fdesc_unlock ((int (*) __P((struct vop_unlock_args *)))nullop)
#define fdesc_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define fdesc_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
#define fdesc_bmap ((int (*) __P((struct vop_bmap_args *)))fdesc_badop)
#define fdesc_strategy ((int (*) __P((struct vop_strategy_args *)))fdesc_badop)
#define fdesc_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
#define fdesc_advlock ((int (*) __P((struct vop_advlock_args *)))fdesc_enotsupp)
#define fdesc_islocked \
((int (*) __P((struct vop_islocked_args *)))vop_noislocked)
#define fdesc_advlock ((int (*) __P((struct vop_advlock_args *)))eopnotsupp)
#define fdesc_blkatoff \
((int (*) __P((struct vop_blkatoff_args *)))fdesc_enotsupp)
#define fdesc_vget ((int (*) __P((struct vop_vget_args *)))fdesc_enotsupp)
((int (*) __P((struct vop_blkatoff_args *)))eopnotsupp)
#define fdesc_valloc ((int(*) __P(( \
struct vnode *pvp, \
int mode, \
struct ucred *cred, \
struct vnode **vpp))) fdesc_enotsupp)
struct vnode **vpp))) eopnotsupp)
#define fdesc_truncate \
((int (*) __P((struct vop_truncate_args *)))fdesc_enotsupp)
#define fdesc_update ((int (*) __P((struct vop_update_args *)))fdesc_enotsupp)
#define fdesc_bwrite ((int (*) __P((struct vop_bwrite_args *)))fdesc_enotsupp)
((int (*) __P((struct vop_truncate_args *)))eopnotsupp)
#define fdesc_update ((int (*) __P((struct vop_update_args *)))eopnotsupp)
#define fdesc_bwrite ((int (*) __P((struct vop_bwrite_args *)))eopnotsupp)
int (**fdesc_vnodeop_p)();
struct vnodeopv_entry_desc fdesc_vnodeop_entries[] = {
@ -940,6 +916,7 @@ struct vnodeopv_entry_desc fdesc_vnodeop_entries[] = {
{ &vop_write_desc, fdesc_write }, /* write */
{ &vop_ioctl_desc, fdesc_ioctl }, /* ioctl */
{ &vop_select_desc, fdesc_select }, /* select */
{ &vop_revoke_desc, fdesc_revoke }, /* revoke */
{ &vop_mmap_desc, fdesc_mmap }, /* mmap */
{ &vop_fsync_desc, fdesc_fsync }, /* fsync */
{ &vop_seek_desc, fdesc_seek }, /* seek */

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)fifo.h 8.2 (Berkeley) 2/2/94
* @(#)fifo.h 8.6 (Berkeley) 5/21/95
*/
#ifdef FIFO
@ -50,8 +50,10 @@ int fifo_close __P((struct vop_close_args *));
#define fifo_setattr ((int (*) __P((struct vop_setattr_args *)))fifo_ebadf)
int fifo_read __P((struct vop_read_args *));
int fifo_write __P((struct vop_write_args *));
#define fifo_lease_check ((int (*) __P((struct vop_lease_args *)))nullop)
int fifo_ioctl __P((struct vop_ioctl_args *));
int fifo_select __P((struct vop_select_args *));
#define fifo_revoke vop_revoke
#define fifo_mmap ((int (*) __P((struct vop_mmap_args *)))fifo_badop)
#define fifo_fsync ((int (*) __P((struct vop_fsync_args *)))nullop)
#define fifo_seek ((int (*) __P((struct vop_seek_args *)))fifo_badop)
@ -64,14 +66,14 @@ int fifo_select __P((struct vop_select_args *));
#define fifo_readdir ((int (*) __P((struct vop_readdir_args *)))fifo_badop)
#define fifo_readlink ((int (*) __P((struct vop_readlink_args *)))fifo_badop)
#define fifo_abortop ((int (*) __P((struct vop_abortop_args *)))fifo_badop)
#define fifo_inactive ((int (*) __P((struct vop_inactive_args *)))nullop)
int fifo_inactive __P((struct vop_inactive_args *));
#define fifo_reclaim ((int (*) __P((struct vop_reclaim_args *)))nullop)
int fifo_lock __P((struct vop_lock_args *));
int fifo_unlock __P((struct vop_unlock_args *));
#define fifo_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define fifo_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
int fifo_bmap __P((struct vop_bmap_args *));
#define fifo_strategy ((int (*) __P((struct vop_strategy_args *)))fifo_badop)
int fifo_print __P((struct vop_print_args *));
#define fifo_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
#define fifo_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
int fifo_pathconf __P((struct vop_pathconf_args *));
int fifo_advlock __P((struct vop_advlock_args *));
#define fifo_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))fifo_badop)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1990, 1993
* Copyright (c) 1990, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,10 +30,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)fifo_vnops.c 8.2 (Berkeley) 1/4/94
* @(#)fifo_vnops.c 8.10 (Berkeley) 5/27/95
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/namei.h>
@ -41,7 +42,6 @@
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/errno.h>
@ -72,8 +72,10 @@ struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
{ &vop_setattr_desc, fifo_setattr }, /* setattr */
{ &vop_read_desc, fifo_read }, /* read */
{ &vop_write_desc, fifo_write }, /* write */
{ &vop_lease_desc, fifo_lease_check }, /* lease */
{ &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
{ &vop_select_desc, fifo_select }, /* select */
{ &vop_revoke_desc, fifo_revoke }, /* revoke */
{ &vop_mmap_desc, fifo_mmap }, /* mmap */
{ &vop_fsync_desc, fifo_fsync }, /* fsync */
{ &vop_seek_desc, fifo_seek }, /* seek */
@ -136,24 +138,23 @@ fifo_open(ap)
struct proc *a_p;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
register struct fifoinfo *fip;
struct vnode *vp = ap->a_vp;
struct fifoinfo *fip;
struct proc *p = ap->a_p;
struct socket *rso, *wso;
int error;
static char openstr[] = "fifo";
if ((ap->a_mode & (FREAD|FWRITE)) == (FREAD|FWRITE))
return (EINVAL);
if ((fip = vp->v_fifoinfo) == NULL) {
MALLOC(fip, struct fifoinfo *, sizeof(*fip), M_VNODE, M_WAITOK);
vp->v_fifoinfo = fip;
if (error = socreate(AF_UNIX, &rso, SOCK_STREAM, 0)) {
if (error = socreate(AF_LOCAL, &rso, SOCK_STREAM, 0)) {
free(fip, M_VNODE);
vp->v_fifoinfo = NULL;
return (error);
}
fip->fi_readsock = rso;
if (error = socreate(AF_UNIX, &wso, SOCK_STREAM, 0)) {
if (error = socreate(AF_LOCAL, &wso, SOCK_STREAM, 0)) {
(void)soclose(rso);
free(fip, M_VNODE);
vp->v_fifoinfo = NULL;
@ -171,7 +172,6 @@ fifo_open(ap)
wso->so_state |= SS_CANTRCVMORE;
rso->so_state |= SS_CANTSENDMORE;
}
error = 0;
if (ap->a_mode & FREAD) {
fip->fi_readers++;
if (fip->fi_readers == 1) {
@ -179,38 +179,46 @@ fifo_open(ap)
if (fip->fi_writers > 0)
wakeup((caddr_t)&fip->fi_writers);
}
if (ap->a_mode & O_NONBLOCK)
return (0);
}
if (ap->a_mode & FWRITE) {
fip->fi_writers++;
if (fip->fi_writers == 1) {
fip->fi_readsock->so_state &= ~SS_CANTRCVMORE;
if (fip->fi_readers > 0)
wakeup((caddr_t)&fip->fi_readers);
}
}
if ((ap->a_mode & FREAD) && (ap->a_mode & O_NONBLOCK) == 0) {
while (fip->fi_writers == 0) {
VOP_UNLOCK(vp);
VOP_UNLOCK(vp, 0, p);
error = tsleep((caddr_t)&fip->fi_readers,
PCATCH | PSOCK, openstr, 0);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (error)
break;
goto bad;
}
} else {
fip->fi_writers++;
if (fip->fi_readers == 0 && (ap->a_mode & O_NONBLOCK)) {
error = ENXIO;
} else {
if (fip->fi_writers == 1) {
fip->fi_readsock->so_state &= ~SS_CANTRCVMORE;
if (fip->fi_readers > 0)
wakeup((caddr_t)&fip->fi_readers);
}
if (ap->a_mode & FWRITE) {
if (ap->a_mode & O_NONBLOCK) {
if (fip->fi_readers == 0) {
error = ENXIO;
goto bad;
}
} else {
while (fip->fi_readers == 0) {
VOP_UNLOCK(vp);
VOP_UNLOCK(vp, 0, p);
error = tsleep((caddr_t)&fip->fi_writers,
PCATCH | PSOCK, openstr, 0);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (error)
break;
goto bad;
}
}
}
return (0);
bad:
if (error)
VOP_CLOSE(vp, ap->a_mode, ap->a_cred, ap->a_p);
VOP_CLOSE(vp, ap->a_mode, ap->a_cred, p);
return (error);
}
@ -226,8 +234,9 @@ fifo_read(ap)
struct ucred *a_cred;
} */ *ap;
{
register struct uio *uio = ap->a_uio;
register struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock;
struct uio *uio = ap->a_uio;
struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock;
struct proc *p = uio->uio_procp;
int error, startresid;
#ifdef DIAGNOSTIC
@ -239,10 +248,10 @@ fifo_read(ap)
if (ap->a_ioflag & IO_NDELAY)
rso->so_state |= SS_NBIO;
startresid = uio->uio_resid;
VOP_UNLOCK(ap->a_vp);
error = soreceive(rso, (struct mbuf **)0, uio, (int *)0,
(struct mbuf **)0, (struct mbuf **)0);
VOP_LOCK(ap->a_vp);
VOP_UNLOCK(ap->a_vp, 0, p);
error = soreceive(rso, (struct mbuf **)0, uio, (struct mbuf **)0,
(struct mbuf **)0, (int *)0);
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
/*
* Clear EOF indication after first such return.
*/
@ -266,6 +275,7 @@ fifo_write(ap)
} */ *ap;
{
struct socket *wso = ap->a_vp->v_fifoinfo->fi_writesock;
struct proc *p = ap->a_uio->uio_procp;
int error;
#ifdef DIAGNOSTIC
@ -274,9 +284,9 @@ fifo_write(ap)
#endif
if (ap->a_ioflag & IO_NDELAY)
wso->so_state |= SS_NBIO;
VOP_UNLOCK(ap->a_vp);
VOP_UNLOCK(ap->a_vp, 0, p);
error = sosend(wso, (struct mbuf *)0, ap->a_uio, 0, (struct mbuf *)0, 0);
VOP_LOCK(ap->a_vp);
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
if (ap->a_ioflag & IO_NDELAY)
wso->so_state &= ~SS_NBIO;
return (error);
@ -297,14 +307,23 @@ fifo_ioctl(ap)
} */ *ap;
{
struct file filetmp;
int error;
if (ap->a_command == FIONBIO)
return (0);
if (ap->a_fflag & FREAD)
if (ap->a_fflag & FREAD) {
filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock;
else
error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_p);
if (error)
return (error);
}
if (ap->a_fflag & FWRITE) {
filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock;
return (soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_p));
error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_p);
if (error)
return (error);
}
return (0);
}
/* ARGSUSED */
@ -318,12 +337,33 @@ fifo_select(ap)
} */ *ap;
{
struct file filetmp;
int ready;
if (ap->a_fflags & FREAD)
if (ap->a_fflags & FREAD) {
filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock;
else
ready = soo_select(&filetmp, ap->a_which, ap->a_p);
if (ready)
return (ready);
}
if (ap->a_fflags & FWRITE) {
filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock;
return (soo_select(&filetmp, ap->a_which, ap->a_p));
ready = soo_select(&filetmp, ap->a_which, ap->a_p);
if (ready)
return (ready);
}
return (0);
}
/*
 * VOP_INACTIVE handler for fifos.  No fifo-specific teardown is
 * done here; the routine only releases the vnode lock passed in
 * by the caller (NOTE(review): presumably per the vop_inactive
 * locking protocol — confirm) and reports success.
 */
int
fifo_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;	/* vnode going inactive */
		struct proc *a_p;	/* calling process, for unlock */
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}
/*
@ -335,6 +375,7 @@ fifo_bmap(ap)
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
@ -342,29 +383,8 @@ fifo_bmap(ap)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
return (0);
}
/*
* At the moment we do not do any locking.
*/
/* ARGSUSED */
fifo_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
} */ *ap;
{
return (0);
}
/* ARGSUSED */
fifo_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
} */ *ap;
{
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}
@ -384,14 +404,15 @@ fifo_close(ap)
register struct fifoinfo *fip = vp->v_fifoinfo;
int error1, error2;
if (ap->a_fflag & FREAD) {
fip->fi_readers--;
if (fip->fi_readers == 0)
socantsendmore(fip->fi_writesock);
}
if (ap->a_fflag & FWRITE) {
fip->fi_writers--;
if (fip->fi_writers == 0)
socantrcvmore(fip->fi_readsock);
} else {
fip->fi_readers--;
if (fip->fi_readers == 0)
socantsendmore(fip->fi_writesock);
}
if (vp->v_usecount > 1)
return (0);

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)kernfs.h 8.4 (Berkeley) 1/21/94
* @(#)kernfs.h 8.6 (Berkeley) 3/29/95
*/
#define _PATH_KERNFS "/kern" /* Default mountpoint */
@ -50,7 +50,18 @@ struct kernfs_node {
#define VFSTOKERNFS(mp) ((struct kernfs_mount *)((mp)->mnt_data))
#define VTOKERN(vp) ((struct kernfs_node *)(vp)->v_data)
#define kernfs_fhtovp ((int (*) __P((struct mount *, struct fid *, \
struct mbuf *, struct vnode **, int *, struct ucred **)))eopnotsupp)
#define kernfs_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \
struct proc *)))eopnotsupp)
#define kernfs_sync ((int (*) __P((struct mount *, int, struct ucred *, \
struct proc *)))nullop)
#define kernfs_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
#define kernfs_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \
eopnotsupp)
#define kernfs_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp)
extern int (**kernfs_vnodeop_p)();
extern struct vfsops kernfs_vfsops;
extern struct vnode *rrootvp;
extern dev_t rrootdev;
#endif /* KERNEL */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1992, 1993
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)kernfs_vfsops.c 8.4 (Berkeley) 1/21/94
* @(#)kernfs_vfsops.c 8.10 (Berkeley) 5/14/95
*/
/*
@ -53,60 +53,36 @@
#include <miscfs/specfs/specdev.h>
#include <miscfs/kernfs/kernfs.h>
struct vnode *rrootvp;
dev_t rrootdev = NODEV;
/*
* Create a vnode for a character device.
*/
int
cdevvp(dev, vpp)
dev_t dev;
struct vnode **vpp;
kernfs_init(vfsp)
struct vfsconf *vfsp;
{
register struct vnode *vp;
struct vnode *nvp;
int error;
if (dev == NODEV)
return (0);
error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
if (error) {
*vpp = 0;
return (error);
}
vp = nvp;
vp->v_type = VCHR;
if (nvp = checkalias(vp, dev, (struct mount *)0)) {
vput(vp);
vp = nvp;
}
*vpp = vp;
return (0);
}
kernfs_init()
void
kernfs_get_rrootdev()
{
static int tried = 0;
int cmaj;
int bmaj = major(rootdev);
int error = ENXIO;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_init\n"); /* printed during system boot */
#endif
if (tried) {
/* Already did it once. */
return;
}
tried = 1;
if (rootdev == NODEV)
return;
for (cmaj = 0; cmaj < nchrdev; cmaj++) {
if (cdevsw[cmaj].d_open == bdevsw[bmaj].d_open) {
dev_t cdev = makedev(cmaj, minor(rootdev));
error = cdevvp(cdev, &rrootvp);
if (error == 0)
break;
}
}
if (error) {
printf("kernfs: no raw boot device\n");
rrootvp = 0;
rrootdev = makedev(cmaj, minor(rootdev));
if (chrtoblk(rrootdev) == rootdev)
return;
}
rrootdev = NODEV;
printf("kernfs_get_rrootdev: no raw root device\n");
}
/*
@ -148,7 +124,7 @@ kernfs_mount(mp, path, data, ndp, p)
fmp->kf_root = rvp;
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = (qaddr_t) fmp;
getnewfsid(mp, MOUNT_KERNFS);
vfs_getnewfsid(mp);
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
@ -157,6 +133,8 @@ kernfs_mount(mp, path, data, ndp, p)
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_mount: at %s\n", mp->mnt_stat.f_mntonname);
#endif
kernfs_get_rrootdev();
return (0);
}
@ -175,19 +153,14 @@ kernfs_unmount(mp, mntflags, p)
{
int error;
int flags = 0;
extern int doforce;
struct vnode *rootvp = VFSTOKERNFS(mp)->kf_root;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_unmount(mp = %x)\n", mp);
#endif
if (mntflags & MNT_FORCE) {
/* kernfs can never be rootfs so don't check for it */
if (!doforce)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
/*
* Clear out buffer cache. I don't think we
@ -225,6 +198,7 @@ kernfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct proc *p = curproc; /* XXX */
struct vnode *vp;
#ifdef KERNFS_DIAGNOSTIC
@ -236,21 +210,11 @@ kernfs_root(mp, vpp)
*/
vp = VFSTOKERNFS(mp)->kf_root;
VREF(vp);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
*vpp = vp;
return (0);
}
/*
 * Quota operations are not supported on kernfs;
 * always fails with EOPNOTSUPP.  All arguments are ignored.
 */
kernfs_quotactl(mp, cmd, uid, arg, p)
	struct mount *mp;	/* unused */
	int cmd;		/* unused */
	uid_t uid;		/* unused */
	caddr_t arg;		/* unused */
	struct proc *p;		/* unused */
{

	return (EOPNOTSUPP);
}
kernfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
@ -260,7 +224,6 @@ kernfs_statfs(mp, sbp, p)
printf("kernfs_statfs(mp = %x)\n", mp);
#endif
sbp->f_type = MOUNT_KERNFS;
sbp->f_flags = 0;
sbp->f_bsize = DEV_BSIZE;
sbp->f_iosize = DEV_BSIZE;
@ -270,6 +233,7 @@ kernfs_statfs(mp, sbp, p)
sbp->f_files = 0;
sbp->f_ffree = 0;
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
@ -277,43 +241,6 @@ kernfs_statfs(mp, sbp, p)
return (0);
}
kernfs_sync(mp, waitfor)
struct mount *mp;
int waitfor;
{
return (0);
}
/*
* Kernfs flat namespace lookup.
* Currently unsupported.
*/
kernfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
kernfs_fhtovp(mp, fhp, setgen, vpp)
struct mount *mp;
struct fid *fhp;
int setgen;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
kernfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
return (EOPNOTSUPP);
}
struct vfsops kernfs_vfsops = {
kernfs_mount,
kernfs_start,
@ -326,4 +253,5 @@ struct vfsops kernfs_vfsops = {
kernfs_fhtovp,
kernfs_vptofh,
kernfs_init,
kernfs_sysctl,
};

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)kernfs_vnops.c 8.6 (Berkeley) 2/10/94
* @(#)kernfs_vnops.c 8.15 (Berkeley) 5/21/95
*/
/*
@ -65,38 +65,42 @@
#define DIR_MODE (S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
struct kern_target {
u_char kt_type;
u_char kt_namlen;
char *kt_name;
void *kt_data;
#define KTT_NULL 1
#define KTT_TIME 5
#define KTT_INT 17
#define KTT_STRING 31
#define KTT_HOSTNAME 47
#define KTT_AVENRUN 53
int kt_tag;
int kt_rw;
int kt_vtype;
#define KTT_NULL 1
#define KTT_TIME 5
#define KTT_INT 17
#define KTT_STRING 31
#define KTT_HOSTNAME 47
#define KTT_AVENRUN 53
#define KTT_DEVICE 71
u_char kt_tag;
u_char kt_vtype;
mode_t kt_mode;
} kern_targets[] = {
/* NOTE: The name must be less than UIO_MX-16 chars in length */
/* name data tag ro/rw */
{ ".", 0, KTT_NULL, VREAD, VDIR },
{ "..", 0, KTT_NULL, VREAD, VDIR },
{ "boottime", &boottime.tv_sec, KTT_INT, VREAD, VREG },
{ "copyright", copyright, KTT_STRING, VREAD, VREG },
{ "hostname", 0, KTT_HOSTNAME, VREAD|VWRITE, VREG },
{ "hz", &hz, KTT_INT, VREAD, VREG },
{ "loadavg", 0, KTT_AVENRUN, VREAD, VREG },
{ "pagesize", &cnt.v_page_size, KTT_INT, VREAD, VREG },
{ "physmem", &physmem, KTT_INT, VREAD, VREG },
#define N(s) sizeof(s)-1, s
/* name data tag type ro/rw */
{ DT_DIR, N("."), 0, KTT_NULL, VDIR, DIR_MODE },
{ DT_DIR, N(".."), 0, KTT_NULL, VDIR, DIR_MODE },
{ DT_REG, N("boottime"), &boottime.tv_sec, KTT_INT, VREG, READ_MODE },
{ DT_REG, N("copyright"), copyright, KTT_STRING, VREG, READ_MODE },
{ DT_REG, N("hostname"), 0, KTT_HOSTNAME, VREG, WRITE_MODE },
{ DT_REG, N("hz"), &hz, KTT_INT, VREG, READ_MODE },
{ DT_REG, N("loadavg"), 0, KTT_AVENRUN, VREG, READ_MODE },
{ DT_REG, N("pagesize"), &cnt.v_page_size, KTT_INT, VREG, READ_MODE },
{ DT_REG, N("physmem"), &physmem, KTT_INT, VREG, READ_MODE },
#if 0
{ "root", 0, KTT_NULL, VREAD, VDIR },
{ DT_DIR, N("root"), 0, KTT_NULL, VDIR, DIR_MODE },
#endif
{ "rootdev", 0, KTT_NULL, VREAD, VBLK },
{ "rrootdev", 0, KTT_NULL, VREAD, VCHR },
{ "time", 0, KTT_TIME, VREAD, VREG },
{ "version", version, KTT_STRING, VREAD, VREG },
{ DT_BLK, N("rootdev"), &rootdev, KTT_DEVICE, VBLK, READ_MODE },
{ DT_CHR, N("rrootdev"), &rrootdev, KTT_DEVICE, VCHR, READ_MODE },
{ DT_REG, N("time"), 0, KTT_TIME, VREG, READ_MODE },
{ DT_REG, N("version"), version, KTT_STRING, VREG, READ_MODE },
#undef N
};
static int nkern_targets = sizeof(kern_targets) / sizeof(kern_targets[0]);
static int
@ -106,6 +110,7 @@ kernfs_xread(kt, buf, len, lenp)
int len;
int *lenp;
{
switch (kt->kt_tag) {
case KTT_TIME: {
struct timeval tv;
@ -146,14 +151,12 @@ kernfs_xread(kt, buf, len, lenp)
case KTT_AVENRUN:
sprintf(buf, "%ld %ld %ld %ld\n",
averunnable.ldavg[0],
averunnable.ldavg[1],
averunnable.ldavg[2],
averunnable.fscale);
averunnable.ldavg[0], averunnable.ldavg[1],
averunnable.ldavg[2], averunnable.fscale);
break;
default:
return (EINVAL);
return (EIO);
}
*lenp = strlen(buf);
@ -166,15 +169,15 @@ kernfs_xwrite(kt, buf, len)
char *buf;
int len;
{
switch (kt->kt_tag) {
case KTT_HOSTNAME: {
case KTT_HOSTNAME:
if (buf[len-1] == '\n')
--len;
bcopy(buf, hostname, len);
hostname[len] = '\0';
hostnamelen = len;
return (0);
}
default:
return (EIO);
@ -193,25 +196,31 @@ kernfs_lookup(ap)
struct componentname * a_cnp;
} */ *ap;
{
struct componentname *cnp = ap->a_cnp;
struct vnode **vpp = ap->a_vpp;
struct vnode *dvp = ap->a_dvp;
struct componentname *cnp = ap->a_cnp;
char *pname = cnp->cn_nameptr;
struct proc *p = cnp->cn_proc;
struct kern_target *kt;
struct vnode *fvp;
int error, i;
char *pname;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_lookup(%x)\n", ap);
printf("kernfs_lookup(dp = %x, vpp = %x, cnp = %x)\n", dvp, vpp, ap->a_cnp);
#endif
pname = cnp->cn_nameptr;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_lookup(%s)\n", pname);
#endif
*vpp = NULLVP;
if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)
return (EROFS);
VOP_UNLOCK(dvp, 0, p);
if (cnp->cn_namelen == 1 && *pname == '.') {
*vpp = dvp;
VREF(dvp);
/*VOP_LOCK(dvp);*/
vn_lock(dvp, LK_SHARED | LK_RETRY, p);
return (0);
}
@ -219,74 +228,58 @@ kernfs_lookup(ap)
if (cnp->cn_namelen == 4 && bcmp(pname, "root", 4) == 0) {
*vpp = rootdir;
VREF(rootdir);
VOP_LOCK(rootdir);
vn_lock(rootdir, LK_SHARED | LK_RETRY, p)
return (0);
}
#endif
/*
* /kern/rootdev is the root device
*/
if (cnp->cn_namelen == 7 && bcmp(pname, "rootdev", 7) == 0) {
*vpp = rootvp;
VREF(rootvp);
VOP_LOCK(rootvp);
return (0);
}
/*
* /kern/rrootdev is the raw root device
*/
if (cnp->cn_namelen == 8 && bcmp(pname, "rrootdev", 8) == 0) {
if (rrootvp) {
*vpp = rrootvp;
VREF(rrootvp);
VOP_LOCK(rrootvp);
return (0);
}
error = ENXIO;
goto bad;
}
error = ENOENT;
for (i = 0; i < nkern_targets; i++) {
struct kern_target *kt = &kern_targets[i];
if (cnp->cn_namelen == strlen(kt->kt_name) &&
bcmp(kt->kt_name, pname, cnp->cn_namelen) == 0) {
error = 0;
break;
}
for (kt = kern_targets, i = 0; i < nkern_targets; kt++, i++) {
if (cnp->cn_namelen == kt->kt_namlen &&
bcmp(kt->kt_name, pname, cnp->cn_namelen) == 0)
goto found;
}
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_lookup: i = %d, error = %d\n", i, error);
printf("kernfs_lookup: i = %d, failed", i);
#endif
if (error)
goto bad;
vn_lock(dvp, LK_SHARED | LK_RETRY, p);
return (cnp->cn_nameiop == LOOKUP ? ENOENT : EROFS);
found:
if (kt->kt_tag == KTT_DEVICE) {
dev_t *dp = kt->kt_data;
loop:
if (*dp == NODEV || !vfinddev(*dp, kt->kt_vtype, &fvp)) {
vn_lock(dvp, LK_SHARED | LK_RETRY, p);
return (ENOENT);
}
*vpp = fvp;
if (vget(fvp, LK_EXCLUSIVE, p))
goto loop;
return (0);
}
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_lookup: allocate new vnode\n");
#endif
error = getnewvnode(VT_KERNFS, dvp->v_mount, kernfs_vnodeop_p, &fvp);
if (error)
goto bad;
MALLOC(fvp->v_data, void *, sizeof(struct kernfs_node), M_TEMP, M_WAITOK);
VTOKERN(fvp)->kf_kt = &kern_targets[i];
fvp->v_type = VTOKERN(fvp)->kf_kt->kt_vtype;
if (error = getnewvnode(VT_KERNFS, dvp->v_mount, kernfs_vnodeop_p,
&fvp)) {
vn_lock(dvp, LK_SHARED | LK_RETRY, p);
return (error);
}
MALLOC(fvp->v_data, void *, sizeof(struct kernfs_node), M_TEMP,
M_WAITOK);
VTOKERN(fvp)->kf_kt = kt;
fvp->v_type = kt->kt_vtype;
vn_lock(fvp, LK_SHARED | LK_RETRY, p);
*vpp = fvp;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_lookup: newvp = %x\n", fvp);
#endif
return (0);
bad:;
*vpp = NULL;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_lookup: error = %d\n", error);
#endif
return (error);
}
kernfs_open(ap)
@ -297,22 +290,8 @@ kernfs_open(ap)
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
/*
* Can always open the root (modulo perms)
*/
if (vp->v_flag & VROOT)
return (0);
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_open, mode = %x, file = %s\n",
ap->a_mode, VTOKERN(vp)->kf_kt->kt_name);
#endif
if ((ap->a_mode & FWRITE) && !(VTOKERN(vp)->kf_kt->kt_rw & VWRITE))
return (EOPNOTSUPP);
/* Only need to check access permissions. */
return (0);
}
@ -325,33 +304,45 @@ kernfs_access(ap)
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct ucred *cred = ap->a_cred;
mode_t mode = ap->a_mode;
register struct vnode *vp = ap->a_vp;
register struct ucred *cred = ap->a_cred;
mode_t amode = ap->a_mode;
mode_t fmode =
(vp->v_flag & VROOT) ? DIR_MODE : VTOKERN(vp)->kf_kt->kt_mode;
mode_t mask = 0;
register gid_t *gp;
int i;
if (mode & VEXEC) {
if (vp->v_flag & VROOT)
return (0);
return (EACCES);
}
/* Some files are simply not modifiable. */
if ((amode & VWRITE) && (fmode & (S_IWUSR|S_IWGRP|S_IWOTH)) == 0)
return (EPERM);
if (cred->cr_uid == 0) {
if ((vp->v_flag & VROOT) == 0) {
struct kern_target *kt = VTOKERN(vp)->kf_kt;
if ((mode & VWRITE) && !(kt->kt_rw & VWRITE))
return (EROFS);
}
/* Root can do anything else. */
if (cred->cr_uid == 0)
return (0);
}
if (mode & VWRITE)
return (EACCES);
/* Check for group 0 (wheel) permissions. */
for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
if (*gp == 0) {
if (amode & VEXEC)
mask |= S_IXGRP;
if (amode & VREAD)
mask |= S_IRGRP;
if (amode & VWRITE)
mask |= S_IWGRP;
return ((fmode & mask) == mask ? 0 : EACCES);
}
return (0);
/* Otherwise, check everyone else. */
if (amode & VEXEC)
mask |= S_IXOTH;
if (amode & VREAD)
mask |= S_IROTH;
if (amode & VWRITE)
mask |= S_IWOTH;
return ((fmode & mask) == mask ? 0 : EACCES);
}
kernfs_getattr(ap)
struct vop_getattr_args /* {
struct vnode *a_vp;
@ -362,6 +353,7 @@ kernfs_getattr(ap)
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
struct timeval tv;
int error = 0;
char strbuf[KSTRING];
@ -370,15 +362,15 @@ kernfs_getattr(ap)
vap->va_uid = 0;
vap->va_gid = 0;
vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
/* vap->va_qsize = 0; */
vap->va_size = 0;
vap->va_blocksize = DEV_BSIZE;
microtime(&vap->va_atime);
microtime(&tv);
TIMEVAL_TO_TIMESPEC(&tv, &vap->va_atime);
vap->va_mtime = vap->va_atime;
vap->va_ctime = vap->va_ctime;
vap->va_gen = 0;
vap->va_flags = 0;
vap->va_rdev = 0;
/* vap->va_qbytes = 0; */
vap->va_bytes = 0;
if (vp->v_flag & VROOT) {
@ -397,14 +389,13 @@ kernfs_getattr(ap)
printf("kernfs_getattr: stat target %s\n", kt->kt_name);
#endif
vap->va_type = kt->kt_vtype;
vap->va_mode = (kt->kt_rw & VWRITE ? WRITE_MODE : READ_MODE);
vap->va_mode = kt->kt_mode;
vap->va_nlink = 1;
vap->va_fileid = 3 + (kt - kern_targets) / sizeof(*kt);
vap->va_fileid = 1 + (kt - kern_targets) / sizeof(*kt);
error = kernfs_xread(kt, strbuf, sizeof(strbuf), &nbytes);
vap->va_size = nbytes;
}
vp->v_type = vap->va_type;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_getattr: return error %d\n", error);
#endif
@ -446,7 +437,7 @@ kernfs_read(ap)
int error, len;
char *cp;
if (vp->v_flag & VROOT)
if (vp->v_type == VDIR)
return (EOPNOTSUPP);
kt = VTOKERN(vp)->kf_kt;
@ -456,12 +447,11 @@ kernfs_read(ap)
#endif
len = 0;
error = kernfs_xread(kt, strbuf, sizeof(strbuf), &len);
if (error)
if (error = kernfs_xread(kt, strbuf, sizeof(strbuf), &len))
return (error);
cp = strbuf + off;
len -= off;
return (uiomove(cp, len, uio));
if (len <= off)
return (0);
return (uiomove(&strbuf[off], len - off, uio));
}
static int
@ -479,8 +469,8 @@ kernfs_write(ap)
int error, xlen;
char strbuf[KSTRING];
if (vp->v_flag & VROOT)
return (0);
if (vp->v_type == VDIR)
return (EOPNOTSUPP);
kt = VTOKERN(vp)->kf_kt;
@ -488,8 +478,7 @@ kernfs_write(ap)
return (EINVAL);
xlen = min(uio->uio_resid, KSTRING-1);
error = uiomove(strbuf, xlen, uio);
if (error)
if (error = uiomove(strbuf, xlen, uio))
return (error);
if (uio->uio_resid != 0)
@ -500,32 +489,51 @@ kernfs_write(ap)
return (kernfs_xwrite(kt, strbuf, xlen));
}
kernfs_readdir(ap)
struct vop_readdir_args /* {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
u_long *a_cookies;
int a_ncookies;
} */ *ap;
{
int error, i;
struct uio *uio = ap->a_uio;
int i;
int error;
struct kern_target *kt;
struct dirent d;
if (ap->a_vp->v_type != VDIR)
return (ENOTDIR);
/*
* We don't allow exporting kernfs mounts, and currently local
* requests do not need cookies.
*/
if (ap->a_ncookies != NULL)
panic("kernfs_readdir: not hungry");
i = uio->uio_offset / UIO_MX;
error = 0;
while (uio->uio_resid > 0 && i < nkern_targets) {
struct dirent d;
for (kt = &kern_targets[i];
uio->uio_resid >= UIO_MX && i < nkern_targets; kt++, i++) {
struct dirent *dp = &d;
struct kern_target *kt = &kern_targets[i];
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_readdir: i = %d\n", i);
#endif
bzero((caddr_t) dp, UIO_MX);
if (kt->kt_tag == KTT_DEVICE) {
dev_t *dp = kt->kt_data;
struct vnode *fvp;
dp->d_namlen = strlen(kt->kt_name);
bcopy(kt->kt_name, dp->d_name, dp->d_namlen+1);
if (*dp == NODEV || !vfinddev(*dp, kt->kt_vtype, &fvp))
continue;
}
bzero((caddr_t)dp, UIO_MX);
dp->d_namlen = kt->kt_namlen;
bcopy(kt->kt_name, dp->d_name, kt->kt_namlen+1);
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_readdir: name = %s, len = %d\n",
@ -536,14 +544,12 @@ kernfs_readdir(ap)
*/
dp->d_reclen = UIO_MX;
dp->d_fileno = i + 3;
dp->d_type = DT_UNKNOWN; /* XXX */
dp->d_type = kt->kt_type;
/*
* And ship to userland
*/
error = uiomove((caddr_t) dp, UIO_MX, uio);
if (error)
if (error = uiomove((caddr_t)dp, UIO_MX, uio))
break;
i++;
}
uio->uio_offset = i * UIO_MX;
@ -554,18 +560,20 @@ kernfs_readdir(ap)
kernfs_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_inactive(%x)\n", vp);
#endif
/*
* Clear out the v_type field to avoid
* nasty things happening in vgone().
*/
VOP_UNLOCK(vp, 0, ap->a_p);
vp->v_type = VNON;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_inactive(%x)\n", vp);
#endif
return (0);
}
@ -575,6 +583,7 @@ kernfs_reclaim(ap)
} */ *ap;
{
struct vnode *vp = ap->a_vp;
#ifdef KERNFS_DIAGNOSTIC
printf("kernfs_reclaim(%x)\n", vp);
#endif
@ -647,15 +656,6 @@ kernfs_vfree(ap)
return (0);
}
/*
* /dev/fd vnode unsupported operation
*/
kernfs_enotsupp()
{
return (EOPNOTSUPP);
}
/*
* /dev/fd "should never get here" operation
*/
@ -675,40 +675,40 @@ kernfs_nullop()
return (0);
}
#define kernfs_create ((int (*) __P((struct vop_create_args *)))kernfs_enotsupp)
#define kernfs_mknod ((int (*) __P((struct vop_mknod_args *)))kernfs_enotsupp)
#define kernfs_create ((int (*) __P((struct vop_create_args *)))eopnotsupp)
#define kernfs_mknod ((int (*) __P((struct vop_mknod_args *)))eopnotsupp)
#define kernfs_close ((int (*) __P((struct vop_close_args *)))nullop)
#define kernfs_ioctl ((int (*) __P((struct vop_ioctl_args *)))kernfs_enotsupp)
#define kernfs_select ((int (*) __P((struct vop_select_args *)))kernfs_enotsupp)
#define kernfs_mmap ((int (*) __P((struct vop_mmap_args *)))kernfs_enotsupp)
#define kernfs_ioctl ((int (*) __P((struct vop_ioctl_args *)))eopnotsupp)
#define kernfs_select ((int (*) __P((struct vop_select_args *)))eopnotsupp)
#define kernfs_revoke vop_revoke
#define kernfs_mmap ((int (*) __P((struct vop_mmap_args *)))eopnotsupp)
#define kernfs_fsync ((int (*) __P((struct vop_fsync_args *)))nullop)
#define kernfs_seek ((int (*) __P((struct vop_seek_args *)))nullop)
#define kernfs_remove ((int (*) __P((struct vop_remove_args *)))kernfs_enotsupp)
#define kernfs_link ((int (*) __P((struct vop_link_args *)))kernfs_enotsupp)
#define kernfs_rename ((int (*) __P((struct vop_rename_args *)))kernfs_enotsupp)
#define kernfs_mkdir ((int (*) __P((struct vop_mkdir_args *)))kernfs_enotsupp)
#define kernfs_rmdir ((int (*) __P((struct vop_rmdir_args *)))kernfs_enotsupp)
#define kernfs_symlink ((int (*) __P((struct vop_symlink_args *)))kernfs_enotsupp)
#define kernfs_readlink \
((int (*) __P((struct vop_readlink_args *)))kernfs_enotsupp)
#define kernfs_remove ((int (*) __P((struct vop_remove_args *)))eopnotsupp)
#define kernfs_link ((int (*) __P((struct vop_link_args *)))eopnotsupp)
#define kernfs_rename ((int (*) __P((struct vop_rename_args *)))eopnotsupp)
#define kernfs_mkdir ((int (*) __P((struct vop_mkdir_args *)))eopnotsupp)
#define kernfs_rmdir ((int (*) __P((struct vop_rmdir_args *)))eopnotsupp)
#define kernfs_symlink ((int (*) __P((struct vop_symlink_args *)))eopnotsupp)
#define kernfs_readlink ((int (*) __P((struct vop_readlink_args *)))eopnotsupp)
#define kernfs_abortop ((int (*) __P((struct vop_abortop_args *)))nullop)
#define kernfs_lock ((int (*) __P((struct vop_lock_args *)))nullop)
#define kernfs_unlock ((int (*) __P((struct vop_unlock_args *)))nullop)
#define kernfs_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define kernfs_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
#define kernfs_bmap ((int (*) __P((struct vop_bmap_args *)))kernfs_badop)
#define kernfs_strategy ((int (*) __P((struct vop_strategy_args *)))kernfs_badop)
#define kernfs_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
#define kernfs_advlock ((int (*) __P((struct vop_advlock_args *)))kernfs_enotsupp)
#define kernfs_blkatoff \
((int (*) __P((struct vop_blkatoff_args *)))kernfs_enotsupp)
#define kernfs_strategy \
((int (*) __P((struct vop_strategy_args *)))kernfs_badop)
#define kernfs_islocked \
((int (*) __P((struct vop_islocked_args *)))vop_noislocked)
#define kernfs_advlock ((int (*) __P((struct vop_advlock_args *)))eopnotsupp)
#define kernfs_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))eopnotsupp)
#define kernfs_valloc ((int(*) __P(( \
struct vnode *pvp, \
int mode, \
struct ucred *cred, \
struct vnode **vpp))) kernfs_enotsupp)
#define kernfs_truncate \
((int (*) __P((struct vop_truncate_args *)))kernfs_enotsupp)
#define kernfs_update ((int (*) __P((struct vop_update_args *)))kernfs_enotsupp)
#define kernfs_bwrite ((int (*) __P((struct vop_bwrite_args *)))kernfs_enotsupp)
struct vnode **vpp))) eopnotsupp)
#define kernfs_truncate ((int (*) __P((struct vop_truncate_args *)))eopnotsupp)
#define kernfs_update ((int (*) __P((struct vop_update_args *)))eopnotsupp)
#define kernfs_bwrite ((int (*) __P((struct vop_bwrite_args *)))eopnotsupp)
int (**kernfs_vnodeop_p)();
struct vnodeopv_entry_desc kernfs_vnodeop_entries[] = {
@ -725,6 +725,7 @@ struct vnodeopv_entry_desc kernfs_vnodeop_entries[] = {
{ &vop_write_desc, kernfs_write }, /* write */
{ &vop_ioctl_desc, kernfs_ioctl }, /* ioctl */
{ &vop_select_desc, kernfs_select }, /* select */
{ &vop_revoke_desc, kernfs_revoke }, /* revoke */
{ &vop_mmap_desc, kernfs_mmap }, /* mmap */
{ &vop_fsync_desc, kernfs_fsync }, /* fsync */
{ &vop_seek_desc, kernfs_seek }, /* seek */

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)null.h 8.2 (Berkeley) 1/21/94
* @(#)null.h 8.3 (Berkeley) 8/20/94
*
* $Id: lofs.h,v 1.8 1992/05/30 10:05:43 jsp Exp jsp $
*/
@ -52,8 +52,7 @@ struct null_mount {
* A cache of vnode references
*/
struct null_node {
struct null_node *null_forw; /* Hash chain */
struct null_node *null_back;
LIST_ENTRY(null_node) null_hash; /* Hash list */
struct vnode *null_lowervp; /* VREFed once */
struct vnode *null_vnode; /* Back pointer */
};

View File

@ -33,13 +33,14 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)null_subr.c 8.4 (Berkeley) 1/21/94
* @(#)null_subr.c 8.7 (Berkeley) 5/14/95
*
* $Id: lofs_subr.c,v 1.11 1992/05/30 10:05:43 jsp Exp jsp $
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
@ -50,7 +51,6 @@
#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */
#define NNULLNODECACHE 16
#define NULL_NHASH(vp) ((((u_long)vp)>>LOG2_SIZEVNODE) & (NNULLNODECACHE-1))
/*
* Null layer cache:
@ -60,39 +60,21 @@
* alias is removed the lower vnode is vrele'd.
*/
/*
* Cache head
*/
struct null_node_cache {
struct null_node *ac_forw;
struct null_node *ac_back;
};
static struct null_node_cache null_node_cache[NNULLNODECACHE];
#define NULL_NHASH(vp) \
(&null_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & null_node_hash])
LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
u_long null_node_hash;
/*
* Initialise cache headers
*/
nullfs_init()
{
struct null_node_cache *ac;
#ifdef NULLFS_DIAGNOSTIC
printf("nullfs_init\n"); /* printed during system boot */
#endif
for (ac = null_node_cache; ac < null_node_cache + NNULLNODECACHE; ac++)
ac->ac_forw = ac->ac_back = (struct null_node *) ac;
}
/*
* Compute hash list for given lower vnode
*/
static struct null_node_cache *
null_node_hash(lowervp)
struct vnode *lowervp;
{
return (&null_node_cache[NULL_NHASH(lowervp)]);
null_node_hashtbl = hashinit(NNULLNODECACHE, M_CACHE, &null_node_hash);
}
/*
@ -103,7 +85,8 @@ null_node_find(mp, lowervp)
struct mount *mp;
struct vnode *lowervp;
{
struct null_node_cache *hd;
struct proc *p = curproc; /* XXX */
struct null_node_hashhead *hd;
struct null_node *a;
struct vnode *vp;
@ -113,9 +96,9 @@ null_node_find(mp, lowervp)
* the lower vnode. If found, the increment the null_node
* reference count (but NOT the lower vnode's VREF counter).
*/
hd = null_node_hash(lowervp);
hd = NULL_NHASH(lowervp);
loop:
for (a = hd->ac_forw; a != (struct null_node *) hd; a = a->null_forw) {
for (a = hd->lh_first; a != 0; a = a->null_hash.le_next) {
if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
vp = NULLTOV(a);
/*
@ -123,7 +106,7 @@ null_node_find(mp, lowervp)
* stuff, but we don't want to lock
* the lower node.
*/
if (vget(vp, 0)) {
if (vget(vp, 0, p)) {
printf ("null_node_find: vget failed.\n");
goto loop;
};
@ -146,7 +129,7 @@ null_node_alloc(mp, lowervp, vpp)
struct vnode *lowervp;
struct vnode **vpp;
{
struct null_node_cache *hd;
struct null_node_hashhead *hd;
struct null_node *xp;
struct vnode *othervp, *vp;
int error;
@ -173,8 +156,8 @@ null_node_alloc(mp, lowervp, vpp)
return 0;
};
VREF(lowervp); /* Extra VREF will be vrele'd in null_node_create */
hd = null_node_hash(lowervp);
insque(xp, hd);
hd = NULL_NHASH(lowervp);
LIST_INSERT_HEAD(hd, xp, null_hash);
return 0;
}
@ -227,9 +210,8 @@ null_node_create(mp, lowervp, newvpp)
#ifdef DIAGNOSTIC
if (lowervp->v_usecount < 1) {
/* Should never happen... */
vprint ("null_node_create: alias ");
vprint ("null_node_create: lower ");
printf ("null_node_create: lower has 0 usecount.\n");
vprint ("null_node_create: alias ", aliasvp);
vprint ("null_node_create: lower ", lowervp);
panic ("null_node_create: lower has 0 usecount.");
};
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1992, 1993
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)null_vfsops.c 8.2 (Berkeley) 1/21/94
* @(#)null_vfsops.c 8.7 (Berkeley) 5/14/95
*
* @(#)lofs_vfsops.c 1.2 (Berkeley) 6/18/92
* $Id: lofs_vfsops.c,v 1.9 1992/05/30 10:26:24 jsp Exp jsp $
@ -46,6 +46,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
@ -122,7 +123,7 @@ nullfs_mount(mp, path, data, ndp, p)
/*
* Unlock the node (either the lower or the alias)
*/
VOP_UNLOCK(vp);
VOP_UNLOCK(vp, 0, p);
/*
* Make sure the node alias worked
*/
@ -142,7 +143,7 @@ nullfs_mount(mp, path, data, ndp, p)
if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = (qaddr_t) xmp;
getnewfsid(mp, MOUNT_LOFS);
vfs_getnewfsid(mp);
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
@ -183,18 +184,13 @@ nullfs_unmount(mp, mntflags, p)
struct vnode *nullm_rootvp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
int error;
int flags = 0;
extern int doforce;
#ifdef NULLFS_DIAGNOSTIC
printf("nullfs_unmount(mp = %x)\n", mp);
#endif
if (mntflags & MNT_FORCE) {
/* lofs can never be rootfs so don't check for it */
if (!doforce)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
/*
* Clear out buffer cache. I don't think we
@ -235,6 +231,7 @@ nullfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct proc *p = curproc; /* XXX */
struct vnode *vp;
#ifdef NULLFS_DIAGNOSTIC
@ -249,7 +246,7 @@ nullfs_root(mp, vpp)
*/
vp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
VREF(vp);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
*vpp = vp;
return 0;
}
@ -349,7 +346,10 @@ nullfs_vptofh(vp, fhp)
return VFS_VPTOFH(NULLVPTOLOWERVP(vp), fhp);
}
int nullfs_init __P((void));
int nullfs_init __P((struct vfsconf *));
#define nullfs_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
struct vfsops null_vfsops = {
nullfs_mount,
@ -363,4 +363,5 @@ struct vfsops null_vfsops = {
nullfs_fhtovp,
nullfs_vptofh,
nullfs_init,
nullfs_sysctl,
};

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)null_vnops.c 8.1 (Berkeley) 6/10/93
* @(#)null_vnops.c 8.6 (Berkeley) 5/27/95
*
* Ancestors:
* @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92
@ -92,13 +92,21 @@
* in the arguments and, if a vnode is return by the operation,
* stacks a null-node on top of the returned vnode.
*
* Although bypass handles most operations,
* vop_getattr, _inactive, _reclaim, and _print are not bypassed.
* Vop_getattr must change the fsid being returned.
* Although bypass handles most operations, vop_getattr, vop_lock,
* vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
* bypassed. Vop_getattr must change the fsid being returned.
* Vop_lock and vop_unlock must handle any locking for the
* current vnode as well as pass the lock request down.
* Vop_inactive and vop_reclaim are not bypassed so that
* they can handle freeing null-layer specific data.
* Vop_print is not bypassed to avoid excessive debugging
* information.
* they can handle freeing null-layer specific data. Vop_print
* is not bypassed to avoid excessive debugging information.
* Also, certain vnode operations change the locking state within
* the operation (create, mknod, remove, link, rename, mkdir, rmdir,
* and symlink). Ideally these operations should not change the
* lock state, but should be changed to let the caller of the
* function unlock them. Otherwise all intermediate vnode layers
* (such as union, umapfs, etc) must catch these functions to do
* the necessary locking at their layer.
*
*
* INSTANTIATING VNODE STACKS
@ -248,7 +256,8 @@ null_bypass(ap)
* are of our type. Check for and don't map any
* that aren't. (We must always map first vp or vclean fails.)
*/
if (i && (*this_vp_p)->v_op != null_vnodeop_p) {
if (i && (*this_vp_p == NULL ||
(*this_vp_p)->v_op != null_vnodeop_p)) {
old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
@ -311,6 +320,104 @@ null_bypass(ap)
return (error);
}
/*
* We have to carry on the locking protocol on the null layer vnodes
* as we progress through the tree. We also have to enforce read-only
* if this layer is mounted read-only.
*/
null_lookup(ap)
struct vop_lookup_args /* {
struct vnode * a_dvp;
struct vnode ** a_vpp;
struct componentname * a_cnp;
} */ *ap;
{
struct componentname *cnp = ap->a_cnp;
struct proc *p = cnp->cn_proc;
int flags = cnp->cn_flags;
struct vop_lock_args lockargs;
struct vop_unlock_args unlockargs;
struct vnode *dvp, *vp;
int error;
if ((flags & ISLASTCN) && (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
return (EROFS);
error = null_bypass(ap);
if (error == EJUSTRETURN && (flags & ISLASTCN) &&
(ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
error = EROFS;
/*
* We must do the same locking and unlocking at this layer as
* is done in the layers below us. We could figure this out
* based on the error return and the LASTCN, LOCKPARENT, and
* LOCKLEAF flags. However, it is more expidient to just find
* out the state of the lower level vnodes and set ours to the
* same state.
*/
dvp = ap->a_dvp;
vp = *ap->a_vpp;
if (dvp == vp)
return (error);
if (!VOP_ISLOCKED(dvp)) {
unlockargs.a_vp = dvp;
unlockargs.a_flags = 0;
unlockargs.a_p = p;
vop_nounlock(&unlockargs);
}
if (vp != NULL && VOP_ISLOCKED(vp)) {
lockargs.a_vp = vp;
lockargs.a_flags = LK_SHARED;
lockargs.a_p = p;
vop_nolock(&lockargs);
}
return (error);
}
/*
* Setattr call. Disallow write attempts if the layer is mounted read-only.
*/
int
null_setattr(ap)
struct vop_setattr_args /* {
struct vnodeop_desc *a_desc;
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
vap->va_gid != (gid_t)VNOVAL || vap->va_atime.ts_sec != VNOVAL ||
vap->va_mtime.ts_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
(vp->v_mount->mnt_flag & MNT_RDONLY))
return (EROFS);
if (vap->va_size != VNOVAL) {
switch (vp->v_type) {
case VDIR:
return (EISDIR);
case VCHR:
case VBLK:
case VSOCK:
case VFIFO:
return (0);
case VREG:
case VLNK:
default:
/*
* Disallow write attempts if the filesystem is
* mounted read-only.
*/
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
}
}
return (null_bypass(ap));
}
/*
* We handle getattr only to change the fsid.
@ -325,6 +432,7 @@ null_getattr(ap)
} */ *ap;
{
int error;
if (error = null_bypass(ap))
return (error);
/* Requires that arguments be restored. */
@ -332,11 +440,82 @@ null_getattr(ap)
return (0);
}
int
null_access(ap)
struct vop_access_args /* {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
mode_t mode = ap->a_mode;
/*
* Disallow write attempts on read-only layers;
* unless the file is a socket, fifo, or a block or
* character device resident on the file system.
*/
if (mode & VWRITE) {
switch (vp->v_type) {
case VDIR:
case VLNK:
case VREG:
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
break;
}
}
return (null_bypass(ap));
}
/*
* We need to process our own vnode lock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
int
null_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap;
{
vop_nolock(ap);
if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
return (0);
ap->a_flags &= ~LK_INTERLOCK;
return (null_bypass(ap));
}
/*
* We need to process our own vnode unlock and then clear the
* interlock flag as it applies only to our vnode, not the
* vnodes below us on the stack.
*/
int
null_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
vop_nounlock(ap);
ap->a_flags &= ~LK_INTERLOCK;
return (null_bypass(ap));
}
int
null_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
/*
@ -351,6 +530,7 @@ null_inactive(ap)
* like they do in the name lookup cache code.
* That's too much work for now.
*/
VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
return (0);
}
@ -358,6 +538,7 @@ int
null_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@ -370,14 +551,13 @@ null_reclaim(ap)
*/
/* After this assignment, this node will not be re-used. */
xp->null_lowervp = NULL;
remque(xp);
LIST_REMOVE(xp, null_hash);
FREE(vp->v_data, M_TEMP);
vp->v_data = NULL;
vrele (lowervp);
return (0);
}
int
null_print(ap)
struct vop_print_args /* {
@ -389,7 +569,6 @@ null_print(ap)
return (0);
}
/*
* XXX - vop_strategy must be hand coded because it has no
* vnode in its arguments.
@ -415,7 +594,6 @@ null_strategy(ap)
return (error);
}
/*
* XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
* vnode in its arguments.
@ -448,7 +626,12 @@ int (**null_vnodeop_p)();
struct vnodeopv_entry_desc null_vnodeop_entries[] = {
{ &vop_default_desc, null_bypass },
{ &vop_lookup_desc, null_lookup },
{ &vop_setattr_desc, null_setattr },
{ &vop_getattr_desc, null_getattr },
{ &vop_access_desc, null_access },
{ &vop_lock_desc, null_lock },
{ &vop_unlock_desc, null_unlock },
{ &vop_inactive_desc, null_inactive },
{ &vop_reclaim_desc, null_reclaim },
{ &vop_print_desc, null_print },

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1992, 1993
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)portal_vfsops.c 8.6 (Berkeley) 1/21/94
* @(#)portal_vfsops.c 8.11 (Berkeley) 5/14/95
*
* $Id: portal_vfsops.c,v 1.5 1992/05/30 10:25:27 jsp Exp jsp $
*/
@ -62,7 +62,8 @@
#include <miscfs/portal/portal.h>
int
portal_init()
portal_init(vfsp)
struct vfsconf *vfsp;
{
return (0);
@ -120,7 +121,7 @@ portal_mount(mp, path, data, ndp, p)
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = (qaddr_t) fmp;
getnewfsid(mp, MOUNT_PORTAL);
vfs_getnewfsid(mp);
(void)copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
@ -152,17 +153,12 @@ portal_unmount(mp, mntflags, p)
int mntflags;
struct proc *p;
{
extern int doforce;
struct vnode *rootvp = VFSTOPORTAL(mp)->pm_root;
int error, flags = 0;
if (mntflags & MNT_FORCE) {
/* portal can never be rootfs so don't check for it */
if (!doforce)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
/*
* Clear out buffer cache. I don't think we
@ -211,31 +207,19 @@ portal_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct proc *p = curproc; /* XXX */
struct vnode *vp;
/*
* Return locked reference to root.
*/
vp = VFSTOPORTAL(mp)->pm_root;
VREF(vp);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
*vpp = vp;
return (0);
}
int
portal_quotactl(mp, cmd, uid, arg, p)
struct mount *mp;
int cmd;
uid_t uid;
caddr_t arg;
struct proc *p;
{
return (EOPNOTSUPP);
}
int
portal_statfs(mp, sbp, p)
struct mount *mp;
@ -243,7 +227,6 @@ portal_statfs(mp, sbp, p)
struct proc *p;
{
sbp->f_type = MOUNT_PORTAL;
sbp->f_flags = 0;
sbp->f_bsize = DEV_BSIZE;
sbp->f_iosize = DEV_BSIZE;
@ -253,6 +236,7 @@ portal_statfs(mp, sbp, p)
sbp->f_files = 1; /* Allow for "." */
sbp->f_ffree = 0; /* See comments above */
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
@ -260,43 +244,17 @@ portal_statfs(mp, sbp, p)
return (0);
}
int
portal_sync(mp, waitfor)
struct mount *mp;
int waitfor;
{
return (0);
}
int
portal_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
int
portal_fhtovp(mp, fhp, vpp)
struct mount *mp;
struct fid *fhp;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
int
portal_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
return (EOPNOTSUPP);
}
#define portal_fhtovp ((int (*) __P((struct mount *, struct fid *, \
struct mbuf *, struct vnode **, int *, struct ucred **)))eopnotsupp)
#define portal_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \
struct proc *)))eopnotsupp)
#define portal_sync ((int (*) __P((struct mount *, int, struct ucred *, \
struct proc *)))nullop)
#define portal_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
#define portal_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \
eopnotsupp)
#define portal_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp)
struct vfsops portal_vfsops = {
portal_mount,
@ -310,4 +268,5 @@ struct vfsops portal_vfsops = {
portal_fhtovp,
portal_vptofh,
portal_init,
portal_sysctl,
};

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)portal_vnops.c 8.8 (Berkeley) 1/21/94
* @(#)portal_vnops.c 8.14 (Berkeley) 5/21/95
*
* $Id: portal_vnops.c,v 1.4 1992/05/30 10:05:24 jsp Exp jsp $
*/
@ -97,27 +97,34 @@ portal_lookup(ap)
struct componentname * a_cnp;
} */ *ap;
{
char *pname = ap->a_cnp->cn_nameptr;
struct componentname *cnp = ap->a_cnp;
struct vnode **vpp = ap->a_vpp;
struct vnode *dvp = ap->a_dvp;
char *pname = cnp->cn_nameptr;
struct portalnode *pt;
int error;
struct vnode *fvp = 0;
char *path;
int size;
if (ap->a_cnp->cn_namelen == 1 && *pname == '.') {
*ap->a_vpp = ap->a_dvp;
VREF(ap->a_dvp);
/*VOP_LOCK(ap->a_dvp);*/
*vpp = NULLVP;
if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)
return (EROFS);
if (cnp->cn_namelen == 1 && *pname == '.') {
*vpp = dvp;
VREF(dvp);
/*VOP_LOCK(dvp);*/
return (0);
}
error = getnewvnode(VT_PORTAL, ap->a_dvp->v_mount, portal_vnodeop_p, &fvp);
error = getnewvnode(VT_PORTAL, dvp->v_mount, portal_vnodeop_p, &fvp);
if (error)
goto bad;
fvp->v_type = VREG;
MALLOC(fvp->v_data, void *, sizeof(struct portalnode),
M_TEMP, M_WAITOK);
MALLOC(fvp->v_data, void *, sizeof(struct portalnode), M_TEMP,
M_WAITOK);
pt = VTOPORTAL(fvp);
/*
@ -127,22 +134,20 @@ portal_lookup(ap)
*/
for (size = 0, path = pname; *path; path++)
size++;
ap->a_cnp->cn_consume = size - ap->a_cnp->cn_namelen;
cnp->cn_consume = size - cnp->cn_namelen;
pt->pt_arg = malloc(size+1, M_TEMP, M_WAITOK);
pt->pt_size = size+1;
bcopy(pname, pt->pt_arg, pt->pt_size);
pt->pt_fileid = portal_fileid++;
*ap->a_vpp = fvp;
*vpp = fvp;
/*VOP_LOCK(fvp);*/
return (0);
bad:;
if (fvp) {
if (fvp)
vrele(fvp);
}
*ap->a_vpp = NULL;
return (error);
}
@ -423,6 +428,7 @@ portal_getattr(ap)
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
struct timeval tv;
bzero(vap, sizeof(*vap));
vattr_null(vap);
@ -431,7 +437,8 @@ portal_getattr(ap)
vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
vap->va_size = DEV_BSIZE;
vap->va_blocksize = DEV_BSIZE;
microtime(&vap->va_atime);
microtime(&tv);
TIMEVAL_TO_TIMESPEC(&tv, &vap->va_atime);
vap->va_mtime = vap->va_atime;
vap->va_ctime = vap->va_ctime;
vap->va_gen = 0;
@ -487,9 +494,19 @@ portal_readdir(ap)
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
u_long *a_cookies;
int a_ncookies;
} */ *ap;
{
/*
* We don't allow exporting portal mounts, and currently local
* requests do not need cookies.
*/
if (ap->a_ncookies)
panic("portal_readdir: not hungry");
return (0);
}
@ -497,9 +514,11 @@ int
portal_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
return (0);
}
@ -626,6 +645,7 @@ portal_nullop()
#define portal_ioctl ((int (*) __P((struct vop_ioctl_args *)))portal_enotsupp)
#define portal_select ((int (*) __P((struct vop_select_args *)))portal_enotsupp)
#define portal_mmap ((int (*) __P((struct vop_mmap_args *)))portal_enotsupp)
#define portal_revoke vop_revoke
#define portal_fsync ((int (*) __P((struct vop_fsync_args *)))nullop)
#define portal_seek ((int (*) __P((struct vop_seek_args *)))nullop)
#define portal_remove ((int (*) __P((struct vop_remove_args *)))portal_enotsupp)
@ -638,12 +658,14 @@ portal_nullop()
#define portal_readlink \
((int (*) __P((struct vop_readlink_args *)))portal_enotsupp)
#define portal_abortop ((int (*) __P((struct vop_abortop_args *)))nullop)
#define portal_lock ((int (*) __P((struct vop_lock_args *)))nullop)
#define portal_unlock ((int (*) __P((struct vop_unlock_args *)))nullop)
#define portal_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define portal_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
#define portal_bmap ((int (*) __P((struct vop_bmap_args *)))portal_badop)
#define portal_strategy \
((int (*) __P((struct vop_strategy_args *)))portal_badop)
#define portal_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
#define portal_islocked \
((int (*) __P((struct vop_islocked_args *)))vop_noislocked)
#define fifo_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
#define portal_advlock \
((int (*) __P((struct vop_advlock_args *)))portal_enotsupp)
#define portal_blkatoff \
@ -674,6 +696,7 @@ struct vnodeopv_entry_desc portal_vnodeop_entries[] = {
{ &vop_ioctl_desc, portal_ioctl }, /* ioctl */
{ &vop_select_desc, portal_select }, /* select */
{ &vop_mmap_desc, portal_mmap }, /* mmap */
{ &vop_revoke_desc, portal_revoke }, /* revoke */
{ &vop_fsync_desc, portal_fsync }, /* fsync */
{ &vop_seek_desc, portal_seek }, /* seek */
{ &vop_remove_desc, portal_remove }, /* remove */

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs.h 8.6 (Berkeley) 2/3/94
* @(#)procfs.h 8.9 (Berkeley) 5/14/95
*
* From:
* $Id: procfs.h,v 3.2 1993/12/15 09:40:17 jsp Exp $
@ -45,6 +45,7 @@
*/
typedef enum {
Proot, /* the filesystem root */
Pcurproc, /* symbolic link for curproc */
Pproc, /* a process-specific sub-directory */
Pfile, /* the executable file */
Pmem, /* the process's memory image */
@ -94,9 +95,9 @@ struct pfsdent {
};
#define UIO_MX sizeof(struct pfsdent)
#define PROCFS_FILENO(pid, type) \
(((type) == Proot) ? \
2 : \
((((pid)+1) << 3) + ((int) (type))))
(((type) < Pproc) ? \
((type) + 2) : \
((((pid)+1) << 4) + ((int) (type))))
/*
* Convert between pfsnode vnode
@ -110,29 +111,34 @@ struct vfs_namemap {
int nm_val;
};
extern int vfs_getuserstr __P((struct uio *, char *, int *));
extern vfs_namemap_t *vfs_findname __P((vfs_namemap_t *, char *, int));
int vfs_getuserstr __P((struct uio *, char *, int *));
vfs_namemap_t *vfs_findname __P((vfs_namemap_t *, char *, int));
/* <machine/reg.h> */
struct reg;
struct fpreg;
#define PFIND(pid) ((pid) ? pfind(pid) : &proc0)
extern int procfs_freevp __P((struct vnode *));
extern int procfs_allocvp __P((struct mount *, struct vnode **, long, pfstype));
extern struct vnode *procfs_findtextvp __P((struct proc *));
extern int procfs_sstep __P((struct proc *));
extern void procfs_fix_sstep __P((struct proc *));
extern int procfs_read_regs __P((struct proc *, struct reg *));
extern int procfs_write_regs __P((struct proc *, struct reg *));
extern int procfs_read_fpregs __P((struct proc *, struct fpreg *));
extern int procfs_write_fpregs __P((struct proc *, struct fpreg *));
extern int procfs_donote __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
extern int procfs_doregs __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
extern int procfs_dofpregs __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
extern int procfs_domem __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
extern int procfs_doctl __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
extern int procfs_dostatus __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
int procfs_freevp __P((struct vnode *));
int procfs_allocvp __P((struct mount *, struct vnode **, long, pfstype));
struct vnode *procfs_findtextvp __P((struct proc *));
int procfs_sstep __P((struct proc *, int));
void procfs_fix_sstep __P((struct proc *));
int procfs_read_regs __P((struct proc *, struct reg *));
int procfs_write_regs __P((struct proc *, struct reg *));
int procfs_read_fpregs __P((struct proc *, struct fpreg *));
int procfs_write_fpregs __P((struct proc *, struct fpreg *));
int procfs_donote __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
int procfs_doregs __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
int procfs_dofpregs __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
int procfs_domem __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
int procfs_doctl __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
int procfs_dostatus __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio));
/* functions to check whether or not files should be displayed */
int procfs_validfile __P((struct proc *));
int procfs_validfpregs __P((struct proc *));
int procfs_validregs __P((struct proc *));
#define PROCFS_LOCKED 0x01
#define PROCFS_WANT 0x02
@ -158,6 +164,7 @@ int procfs_setattr __P((struct vop_setattr_args *));
int procfs_ioctl __P((struct vop_ioctl_args *));
#define procfs_select ((int (*) __P((struct vop_select_args *))) procfs_badop)
#define procfs_mmap ((int (*) __P((struct vop_mmap_args *))) procfs_badop)
#define procfs_revoke vop_revoke
#define procfs_fsync ((int (*) __P((struct vop_fsync_args *))) procfs_badop)
#define procfs_seek ((int (*) __P((struct vop_seek_args *))) procfs_badop)
#define procfs_remove ((int (*) __P((struct vop_remove_args *))) procfs_badop)
@ -167,16 +174,17 @@ int procfs_ioctl __P((struct vop_ioctl_args *));
#define procfs_rmdir ((int (*) __P((struct vop_rmdir_args *))) procfs_badop)
#define procfs_symlink ((int (*) __P((struct vop_symlink_args *))) procfs_badop)
int procfs_readdir __P((struct vop_readdir_args *));
#define procfs_readlink ((int (*) __P((struct vop_readlink_args *))) procfs_badop)
int procfs_readlink __P((struct vop_readlink_args *));
int procfs_abortop __P((struct vop_abortop_args *));
int procfs_inactive __P((struct vop_inactive_args *));
int procfs_reclaim __P((struct vop_reclaim_args *));
#define procfs_lock ((int (*) __P((struct vop_lock_args *))) nullop)
#define procfs_unlock ((int (*) __P((struct vop_unlock_args *))) nullop)
#define procfs_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define procfs_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
int procfs_bmap __P((struct vop_bmap_args *));
#define procfs_strategy ((int (*) __P((struct vop_strategy_args *))) procfs_badop)
int procfs_print __P((struct vop_print_args *));
#define procfs_islocked ((int (*) __P((struct vop_islocked_args *))) nullop)
#define procfs_islocked \
((int (*) __P((struct vop_islocked_args *)))vop_noislocked)
#define procfs_advlock ((int (*) __P((struct vop_advlock_args *))) procfs_badop)
#define procfs_blkatoff ((int (*) __P((struct vop_blkatoff_args *))) procfs_badop)
#define procfs_valloc ((int (*) __P((struct vop_valloc_args *))) procfs_badop)

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_ctl.c 8.3 (Berkeley) 1/21/94
* @(#)procfs_ctl.c 8.4 (Berkeley) 6/15/94
*
* From:
* $Id: procfs_ctl.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
@ -50,8 +50,14 @@
#include <sys/tty.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <miscfs/procfs/procfs.h>
#ifndef FIX_SSTEP
#define FIX_SSTEP(p)
#endif
/*
* True iff process (p) is in trace wait state
* relative to process (curp)
@ -61,15 +67,6 @@
(p)->p_pptr == (curp) && \
((p)->p_flag & P_TRACED))
#ifdef notdef
#define FIX_SSTEP(p) { \
procfs_fix_sstep(p); \
} \
}
#else
#define FIX_SSTEP(p)
#endif
#define PROCFS_CTL_ATTACH 1
#define PROCFS_CTL_DETACH 2
#define PROCFS_CTL_STEP 3
@ -206,7 +203,8 @@ procfs_control(curp, p, op)
* Step. Let the target process execute a single instruction.
*/
case PROCFS_CTL_STEP:
procfs_sstep(p);
if (error = procfs_sstep(p, 1))
return (error);
break;
/*

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_fpregs.c 8.1 (Berkeley) 1/27/94
* @(#)procfs_fpregs.c 8.2 (Berkeley) 6/15/94
*
* From:
* $Id: procfs_regs.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
@ -85,3 +85,11 @@ procfs_dofpregs(curp, p, pfs, uio)
uio->uio_offset = 0;
return (error);
}
int
procfs_validfpregs(p)
struct proc *p;
{
return ((p->p_flag & P_SYSTEM) == 0);
}

View File

@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_mem.c 8.4 (Berkeley) 1/21/94
* @(#)procfs_mem.c 8.5 (Berkeley) 6/15/94
*
* From:
* $Id: procfs_mem.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
@ -206,14 +206,11 @@ procfs_domem(curp, p, pfs, uio)
struct pfsnode *pfs;
struct uio *uio;
{
int error;
if (uio->uio_resid == 0)
return (0);
error = procfs_rwmem(p, uio);
return (error);
return (procfs_rwmem(p, uio));
}
/*
@ -231,6 +228,7 @@ struct vnode *
procfs_findtextvp(p)
struct proc *p;
{
return (p->p_textvp);
}

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_regs.c 8.3 (Berkeley) 1/27/94
* @(#)procfs_regs.c 8.4 (Berkeley) 6/15/94
*
* From:
* $Id: procfs_regs.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
@ -85,3 +85,11 @@ procfs_doregs(curp, p, pfs, uio)
uio->uio_offset = 0;
return (error);
}
int
procfs_validregs(p)
struct proc *p;
{
return ((p->p_flag & P_SYSTEM) == 0);
}

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_status.c 8.3 (Berkeley) 2/17/94
* @(#)procfs_status.c 8.4 (Berkeley) 6/15/94
*
* From:
* $Id: procfs_status.c,v 3.1 1993/12/15 09:40:17 jsp Exp $
@ -127,7 +127,7 @@ procfs_dostatus(curp, p, pfs, uio)
cr = p->p_ucred;
ps += sprintf(ps, " %d", cr->cr_uid, cr->cr_gid);
ps += sprintf(ps, " %d", cr->cr_uid);
for (i = 0; i < cr->cr_ngroups; i++)
ps += sprintf(ps, ",%d", cr->cr_groups[i]);
ps += sprintf(ps, "\n");
@ -135,7 +135,7 @@ procfs_dostatus(curp, p, pfs, uio)
xlen = ps - psbuf;
xlen -= uio->uio_offset;
ps = psbuf + uio->uio_offset;
xlen = min(xlen, uio->uio_resid);
xlen = imin(xlen, uio->uio_resid);
if (xlen <= 0)
error = 0;
else

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_subr.c 8.4 (Berkeley) 1/27/94
* @(#)procfs_subr.c 8.6 (Berkeley) 5/14/95
*
* From:
* $Id: procfs_subr.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
@ -85,18 +85,21 @@ procfs_allocvp(mp, vpp, pid, pfs_type)
long pid;
pfstype pfs_type;
{
int error;
struct proc *p = curproc; /* XXX */
struct pfsnode *pfs;
struct vnode *vp;
struct pfsnode **pp;
int error;
loop:
for (pfs = pfshead; pfs != 0; pfs = pfs->pfs_next) {
vp = PFSTOV(pfs);
if (pfs->pfs_pid == pid &&
pfs->pfs_type == pfs_type &&
PFSTOV(pfs)->v_mount == mp) {
if (vget(pfs->pfs_vnode, 0))
vp->v_mount == mp) {
if (vget(vp, 0, p))
goto loop;
*vpp = pfs->pfs_vnode;
*vpp = vp;
return (0);
}
}
@ -112,18 +115,17 @@ procfs_allocvp(mp, vpp, pid, pfs_type)
}
pfsvplock |= PROCFS_LOCKED;
error = getnewvnode(VT_PROCFS, mp, procfs_vnodeop_p, vpp);
if (error)
if (error = getnewvnode(VT_PROCFS, mp, procfs_vnodeop_p, vpp))
goto out;
vp = *vpp;
MALLOC((*vpp)->v_data, void *, sizeof(struct pfsnode),
M_TEMP, M_WAITOK);
MALLOC(pfs, void *, sizeof(struct pfsnode), M_TEMP, M_WAITOK);
vp->v_data = pfs;
pfs = VTOPFS(*vpp);
pfs->pfs_next = 0;
pfs->pfs_pid = (pid_t) pid;
pfs->pfs_type = pfs_type;
pfs->pfs_vnode = *vpp;
pfs->pfs_vnode = vp;
pfs->pfs_flags = 0;
pfs->pfs_fileno = PROCFS_FILENO(pid, pfs_type);
@ -132,46 +134,44 @@ procfs_allocvp(mp, vpp, pid, pfs_type)
pfs->pfs_mode = (VREAD|VEXEC) |
(VREAD|VEXEC) >> 3 |
(VREAD|VEXEC) >> 6;
vp->v_type = VDIR;
vp->v_flag = VROOT;
break;
case Pcurproc: /* /proc/curproc = lr--r--r-- */
pfs->pfs_mode = (VREAD) |
(VREAD >> 3) |
(VREAD >> 6);
vp->v_type = VLNK;
break;
case Pproc:
pfs->pfs_mode = (VREAD|VEXEC) |
(VREAD|VEXEC) >> 3 |
(VREAD|VEXEC) >> 6;
vp->v_type = VDIR;
break;
case Pfile:
pfs->pfs_mode = (VREAD|VWRITE);
break;
case Pmem:
pfs->pfs_mode = (VREAD|VWRITE);
break;
case Pregs:
pfs->pfs_mode = (VREAD|VWRITE);
break;
case Pfpregs:
pfs->pfs_mode = (VREAD|VWRITE);
vp->v_type = VREG;
break;
case Pctl:
case Pnote:
case Pnotepg:
pfs->pfs_mode = (VWRITE);
vp->v_type = VREG;
break;
case Pstatus:
pfs->pfs_mode = (VREAD) |
(VREAD >> 3) |
(VREAD >> 6);
break;
case Pnote:
pfs->pfs_mode = (VWRITE);
break;
case Pnotepg:
pfs->pfs_mode = (VWRITE);
vp->v_type = VREG;
break;
default:
@ -283,8 +283,7 @@ vfs_getuserstr(uio, buf, buflenp)
return (EMSGSIZE);
xlen = uio->uio_resid;
error = uiomove(buf, xlen, uio);
if (error)
if (error = uiomove(buf, xlen, uio))
return (error);
/* allow multiple writes without seeks */
@ -306,6 +305,7 @@ vfs_findname(nm, buf, buflen)
char *buf;
int buflen;
{
for (; nm->nm_name; nm++)
if (bcmp(buf, (char *) nm->nm_name, buflen+1) == 0)
return (nm);

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_vfsops.c 8.4 (Berkeley) 1/21/94
* @(#)procfs_vfsops.c 8.7 (Berkeley) 5/10/95
*
* From:
* $Id: procfs_vfsops.c,v 3.1 1993/12/15 09:40:17 jsp Exp $
@ -45,6 +45,7 @@
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
@ -81,7 +82,7 @@ procfs_mount(mp, path, data, ndp, p)
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = 0;
getnewfsid(mp, MOUNT_PROCFS);
vfs_getnewfsid(mp);
(void) copyinstr(path, (caddr_t)mp->mnt_stat.f_mntonname, MNAMELEN, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
@ -102,15 +103,10 @@ procfs_unmount(mp, mntflags, p)
struct proc *p;
{
int error;
extern int doforce;
int flags = 0;
if (mntflags & MNT_FORCE) {
/* procfs can never be rootfs so don't check for it */
if (!doforce)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
if (error = vflush(mp, 0, flags))
return (error);
@ -122,24 +118,10 @@ procfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct pfsnode *pfs;
struct vnode *vp;
int error;
error = procfs_allocvp(mp, &vp, (pid_t) 0, Proot);
if (error)
return (error);
vp->v_type = VDIR;
vp->v_flag = VROOT;
pfs = VTOPFS(vp);
*vpp = vp;
return (0);
return (procfs_allocvp(mp, vpp, 0, Proot));
}
/*
*/
/* ARGSUSED */
procfs_start(mp, flags, p)
struct mount *mp;
@ -158,7 +140,6 @@ procfs_statfs(mp, sbp, p)
struct statfs *sbp;
struct proc *p;
{
sbp->f_type = MOUNT_PROCFS;
sbp->f_bsize = PAGE_SIZE;
sbp->f_iosize = PAGE_SIZE;
sbp->f_blocks = 1; /* avoid divide by zero in some df's */
@ -168,6 +149,7 @@ procfs_statfs(mp, sbp, p)
sbp->f_ffree = maxproc - nprocs; /* approx */
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
@ -176,57 +158,24 @@ procfs_statfs(mp, sbp, p)
return (0);
}
procfs_quotactl(mp, cmds, uid, arg, p)
struct mount *mp;
int cmds;
uid_t uid;
caddr_t arg;
struct proc *p;
{
return (EOPNOTSUPP);
}
procfs_sync(mp, waitfor)
struct mount *mp;
int waitfor;
procfs_init(vfsp)
struct vfsconf *vfsp;
{
return (0);
}
procfs_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
procfs_fhtovp(mp, fhp, vpp)
struct mount *mp;
struct fid *fhp;
struct vnode **vpp;
{
return (EINVAL);
}
procfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
return EINVAL;
}
procfs_init()
{
return (0);
}
#define procfs_fhtovp ((int (*) __P((struct mount *, struct fid *, \
struct mbuf *, struct vnode **, int *, struct ucred **)))einval)
#define procfs_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \
struct proc *)))eopnotsupp)
#define procfs_sync ((int (*) __P((struct mount *, int, struct ucred *, \
struct proc *)))nullop)
#define procfs_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
#define procfs_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \
eopnotsupp)
#define procfs_vptofh ((int (*) __P((struct vnode *, struct fid *)))einval)
struct vfsops procfs_vfsops = {
procfs_mount,
@ -240,4 +189,5 @@ struct vfsops procfs_vfsops = {
procfs_fhtovp,
procfs_vptofh,
procfs_init,
procfs_sysctl,
};

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1993 Jan-Simon Pendry
* Copyright (c) 1993
* Copyright (c) 1993, 1995 Jan-Simon Pendry
* Copyright (c) 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)procfs_vnops.c 8.6 (Berkeley) 2/7/94
* @(#)procfs_vnops.c 8.18 (Berkeley) 5/21/95
*
* From:
* $Id: procfs_vnops.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
@ -55,8 +55,9 @@
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/resourcevar.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h> /* for PAGE_SIZE */
#include <machine/reg.h>
#include <miscfs/procfs/procfs.h>
/*
* Vnode Operations.
@ -68,24 +69,28 @@
* process-specific sub-directories. It is
* used in procfs_lookup and procfs_readdir
*/
static struct pfsnames {
u_short d_namlen;
char d_name[PROCFS_NAMELEN];
pfstype d_pfstype;
} procent[] = {
struct proc_target {
u_char pt_type;
u_char pt_namlen;
char *pt_name;
pfstype pt_pfstype;
int (*pt_valid) __P((struct proc *p));
} proc_targets[] = {
#define N(s) sizeof(s)-1, s
/* namlen, nam, type */
{ N("file"), Pfile },
{ N("mem"), Pmem },
{ N("regs"), Pregs },
{ N("fpregs"), Pfpregs },
{ N("ctl"), Pctl },
{ N("status"), Pstatus },
{ N("note"), Pnote },
{ N("notepg"), Pnotepg },
/* name type validp */
{ DT_DIR, N("."), Pproc, NULL },
{ DT_DIR, N(".."), Proot, NULL },
{ DT_REG, N("file"), Pfile, procfs_validfile },
{ DT_REG, N("mem"), Pmem, NULL },
{ DT_REG, N("regs"), Pregs, procfs_validregs },
{ DT_REG, N("fpregs"), Pfpregs, procfs_validfpregs },
{ DT_REG, N("ctl"), Pctl, NULL },
{ DT_REG, N("status"), Pstatus, NULL },
{ DT_REG, N("note"), Pnote, NULL },
{ DT_REG, N("notepg"), Pnotepg, NULL },
#undef N
};
#define Nprocent (sizeof(procent)/sizeof(procent[0]))
static int nproc_targets = sizeof(proc_targets) / sizeof(proc_targets[0]);
static pid_t atopid __P((const char *, u_int));
@ -101,7 +106,12 @@ static pid_t atopid __P((const char *, u_int));
* memory images.
*/
procfs_open(ap)
struct vop_open_args *ap;
struct vop_open_args /* {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
struct pfsnode *pfs = VTOPFS(ap->a_vp);
@ -111,10 +121,9 @@ procfs_open(ap)
return (ENOENT); /* was ESRCH, jsp */
if ((pfs->pfs_flags & FWRITE) && (ap->a_mode & O_EXCL) ||
(pfs->pfs_flags & O_EXCL) && (ap->a_mode & FWRITE))
(pfs->pfs_flags & O_EXCL) && (ap->a_mode & FWRITE))
return (EBUSY);
if (ap->a_mode & FWRITE)
pfs->pfs_flags = ap->a_mode & (FWRITE|O_EXCL);
@ -135,7 +144,12 @@ procfs_open(ap)
* any exclusive open flag (see _open above).
*/
procfs_close(ap)
struct vop_close_args *ap;
struct vop_close_args /* {
struct vnode *a_vp;
int a_fflag;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
struct pfsnode *pfs = VTOPFS(ap->a_vp);
@ -154,7 +168,14 @@ procfs_close(ap)
* (vp) is not locked on entry or exit.
*/
procfs_ioctl(ap)
struct vop_ioctl_args *ap;
struct vop_ioctl_args /* {
struct vnode *a_vp;
int a_command;
caddr_t a_data;
int a_fflag;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
return (ENOTTY);
@ -171,18 +192,26 @@ procfs_ioctl(ap)
* (EIO) would be a reasonable alternative.
*/
procfs_bmap(ap)
struct vop_bmap_args *ap;
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}
/*
* _inactive is called when the pfsnode
* procfs_inactive is called when the pfsnode
* is vrele'd and the reference count goes
* to zero. (vp) will be on the vnode free
* list, so to get it back vget() must be
@ -195,15 +224,19 @@ procfs_bmap(ap)
* chances are that the process will still be
* there and PFIND is not free.
*
* (vp) is not locked on entry or exit.
* (vp) is locked on entry, but must be unlocked on exit.
*/
procfs_inactive(ap)
struct vop_inactive_args *ap;
struct vop_inactive_args /* {
struct vnode *a_vp;
} */ *ap;
{
struct pfsnode *pfs = VTOPFS(ap->a_vp);
struct vnode *vp = ap->a_vp;
struct pfsnode *pfs = VTOPFS(vp);
VOP_UNLOCK(vp, 0, ap->a_p);
if (PFIND(pfs->pfs_pid) == 0)
vgone(ap->a_vp);
vgone(vp);
return (0);
}
@ -216,12 +249,12 @@ procfs_inactive(ap)
* from any private lists.
*/
procfs_reclaim(ap)
struct vop_reclaim_args *ap;
struct vop_reclaim_args /* {
struct vnode *a_vp;
} */ *ap;
{
int error;
error = procfs_freevp(ap->a_vp);
return (error);
return (procfs_freevp(ap->a_vp));
}
/*
@ -266,13 +299,14 @@ procfs_pathconf(ap)
* of (vp).
*/
procfs_print(ap)
struct vop_print_args *ap;
struct vop_print_args /* {
struct vnode *a_vp;
} */ *ap;
{
struct pfsnode *pfs = VTOPFS(ap->a_vp);
printf("tag VT_PROCFS, pid %d, mode %x, flags %x\n",
pfs->pfs_pid,
pfs->pfs_mode, pfs->pfs_flags);
printf("tag VT_PROCFS, type %s, pid %d, mode %x, flags %x\n",
pfs->pfs_type, pfs->pfs_pid, pfs->pfs_mode, pfs->pfs_flags);
}
/*
@ -282,7 +316,10 @@ procfs_print(ap)
* this will always include freeing the pathname buffer.
*/
procfs_abortop(ap)
struct vop_abortop_args *ap;
struct vop_abortop_args /* {
struct vnode *a_dvp;
struct componentname *a_cnp;
} */ *ap;
{
if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
@ -309,16 +346,23 @@ procfs_badop()
* this is relatively minimal for procfs.
*/
procfs_getattr(ap)
struct vop_getattr_args *ap;
struct vop_getattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
struct pfsnode *pfs = VTOPFS(ap->a_vp);
struct vattr *vap = ap->a_vap;
struct proc *procp;
struct timeval tv;
int error;
/* first check the process still exists */
switch (pfs->pfs_type) {
case Proot:
case Pcurproc:
procp = 0;
break;
@ -341,22 +385,6 @@ procfs_getattr(ap)
vap->va_blocksize = PAGE_SIZE;
vap->va_bytes = vap->va_size = 0;
/*
* If the process has exercised some setuid or setgid
* privilege, then rip away read/write permission so
* that only root can gain access.
*/
switch (pfs->pfs_type) {
case Pregs:
case Pfpregs:
case Pmem:
if (procp->p_flag & P_SUGID)
vap->va_mode &= ~((VREAD|VWRITE)|
((VREAD|VWRITE)>>3)|
((VREAD|VWRITE)>>6));
break;
}
/*
* Make all times be current TOD.
* It would be possible to get the process start
@ -365,9 +393,33 @@ procfs_getattr(ap)
* p_stat structure is not addressible if u. gets
* swapped out for that process.
*/
microtime(&vap->va_ctime);
microtime(&tv);
TIMEVAL_TO_TIMESPEC(&tv, &vap->va_ctime);
vap->va_atime = vap->va_mtime = vap->va_ctime;
/*
* If the process has exercised some setuid or setgid
* privilege, then rip away read/write permission so
* that only root can gain access.
*/
switch (pfs->pfs_type) {
case Pmem:
case Pregs:
case Pfpregs:
if (procp->p_flag & P_SUGID)
vap->va_mode &= ~((VREAD|VWRITE)|
((VREAD|VWRITE)>>3)|
((VREAD|VWRITE)>>6));
case Pctl:
case Pstatus:
case Pnote:
case Pnotepg:
vap->va_nlink = 1;
vap->va_uid = procp->p_ucred->cr_uid;
vap->va_gid = procp->p_ucred->cr_gid;
break;
}
/*
* now do the object specific fields
*
@ -380,15 +432,30 @@ procfs_getattr(ap)
switch (pfs->pfs_type) {
case Proot:
vap->va_nlink = 2;
/*
* Set nlink to 1 to tell fts(3) we don't actually know.
*/
vap->va_nlink = 1;
vap->va_uid = 0;
vap->va_gid = 0;
vap->va_size = vap->va_bytes = DEV_BSIZE;
break;
case Pcurproc: {
char buf[16]; /* should be enough */
vap->va_nlink = 1;
vap->va_uid = 0;
vap->va_gid = 0;
vap->va_size = vap->va_bytes =
sprintf(buf, "%ld", (long)curproc->p_pid);
break;
}
case Pproc:
vap->va_nlink = 2;
vap->va_uid = procp->p_ucred->cr_uid;
vap->va_gid = procp->p_ucred->cr_gid;
vap->va_size = vap->va_bytes = DEV_BSIZE;
break;
case Pfile:
@ -396,24 +463,24 @@ procfs_getattr(ap)
break;
case Pmem:
vap->va_nlink = 1;
vap->va_bytes = vap->va_size =
ctob(procp->p_vmspace->vm_tsize +
procp->p_vmspace->vm_dsize +
procp->p_vmspace->vm_ssize);
vap->va_uid = procp->p_ucred->cr_uid;
vap->va_gid = procp->p_ucred->cr_gid;
break;
case Pregs:
vap->va_bytes = vap->va_size = sizeof(struct reg);
break;
case Pfpregs:
vap->va_bytes = vap->va_size = sizeof(struct fpreg);
break;
case Pctl:
case Pstatus:
case Pnote:
case Pnotepg:
vap->va_nlink = 1;
vap->va_uid = procp->p_ucred->cr_uid;
vap->va_gid = procp->p_ucred->cr_gid;
break;
default:
@ -424,7 +491,12 @@ procfs_getattr(ap)
}
procfs_setattr(ap)
struct vop_setattr_args *ap;
struct vop_setattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
/*
* just fake out attribute setting
@ -452,7 +524,12 @@ procfs_setattr(ap)
* that the operation really does make sense.
*/
procfs_access(ap)
struct vop_access_args *ap;
struct vop_access_args /* {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
struct vattr *vap;
struct vattr vattr;
@ -462,8 +539,9 @@ procfs_access(ap)
* If you're the super-user,
* you always get access.
*/
if (ap->a_cred->cr_uid == (uid_t) 0)
if (ap->a_cred->cr_uid == 0)
return (0);
vap = &vattr;
if (error = VOP_GETATTR(ap->a_vp, vap, ap->a_cred, ap->a_p))
return (error);
@ -477,7 +555,7 @@ procfs_access(ap)
gid_t *gp;
int i;
(ap->a_mode) >>= 3;
ap->a_mode >>= 3;
gp = ap->a_cred->cr_groups;
for (i = 0; i < ap->a_cred->cr_ngroups; i++, gp++)
if (vap->va_gid == *gp)
@ -503,29 +581,37 @@ procfs_access(ap)
* read and inwardly digest ufs_lookup().
*/
procfs_lookup(ap)
struct vop_lookup_args *ap;
struct vop_lookup_args /* {
struct vnode * a_dvp;
struct vnode ** a_vpp;
struct componentname * a_cnp;
} */ *ap;
{
struct componentname *cnp = ap->a_cnp;
struct vnode **vpp = ap->a_vpp;
struct vnode *dvp = ap->a_dvp;
char *pname = cnp->cn_nameptr;
struct proc *curp = cnp->cn_proc;
int error = 0;
struct proc_target *pt;
struct vnode *fvp;
pid_t pid;
struct vnode *nvp;
struct pfsnode *pfs;
struct proc *procp;
pfstype pfs_type;
struct proc *p;
int i;
*vpp = NULL;
if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)
return (EROFS);
if (cnp->cn_namelen == 1 && *pname == '.') {
*vpp = dvp;
VREF(dvp);
/*VOP_LOCK(dvp);*/
/* vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, curp); */
return (0);
}
*vpp = NULL;
pfs = VTOPFS(dvp);
switch (pfs->pfs_type) {
case Proot:
@ -533,71 +619,60 @@ procfs_lookup(ap)
return (EIO);
if (CNEQ(cnp, "curproc", 7))
pid = cnp->cn_proc->p_pid;
else
pid = atopid(pname, cnp->cn_namelen);
return (procfs_allocvp(dvp->v_mount, vpp, 0, Pcurproc));
pid = atopid(pname, cnp->cn_namelen);
if (pid == NO_PID)
return (ENOENT);
break;
procp = PFIND(pid);
if (procp == 0)
return (ENOENT);
p = PFIND(pid);
if (p == 0)
break;
error = procfs_allocvp(dvp->v_mount, &nvp, pid, Pproc);
if (error)
return (error);
nvp->v_type = VDIR;
pfs = VTOPFS(nvp);
*vpp = nvp;
return (0);
return (procfs_allocvp(dvp->v_mount, vpp, pid, Pproc));
case Pproc:
if (cnp->cn_flags & ISDOTDOT) {
error = procfs_root(dvp->v_mount, vpp);
return (error);
}
if (cnp->cn_flags & ISDOTDOT)
return (procfs_root(dvp->v_mount, vpp));
procp = PFIND(pfs->pfs_pid);
if (procp == 0)
return (ENOENT);
p = PFIND(pfs->pfs_pid);
if (p == 0)
break;
for (i = 0; i < Nprocent; i++) {
struct pfsnames *dp = &procent[i];
if (cnp->cn_namelen == dp->d_namlen &&
bcmp(pname, dp->d_name, dp->d_namlen) == 0) {
pfs_type = dp->d_pfstype;
for (pt = proc_targets, i = 0; i < nproc_targets; pt++, i++) {
if (cnp->cn_namelen == pt->pt_namlen &&
bcmp(pt->pt_name, pname, cnp->cn_namelen) == 0 &&
(pt->pt_valid == NULL || (*pt->pt_valid)(p)))
goto found;
}
}
return (ENOENT);
break;
found:
if (pfs_type == Pfile) {
nvp = procfs_findtextvp(procp);
if (nvp) {
VREF(nvp);
VOP_LOCK(nvp);
} else {
error = ENXIO;
}
} else {
error = procfs_allocvp(dvp->v_mount, &nvp,
pfs->pfs_pid, pfs_type);
if (error)
return (error);
nvp->v_type = VREG;
pfs = VTOPFS(nvp);
if (pt->pt_pfstype == Pfile) {
fvp = procfs_findtextvp(p);
/* We already checked that it exists. */
VREF(fvp);
vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, curp);
*vpp = fvp;
return (0);
}
*vpp = nvp;
return (error);
return (procfs_allocvp(dvp->v_mount, vpp, pfs->pfs_pid,
pt->pt_pfstype));
default:
return (ENOTDIR);
}
return (cnp->cn_nameiop == LOOKUP ? ENOENT : EROFS);
}
/*
 * Validity predicate for the per-process "file" entry: report whether
 * procfs_findtextvp() can produce a text vnode for this process.
 */
int
procfs_validfile(p)
	struct proc *p;
{
	struct vnode *textvp;

	textvp = procfs_findtextvp(p);
	return (textvp != NULLVP);
}
/*
@ -613,7 +688,14 @@ procfs_lookup(ap)
* this should just be done through read()
*/
procfs_readdir(ap)
struct vop_readdir_args *ap;
struct vop_readdir_args /* {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
u_long *a_cookies;
int a_ncookies;
} */ *ap;
{
struct uio *uio = ap->a_uio;
struct pfsdent d;
@ -623,6 +705,13 @@ procfs_readdir(ap)
int count;
int i;
/*
* We don't allow exporting procfs mounts, and currently local
* requests do not need cookies.
*/
if (ap->a_ncookies)
panic("procfs_readdir: not hungry");
pfs = VTOPFS(ap->a_vp);
if (uio->uio_resid < UIO_MX)
@ -643,28 +732,29 @@ procfs_readdir(ap)
* from the procent[] table (top of this file).
*/
case Pproc: {
while (uio->uio_resid >= UIO_MX) {
struct pfsnames *dt;
struct proc *p;
struct proc_target *pt;
if (i >= Nprocent)
break;
p = PFIND(pfs->pfs_pid);
if (p == NULL)
break;
dt = &procent[i];
for (pt = &proc_targets[i];
uio->uio_resid >= UIO_MX && i < nproc_targets; pt++, i++) {
if (pt->pt_valid && (*pt->pt_valid)(p) == 0)
continue;
dp->d_reclen = UIO_MX;
dp->d_fileno = PROCFS_FILENO(pfs->pfs_pid, dt->d_pfstype);
dp->d_type = DT_REG;
dp->d_namlen = dt->d_namlen;
bcopy(dt->d_name, dp->d_name, sizeof(dt->d_name)-1);
error = uiomove((caddr_t) dp, UIO_MX, uio);
if (error)
dp->d_fileno = PROCFS_FILENO(pfs->pfs_pid, pt->pt_pfstype);
dp->d_namlen = pt->pt_namlen;
bcopy(pt->pt_name, dp->d_name, pt->pt_namlen + 1);
dp->d_type = pt->pt_type;
if (error = uiomove((caddr_t)dp, UIO_MX, uio))
break;
count += UIO_MX;
i++;
}
break;
}
/*
@ -677,55 +767,61 @@ procfs_readdir(ap)
*/
case Proot: {
int pcnt;
#ifdef PROCFS_ZOMBIE
int doingzomb = 0;
#endif
volatile struct proc *p;
int pcnt = 0;
volatile struct proc *p = allproc.lh_first;
p = allproc;
#define PROCFS_XFILES 1 /* number of other entries, like "curproc" */
pcnt = PROCFS_XFILES;
while (p && uio->uio_resid >= UIO_MX) {
again:
for (; p && uio->uio_resid >= UIO_MX; i++, pcnt++) {
bzero((char *) dp, UIO_MX);
dp->d_type = DT_DIR;
dp->d_reclen = UIO_MX;
switch (i) {
case 0:
/* ship out entry for "curproc" */
dp->d_fileno = PROCFS_FILENO(PID_MAX+1, Pproc);
dp->d_namlen = sprintf(dp->d_name, "curproc");
case 0: /* `.' */
case 1: /* `..' */
dp->d_fileno = PROCFS_FILENO(0, Proot);
dp->d_namlen = i + 1;
bcopy("..", dp->d_name, dp->d_namlen);
dp->d_name[i + 1] = '\0';
dp->d_type = DT_DIR;
break;
case 2:
dp->d_fileno = PROCFS_FILENO(0, Pcurproc);
dp->d_namlen = 7;
bcopy("curproc", dp->d_name, 8);
dp->d_type = DT_LNK;
break;
default:
if (pcnt >= i) {
dp->d_fileno = PROCFS_FILENO(p->p_pid, Pproc);
dp->d_namlen = sprintf(dp->d_name, "%ld", (long) p->p_pid);
while (pcnt < i) {
pcnt++;
p = p->p_list.le_next;
if (!p)
goto done;
}
p = p->p_next;
#ifdef PROCFS_ZOMBIE
if (p == 0 && doingzomb == 0) {
doingzomb = 1;
p = zombproc;
}
#endif
if (pcnt++ < i)
continue;
dp->d_fileno = PROCFS_FILENO(p->p_pid, Pproc);
dp->d_namlen = sprintf(dp->d_name, "%ld",
(long)p->p_pid);
dp->d_type = DT_REG;
p = p->p_list.le_next;
break;
}
error = uiomove((caddr_t) dp, UIO_MX, uio);
if (error)
if (error = uiomove((caddr_t)dp, UIO_MX, uio))
break;
count += UIO_MX;
i++;
}
done:
#ifdef PROCFS_ZOMBIE
if (p == 0 && doingzomb == 0) {
doingzomb = 1;
p = zombproc.lh_first;
goto again;
}
#endif
break;
@ -741,6 +837,24 @@ procfs_readdir(ap)
return (error);
}
/*
 * readlink reads the link of `curproc'
 */
procfs_readlink(ap)
	struct vop_readlink_args *ap;
{
	/*
	 * FIX: `uio' was assigned but never used (the uiomove() call
	 * re-read ap->a_uio); use the local consistently.
	 */
	struct uio *uio = ap->a_uio;
	char buf[16];		/* should be enough */
	int len;

	/* Only the Pcurproc node is a symlink; anything else is an error. */
	if (VTOPFS(ap->a_vp)->pfs_fileno != PROCFS_FILENO(0, Pcurproc))
		return (EINVAL);

	/* The link target is the decimal pid of the current process. */
	len = sprintf(buf, "%ld", (long)curproc->p_pid);
	return (uiomove((caddr_t)buf, len, uio));
}
/*
* convert decimal ascii to pid_t
*/
@ -782,6 +896,7 @@ struct vnodeopv_entry_desc procfs_vnodeop_entries[] = {
{ &vop_ioctl_desc, procfs_ioctl }, /* ioctl */
{ &vop_select_desc, procfs_select }, /* select */
{ &vop_mmap_desc, procfs_mmap }, /* mmap */
{ &vop_revoke_desc, procfs_revoke }, /* revoke */
{ &vop_fsync_desc, procfs_fsync }, /* fsync */
{ &vop_seek_desc, procfs_seek }, /* seek */
{ &vop_remove_desc, procfs_remove }, /* remove */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1989, 1993
* Copyright (c) 1989, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.6 (Berkeley) 4/9/94
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
*/
#include <sys/param.h>
@ -71,8 +71,10 @@ struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
{ &vop_setattr_desc, spec_setattr }, /* setattr */
{ &vop_read_desc, spec_read }, /* read */
{ &vop_write_desc, spec_write }, /* write */
{ &vop_lease_desc, spec_lease_check }, /* lease */
{ &vop_ioctl_desc, spec_ioctl }, /* ioctl */
{ &vop_select_desc, spec_select }, /* select */
{ &vop_revoke_desc, spec_revoke }, /* revoke */
{ &vop_mmap_desc, spec_mmap }, /* mmap */
{ &vop_fsync_desc, spec_fsync }, /* fsync */
{ &vop_seek_desc, spec_seek }, /* seek */
@ -134,9 +136,10 @@ spec_open(ap)
struct proc *a_p;
} */ *ap;
{
struct proc *p = ap->a_p;
struct vnode *bvp, *vp = ap->a_vp;
dev_t bdev, dev = (dev_t)vp->v_rdev;
register int maj = major(dev);
int maj = major(dev);
int error;
/*
@ -155,7 +158,7 @@ spec_open(ap)
* When running in very secure mode, do not allow
* opens for writing of any disk character devices.
*/
if (securelevel >= 2 && isdisk(dev, VCHR))
if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
return (EPERM);
/*
* When running in secure mode, do not allow opens
@ -173,9 +176,11 @@ spec_open(ap)
return (EPERM);
}
}
VOP_UNLOCK(vp);
error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
VOP_LOCK(vp);
if (cdevsw[maj].d_type == D_TTY)
vp->v_flag |= VISTTY;
VOP_UNLOCK(vp, 0, p);
error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
return (error);
case VBLK:
@ -186,7 +191,7 @@ spec_open(ap)
* opens for writing of any disk block devices.
*/
if (securelevel >= 2 && ap->a_cred != FSCRED &&
(ap->a_mode & FWRITE) && isdisk(dev, VBLK))
(ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
return (EPERM);
/*
* Do not allow opens of block devices that are
@ -194,7 +199,7 @@ spec_open(ap)
*/
if (error = vfs_mountedon(vp))
return (error);
return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p));
}
return (0);
}
@ -234,10 +239,10 @@ spec_read(ap)
switch (vp->v_type) {
case VCHR:
VOP_UNLOCK(vp);
VOP_UNLOCK(vp, 0, p);
error = (*cdevsw[major(vp->v_rdev)].d_read)
(vp->v_rdev, uio, ap->a_ioflag);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
return (error);
case VBLK:
@ -313,10 +318,10 @@ spec_write(ap)
switch (vp->v_type) {
case VCHR:
VOP_UNLOCK(vp);
VOP_UNLOCK(vp, 0, p);
error = (*cdevsw[major(vp->v_rdev)].d_write)
(vp->v_rdev, uio, ap->a_ioflag);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
return (error);
case VBLK:
@ -385,7 +390,7 @@ spec_ioctl(ap)
case VBLK:
if (ap->a_command == 0 && (int)ap->a_data == B_TAPE)
if (bdevsw[major(dev)].d_flags & B_TAPE)
if (bdevsw[major(dev)].d_type == D_TAPE)
return (0);
else
return (1);
@ -473,6 +478,18 @@ spec_fsync(ap)
return (0);
}
/*
 * Inactive processing for special files: there is no per-device state
 * to clean up, so simply drop the lock the caller holds on the vnode
 * and report success.
 */
int
spec_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	VOP_UNLOCK(vp, 0, ap->a_p);
	return (0);
}
/*
* Just call the device strategy routine
*/
@ -495,6 +512,7 @@ spec_bmap(ap)
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
@ -502,29 +520,8 @@ spec_bmap(ap)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
return (0);
}
/*
* At the moment we do not do any locking.
*/
/* ARGSUSED */
spec_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
} */ *ap;
{
return (0);
}
/* ARGSUSED */
spec_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
} */ *ap;
{
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)specdev.h 8.2 (Berkeley) 2/2/94
* @(#)specdev.h 8.6 (Berkeley) 5/21/95
*/
/*
@ -93,8 +93,10 @@ int spec_close __P((struct vop_close_args *));
#define spec_setattr ((int (*) __P((struct vop_setattr_args *)))spec_ebadf)
int spec_read __P((struct vop_read_args *));
int spec_write __P((struct vop_write_args *));
#define spec_lease_check ((int (*) __P((struct vop_lease_args *)))nullop)
int spec_ioctl __P((struct vop_ioctl_args *));
int spec_select __P((struct vop_select_args *));
#define spec_revoke vop_revoke
#define spec_mmap ((int (*) __P((struct vop_mmap_args *)))spec_badop)
int spec_fsync __P((struct vop_fsync_args *));
#define spec_seek ((int (*) __P((struct vop_seek_args *)))spec_badop)
@ -107,14 +109,14 @@ int spec_fsync __P((struct vop_fsync_args *));
#define spec_readdir ((int (*) __P((struct vop_readdir_args *)))spec_badop)
#define spec_readlink ((int (*) __P((struct vop_readlink_args *)))spec_badop)
#define spec_abortop ((int (*) __P((struct vop_abortop_args *)))spec_badop)
#define spec_inactive ((int (*) __P((struct vop_inactive_args *)))nullop)
int spec_inactive __P((struct vop_inactive_args *));
#define spec_reclaim ((int (*) __P((struct vop_reclaim_args *)))nullop)
int spec_lock __P((struct vop_lock_args *));
int spec_unlock __P((struct vop_unlock_args *));
#define spec_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define spec_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
int spec_bmap __P((struct vop_bmap_args *));
int spec_strategy __P((struct vop_strategy_args *));
int spec_print __P((struct vop_print_args *));
#define spec_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
#define spec_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
int spec_pathconf __P((struct vop_pathconf_args *));
int spec_advlock __P((struct vop_advlock_args *));
#define spec_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))spec_badop)

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)umap.h 8.3 (Berkeley) 1/21/94
* @(#)umap.h 8.4 (Berkeley) 8/20/94
*
* @(#)null_vnops.c 1.5 (Berkeley) 7/10/92
*/
@ -67,8 +67,7 @@ struct umap_mount {
* A cache of vnode references
*/
struct umap_node {
struct umap_node *umap_forw; /* Hash chain */
struct umap_node *umap_back;
LIST_ENTRY(umap_node) umap_hash; /* Hash list */
struct vnode *umap_lowervp; /* Aliased vnode - VREFed once */
struct vnode *umap_vnode; /* Back pointer to vnode/umap_node */
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1992, 1993
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
@ -33,13 +33,14 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)umap_subr.c 8.6 (Berkeley) 1/26/94
* @(#)umap_subr.c 8.9 (Berkeley) 5/14/95
*
* $Id: lofs_subr.c, v 1.11 1992/05/30 10:05:43 jsp Exp jsp $
* From: $Id: lofs_subr.c, v 1.11 1992/05/30 10:05:43 jsp Exp jsp $
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
@ -50,7 +51,6 @@
#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */
#define NUMAPNODECACHE 16
#define UMAP_NHASH(vp) ((((u_long) vp)>>LOG2_SIZEVNODE) & (NUMAPNODECACHE-1))
/*
* Null layer cache:
@ -60,39 +60,22 @@
* alias is removed the target vnode is vrele'd.
*/
/*
* Cache head
*/
struct umap_node_cache {
struct umap_node *ac_forw;
struct umap_node *ac_back;
};
static struct umap_node_cache umap_node_cache[NUMAPNODECACHE];
#define UMAP_NHASH(vp) \
(&umap_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & umap_node_hash])
LIST_HEAD(umap_node_hashhead, umap_node) *umap_node_hashtbl;
u_long umap_node_hash;
/*
* Initialise cache headers
*/
umapfs_init()
umapfs_init(vfsp)
struct vfsconf *vfsp;
{
struct umap_node_cache *ac;
#ifdef UMAPFS_DIAGNOSTIC
printf("umapfs_init\n"); /* printed during system boot */
#endif
for (ac = umap_node_cache; ac < umap_node_cache + NUMAPNODECACHE; ac++)
ac->ac_forw = ac->ac_back = (struct umap_node *) ac;
}
/*
* Compute hash list for given target vnode
*/
static struct umap_node_cache *
umap_node_hash(targetvp)
struct vnode *targetvp;
{
return (&umap_node_cache[UMAP_NHASH(targetvp)]);
umap_node_hashtbl = hashinit(NUMAPNODECACHE, M_CACHE, &umap_node_hash);
}
/*
@ -151,7 +134,8 @@ umap_node_find(mp, targetvp)
struct mount *mp;
struct vnode *targetvp;
{
struct umap_node_cache *hd;
struct proc *p = curproc; /* XXX */
struct umap_node_hashhead *hd;
struct umap_node *a;
struct vnode *vp;
@ -165,10 +149,9 @@ umap_node_find(mp, targetvp)
* the target vnode. If found, the increment the umap_node
* reference count (but NOT the target vnode's VREF counter).
*/
hd = umap_node_hash(targetvp);
loop:
for (a = hd->ac_forw; a != (struct umap_node *) hd; a = a->umap_forw) {
hd = UMAP_NHASH(targetvp);
loop:
for (a = hd->lh_first; a != 0; a = a->umap_hash.le_next) {
if (a->umap_lowervp == targetvp &&
a->umap_vnode->v_mount == mp) {
vp = UMAPTOV(a);
@ -177,7 +160,7 @@ umap_node_find(mp, targetvp)
* stuff, but we don't want to lock
* the lower node.
*/
if (vget(vp, 0)) {
if (vget(vp, 0, p)) {
#ifdef UMAPFS_DIAGNOSTIC
printf ("umap_node_find: vget failed.\n");
#endif
@ -205,7 +188,7 @@ umap_node_alloc(mp, lowervp, vpp)
struct vnode *lowervp;
struct vnode **vpp;
{
struct umap_node_cache *hd;
struct umap_node_hashhead *hd;
struct umap_node *xp;
struct vnode *othervp, *vp;
int error;
@ -233,8 +216,8 @@ umap_node_alloc(mp, lowervp, vpp)
return (0);
}
VREF(lowervp); /* Extra VREF will be vrele'd in umap_node_create */
hd = umap_node_hash(lowervp);
insque(xp, hd);
hd = UMAP_NHASH(lowervp);
LIST_INSERT_HEAD(hd, xp, umap_hash);
return (0);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1992, 1993
* Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)umap_vfsops.c 8.3 (Berkeley) 1/21/94
* @(#)umap_vfsops.c 8.8 (Berkeley) 5/14/95
*
* @(#)null_vfsops.c 1.5 (Berkeley) 7/10/92
*/
@ -45,6 +45,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
@ -163,7 +164,7 @@ umapfs_mount(mp, path, data, ndp, p)
/*
* Unlock the node (either the lower or the alias)
*/
VOP_UNLOCK(vp);
VOP_UNLOCK(vp, 0, p);
/*
* Make sure the node alias worked
*/
@ -183,7 +184,7 @@ umapfs_mount(mp, path, data, ndp, p)
if (UMAPVPTOLOWERVP(umapm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = (qaddr_t) amp;
getnewfsid(mp, MOUNT_LOFS);
vfs_getnewfsid(mp);
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
@ -224,18 +225,13 @@ umapfs_unmount(mp, mntflags, p)
struct vnode *umapm_rootvp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp;
int error;
int flags = 0;
extern int doforce;
#ifdef UMAPFS_DIAGNOSTIC
printf("umapfs_unmount(mp = %x)\n", mp);
#endif
if (mntflags & MNT_FORCE) {
/* lofs can never be rootfs so don't check for it */
if (!doforce)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
/*
* Clear out buffer cache. I don't think we
@ -276,6 +272,7 @@ umapfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct proc *p = curproc; /* XXX */
struct vnode *vp;
#ifdef UMAPFS_DIAGNOSTIC
@ -290,7 +287,7 @@ umapfs_root(mp, vpp)
*/
vp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp;
VREF(vp);
VOP_LOCK(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
*vpp = vp;
return (0);
}
@ -390,7 +387,9 @@ umapfs_vptofh(vp, fhp)
return (VFS_VPTOFH(UMAPVPTOLOWERVP(vp), fhp));
}
int umapfs_init __P((void));
int umapfs_init __P((struct vfsconf *));
#define umapfs_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
struct vfsops umap_vfsops = {
umapfs_mount,
@ -404,4 +403,5 @@ struct vfsops umap_vfsops = {
umapfs_fhtovp,
umapfs_vptofh,
umapfs_init,
umapfs_sysctl,
};

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)umap_vnops.c 8.3 (Berkeley) 1/5/94
* @(#)umap_vnops.c 8.6 (Berkeley) 5/22/95
*/
/*
@ -323,10 +323,52 @@ umap_getattr(ap)
return (0);
}
/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
umap_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	/* Record the lock on this (upper) layer's vnode first. */
	vop_nolock(ap);
	/* A drain lock stops here; it is not propagated downward. */
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	/* The interlock belongs to our vnode only; strip it before passing on. */
	ap->a_flags &= ~LK_INTERLOCK;
	/*
	 * NOTE(review): the request is forwarded via null_bypass();
	 * presumably equivalent to the umap bypass for this op --
	 * confirm against the rest of umap_vnops.c.
	 */
	return (null_bypass(ap));
}
/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
umap_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	/* Record the unlock on this (upper) layer's vnode first. */
	vop_nounlock(ap);
	/*
	 * The interlock belongs to our vnode only; strip it before the
	 * request is forwarded to the underlying layer.
	 * (FIX: removed an unused local `struct vnode *vp' -- it was
	 * assigned from ap->a_vp but never read.)
	 */
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass(ap));
}
int
umap_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
/*
@ -336,6 +378,7 @@ umap_inactive(ap)
* cache and reusable.
*
*/
VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
return (0);
}
@ -351,7 +394,7 @@ umap_reclaim(ap)
/* After this assignment, this node will not be re-used. */
xp->umap_lowervp = NULL;
remque(xp);
LIST_REMOVE(xp, umap_hash);
FREE(vp->v_data, M_TEMP);
vp->v_data = NULL;
vrele(lowervp);
@ -474,6 +517,8 @@ struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
{ &vop_default_desc, umap_bypass },
{ &vop_getattr_desc, umap_getattr },
{ &vop_lock_desc, umap_lock },
{ &vop_unlock_desc, umap_unlock },
{ &vop_inactive_desc, umap_inactive },
{ &vop_reclaim_desc, umap_reclaim },
{ &vop_print_desc, umap_print },

View File

@ -1,7 +1,8 @@
If you plan on using union mounts, then you should consider replacing
"libc/gen/opendir.c" in the C library with the file "libc.opendir.c"
three files in "libc/gen" in the C library with the files in "libc"
in this directory. The replacement version of opendir() automatically
removes duplicate names when a union stack is encountered. You will
then need to rebuild the C library and all commands.
removes duplicate names when a union stack is encountered. The other
two files do special handling of whiteouts. You will then need to
rebuild the C library and all commands.
@(#)README 8.1 (Berkeley) 2/15/94
@(#)README 8.2 (Berkeley) 11/4/94

995
sys/miscfs/union/libc.fts.c Normal file
View File

@ -0,0 +1,995 @@
/*-
* Copyright (c) 1990, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)fts.c 8.6 (Berkeley) 8/14/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/param.h>
#include <sys/stat.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fts.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static FTSENT *fts_alloc __P((FTS *, char *, int));
static FTSENT *fts_build __P((FTS *, int));
static void fts_lfree __P((FTSENT *));
static void fts_load __P((FTS *, FTSENT *));
static size_t fts_maxarglen __P((char * const *));
static void fts_padjust __P((FTS *, void *));
static int fts_palloc __P((FTS *, size_t));
static FTSENT *fts_sort __P((FTS *, FTSENT *, int));
static u_short fts_stat __P((FTS *, FTSENT *, int));
#define ISDOT(a) (a[0] == '.' && (!a[1] || a[1] == '.' && !a[2]))
#define ISSET(opt) (sp->fts_options & opt)
#define SET(opt) (sp->fts_options |= opt)
#define CHDIR(sp, path) (!ISSET(FTS_NOCHDIR) && chdir(path))
#define FCHDIR(sp, fd) (!ISSET(FTS_NOCHDIR) && fchdir(fd))
/* fts_build flags */
#define BCHILD 1 /* fts_children */
#define BNAMES 2 /* fts_children, names only */
#define BREAD 3 /* fts_read */
/*
 * Allocate and initialize an FTS stream for the NULL-terminated list of
 * root paths in argv.  Returns the stream handle, or NULL with errno set
 * on invalid options, a zero-length path, or allocation failure.
 */
FTS *
fts_open(argv, options, compar)
	char * const *argv;
	register int options;
	int (*compar)();
{
	register FTS *sp;
	register FTSENT *p, *root;
	register int nitems;
	FTSENT *parent, *tmp;
	int len;

	/* Options check. */
	if (options & ~FTS_OPTIONMASK) {
		errno = EINVAL;
		return (NULL);
	}

	/* Allocate/initialize the stream */
	if ((sp = malloc((u_int)sizeof(FTS))) == NULL)
		return (NULL);
	memset(sp, 0, sizeof(FTS));
	sp->fts_compar = compar;
	sp->fts_options = options;

	/* Logical walks turn on NOCHDIR; symbolic links are too hard. */
	if (ISSET(FTS_LOGICAL))
		SET(FTS_NOCHDIR);

	/*
	 * Start out with 1K of path space, and enough, in any case,
	 * to hold the user's paths.
	 */
	if (fts_palloc(sp, MAX(fts_maxarglen(argv), MAXPATHLEN)))
		goto mem1;

	/* Allocate/initialize root's parent. */
	if ((parent = fts_alloc(sp, "", 0)) == NULL)
		goto mem2;
	parent->fts_level = FTS_ROOTPARENTLEVEL;

	/* Allocate/initialize root(s). */
	for (root = NULL, nitems = 0; *argv; ++argv, ++nitems) {
		/* Don't allow zero-length paths. */
		if ((len = strlen(*argv)) == 0) {
			errno = ENOENT;
			goto mem3;
		}

		/*
		 * FIX: fts_alloc() returns NULL on malloc failure; the
		 * original dereferenced the result unconditionally.
		 */
		if ((p = fts_alloc(sp, *argv, len)) == NULL)
			goto mem3;
		p->fts_level = FTS_ROOTLEVEL;
		p->fts_parent = parent;
		p->fts_accpath = p->fts_name;
		p->fts_info = fts_stat(sp, p, ISSET(FTS_COMFOLLOW));

		/* Command-line "." and ".." are real directories. */
		if (p->fts_info == FTS_DOT)
			p->fts_info = FTS_D;

		/*
		 * If comparison routine supplied, traverse in sorted
		 * order; otherwise traverse in the order specified.
		 */
		if (compar) {
			p->fts_link = root;
			root = p;
		} else {
			p->fts_link = NULL;
			if (root == NULL)
				tmp = root = p;
			else {
				tmp->fts_link = p;
				tmp = p;
			}
		}
	}
	if (compar && nitems > 1)
		root = fts_sort(sp, root, nitems);

	/*
	 * Allocate a dummy pointer and make fts_read think that we've just
	 * finished the node before the root(s); set p->fts_info to FTS_INIT
	 * so that everything about the "current" node is ignored.
	 */
	if ((sp->fts_cur = fts_alloc(sp, "", 0)) == NULL)
		goto mem3;
	sp->fts_cur->fts_link = root;
	sp->fts_cur->fts_info = FTS_INIT;

	/*
	 * If using chdir(2), grab a file descriptor pointing to dot to insure
	 * that we can get back here; this could be avoided for some paths,
	 * but almost certainly not worth the effort.  Slashes, symbolic links,
	 * and ".." are all fairly nasty problems.  Note, if we can't get the
	 * descriptor we run anyway, just more slowly.
	 */
	if (!ISSET(FTS_NOCHDIR) && (sp->fts_rfd = open(".", O_RDONLY, 0)) < 0)
		SET(FTS_NOCHDIR);

	return (sp);

mem3:	fts_lfree(root);
	free(parent);
mem2:	free(sp->fts_path);
mem1:	free(sp);
	return (NULL);
}
/*
 * Prepare the stream for traversing root entry p: copy its name into
 * the shared path buffer, trim any directory prefix off fts_name, and
 * seed the stream's device number from the entry.
 */
static void
fts_load(sp, p)
	FTS *sp;
	register FTSENT *p;
{
	register int len;
	register char *cp;

	/*
	 * Load the stream structure for the next traversal.  Since we don't
	 * actually enter the directory until after the preorder visit, set
	 * the fts_accpath field specially so the chdir gets done to the right
	 * place and the user can access the first node.  From fts_open it's
	 * known that the path will fit.
	 */
	len = p->fts_pathlen = p->fts_namelen;
	memmove(sp->fts_path, p->fts_name, len + 1);
	/*
	 * If the name contains a '/' (and is not just "/"), shift the part
	 * after the last slash to the front of fts_name in place; memmove
	 * is required because source and destination overlap.
	 */
	if ((cp = strrchr(p->fts_name, '/')) && (cp != p->fts_name || cp[1])) {
		len = strlen(++cp);
		memmove(p->fts_name, cp, len + 1);
		p->fts_namelen = len;
	}
	p->fts_accpath = p->fts_path = sp->fts_path;
	sp->fts_dev = p->fts_dev;
}
/*
 * Tear down an FTS stream: free all remaining FTSENT structures, the
 * child list, the sort array and the path buffer, then return to the
 * directory fts_open() was called from (when chdir(2) was in use).
 * Returns 0, or -1 with errno set if the final fchdir() failed.
 *
 * FIX: the original freed sp and then evaluated ISSET(FTS_NOCHDIR),
 * which expands to sp->fts_options -- a use after free.  Restructured
 * so sp is freed only after its last read.
 */
int
fts_close(sp)
	FTS *sp;
{
	register FTSENT *freep, *p;
	int saved_errno;

	/*
	 * This still works if we haven't read anything -- the dummy structure
	 * points to the root list, so we step through to the end of the root
	 * list which has a valid parent pointer.
	 */
	if (sp->fts_cur) {
		for (p = sp->fts_cur; p->fts_level >= FTS_ROOTLEVEL;) {
			freep = p;
			p = p->fts_link ? p->fts_link : p->fts_parent;
			free(freep);
		}
		free(p);
	}

	/* Free up child linked list, sort array, path buffer. */
	if (sp->fts_child)
		fts_lfree(sp->fts_child);
	if (sp->fts_array)
		free(sp->fts_array);
	free(sp->fts_path);

	/* Return to original directory, save errno if necessary. */
	if (!ISSET(FTS_NOCHDIR)) {
		saved_errno = fchdir(sp->fts_rfd) ? errno : 0;
		(void)close(sp->fts_rfd);

		/* Set errno and return. */
		if (saved_errno) {
			/* Free up the stream pointer. */
			free(sp);
			errno = saved_errno;
			return (-1);
		}
	}

	/* Free up the stream pointer. */
	free(sp);
	return (0);
}
/*
* Special case a root of "/" so that slashes aren't appended which would
* cause paths to be written as "//foo".
*/
#define NAPPEND(p) \
(p->fts_level == FTS_ROOTLEVEL && p->fts_pathlen == 1 && \
p->fts_path[0] == '/' ? 0 : p->fts_pathlen)
/*
 * fts_read --
 *	Return the next node in the traversal: directories in pre-order
 *	(FTS_D) and again in post-order (FTS_DP), other files once.  Also
 *	acts on any instruction the user stored with fts_set on the node
 *	returned by the previous call (FTS_AGAIN, FTS_FOLLOW, FTS_SKIP).
 *	Returns NULL at end of traversal (errno 0) or on unrecoverable
 *	error, in which case FTS_STOP is set on the stream.
 */
FTSENT *
fts_read(sp)
	register FTS *sp;
{
	register FTSENT *p, *tmp;
	register int instr;
	register char *t;
	int saved_errno;

	/* If finished or unrecoverable error, return NULL. */
	if (sp->fts_cur == NULL || ISSET(FTS_STOP))
		return (NULL);

	/* Set current node pointer. */
	p = sp->fts_cur;

	/* Save and zero out user instructions. */
	instr = p->fts_instr;
	p->fts_instr = FTS_NOINSTR;

	/* Any type of file may be re-visited; re-stat and re-turn. */
	if (instr == FTS_AGAIN) {
		p->fts_info = fts_stat(sp, p, 0);
		return (p);
	}

	/*
	 * Following a symlink -- SLNONE test allows application to see
	 * SLNONE and recover.  If indirecting through a symlink, have
	 * keep a pointer to current location.  If unable to get that
	 * pointer, follow fails.
	 */
	if (instr == FTS_FOLLOW &&
	    (p->fts_info == FTS_SL || p->fts_info == FTS_SLNONE)) {
		p->fts_info = fts_stat(sp, p, 1);
		if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR))
			/* Save a descriptor so we can chdir back later. */
			if ((p->fts_symfd = open(".", O_RDONLY, 0)) < 0) {
				p->fts_errno = errno;
				p->fts_info = FTS_ERR;
			} else
				p->fts_flags |= FTS_SYMFOLLOW;
		return (p);
	}

	/* Directory in pre-order. */
	if (p->fts_info == FTS_D) {
		/* If skipped or crossed mount point, do post-order visit. */
		if (instr == FTS_SKIP ||
		    ISSET(FTS_XDEV) && p->fts_dev != sp->fts_dev) {
			if (p->fts_flags & FTS_SYMFOLLOW)
				(void)close(p->fts_symfd);
			if (sp->fts_child) {
				fts_lfree(sp->fts_child);
				sp->fts_child = NULL;
			}
			p->fts_info = FTS_DP;
			return (p);
		}

		/* Rebuild if only read the names and now traversing. */
		if (sp->fts_child && sp->fts_options & FTS_NAMEONLY) {
			sp->fts_options &= ~FTS_NAMEONLY;
			fts_lfree(sp->fts_child);
			sp->fts_child = NULL;
		}

		/*
		 * Cd to the subdirectory.
		 *
		 * If have already read and now fail to chdir, whack the list
		 * to make the names come out right, and set the parent errno
		 * so the application will eventually get an error condition.
		 * Set the FTS_DONTCHDIR flag so that when we logically change
		 * directories back to the parent we don't do a chdir.
		 *
		 * If haven't read do so.  If the read fails, fts_build sets
		 * FTS_STOP or the fts_info field of the node.
		 */
		if (sp->fts_child) {
			if (CHDIR(sp, p->fts_accpath)) {
				p->fts_errno = errno;
				p->fts_flags |= FTS_DONTCHDIR;
				for (p = sp->fts_child; p; p = p->fts_link)
					p->fts_accpath =
					    p->fts_parent->fts_accpath;
			}
		} else if ((sp->fts_child = fts_build(sp, BREAD)) == NULL) {
			if (ISSET(FTS_STOP))
				return (NULL);
			return (p);
		}
		p = sp->fts_child;
		sp->fts_child = NULL;
		goto name;
	}

	/* Move to the next node on this level. */
next:	tmp = p;
	if (p = p->fts_link) {
		free(tmp);

		/*
		 * If reached the top, return to the original directory, and
		 * load the paths for the next root.
		 */
		if (p->fts_level == FTS_ROOTLEVEL) {
			if (!ISSET(FTS_NOCHDIR) && FCHDIR(sp, sp->fts_rfd)) {
				SET(FTS_STOP);
				return (NULL);
			}
			fts_load(sp, p);
			return (sp->fts_cur = p);
		}

		/*
		 * User may have called fts_set on the node.  If skipped,
		 * ignore.  If followed, get a file descriptor so we can
		 * get back if necessary.
		 */
		if (p->fts_instr == FTS_SKIP)
			goto next;
		if (p->fts_instr == FTS_FOLLOW) {
			p->fts_info = fts_stat(sp, p, 1);
			if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR))
				if ((p->fts_symfd =
				    open(".", O_RDONLY, 0)) < 0) {
					p->fts_errno = errno;
					p->fts_info = FTS_ERR;
				} else
					p->fts_flags |= FTS_SYMFOLLOW;
			p->fts_instr = FTS_NOINSTR;
		}

		/* Append the new name (with its NUL) to the parent's path. */
name:		t = sp->fts_path + NAPPEND(p->fts_parent);
		*t++ = '/';
		memmove(t, p->fts_name, p->fts_namelen + 1);
		return (sp->fts_cur = p);
	}

	/* Move up to the parent node. */
	p = tmp->fts_parent;
	free(tmp);

	if (p->fts_level == FTS_ROOTPARENTLEVEL) {
		/*
		 * Done; free everything up and set errno to 0 so the user
		 * can distinguish between error and EOF.
		 */
		free(p);
		errno = 0;
		return (sp->fts_cur = NULL);
	}

	/* Nul terminate the pathname. */
	sp->fts_path[p->fts_pathlen] = '\0';

	/*
	 * Return to the parent directory.  If at a root node or came through
	 * a symlink, go back through the file descriptor.  Otherwise, cd up
	 * one directory.
	 */
	if (p->fts_level == FTS_ROOTLEVEL) {
		if (!ISSET(FTS_NOCHDIR) && FCHDIR(sp, sp->fts_rfd)) {
			SET(FTS_STOP);
			return (NULL);
		}
	} else if (p->fts_flags & FTS_SYMFOLLOW) {
		if (FCHDIR(sp, p->fts_symfd)) {
			saved_errno = errno;
			(void)close(p->fts_symfd);
			errno = saved_errno;
			SET(FTS_STOP);
			return (NULL);
		}
		(void)close(p->fts_symfd);
	} else if (!(p->fts_flags & FTS_DONTCHDIR)) {
		if (CHDIR(sp, "..")) {
			SET(FTS_STOP);
			return (NULL);
		}
	}
	p->fts_info = p->fts_errno ? FTS_ERR : FTS_DP;
	return (sp->fts_cur = p);
}
/*
 * Fts_set takes the stream as an argument although it's not used in this
 * implementation; it would be necessary if anyone wanted to add global
 * semantics to fts using fts_set.  An error return is allowed for similar
 * reasons.
 */
/* ARGSUSED */
int
fts_set(sp, p, instr)
	FTS *sp;
	FTSENT *p;
	int instr;
{
	/* Accept only the documented instructions (or 0 to clear). */
	if (instr == 0 || instr == FTS_AGAIN || instr == FTS_FOLLOW ||
	    instr == FTS_NOINSTR || instr == FTS_SKIP) {
		p->fts_instr = instr;
		return (0);
	}
	errno = EINVAL;
	return (1);
}
/*
 * fts_children --
 *	Return a linked list of the entries in the directory the cursor
 *	currently points at (or, before the first fts_read, the list of
 *	root arguments).  instr may be 0 or FTS_NAMEONLY; the latter reads
 *	names only, skipping stat calls.  Returns NULL with errno 0 for an
 *	empty directory, NULL with errno set on error.
 */
FTSENT *
fts_children(sp, instr)
	register FTS *sp;
	int instr;
{
	register FTSENT *p;
	int fd;

	if (instr && instr != FTS_NAMEONLY) {
		errno = EINVAL;
		return (NULL);
	}

	/* Set current node pointer. */
	p = sp->fts_cur;

	/*
	 * Errno set to 0 so user can distinguish empty directory from
	 * an error.
	 */
	errno = 0;

	/* Fatal errors stop here. */
	if (ISSET(FTS_STOP))
		return (NULL);

	/* Return logical hierarchy of user's arguments. */
	if (p->fts_info == FTS_INIT)
		return (p->fts_link);

	/*
	 * If not a directory being visited in pre-order, stop here.  Could
	 * allow FTS_DNR, assuming the user has fixed the problem, but the
	 * same effect is available with FTS_AGAIN.
	 */
	if (p->fts_info != FTS_D /* && p->fts_info != FTS_DNR */)
		return (NULL);

	/* Free up any previous child list. */
	if (sp->fts_child)
		fts_lfree(sp->fts_child);

	if (instr == FTS_NAMEONLY) {
		sp->fts_options |= FTS_NAMEONLY;
		instr = BNAMES;
	} else
		instr = BCHILD;

	/*
	 * If using chdir on a relative path and called BEFORE fts_read does
	 * its chdir to the root of a traversal, we can lose -- we need to
	 * chdir into the subdirectory, and we don't know where the current
	 * directory is, so we can't get back so that the upcoming chdir by
	 * fts_read will work.
	 */
	if (p->fts_level != FTS_ROOTLEVEL || p->fts_accpath[0] == '/' ||
	    ISSET(FTS_NOCHDIR))
		return (sp->fts_child = fts_build(sp, instr));

	/* Remember where we are so the chdir done by fts_build is undone. */
	if ((fd = open(".", O_RDONLY, 0)) < 0)
		return (NULL);
	sp->fts_child = fts_build(sp, instr);
	if (fchdir(fd)) {
		/*
		 * Close the saved descriptor on the error path as well;
		 * the previous code leaked fd here.
		 */
		(void)close(fd);
		return (NULL);
	}
	(void)close(fd);
	return (sp->fts_child);
}
/*
 * This is the tricky part -- do not casually change *anything* in here.  The
 * idea is to build the linked list of entries that are used by fts_children
 * and fts_read.  There are lots of special cases.
 *
 * The real slowdown in walking the tree is the stat calls.  If FTS_NOSTAT is
 * set and it's a physical walk (so that symbolic links can't be directories),
 * we can do things quickly.  First, if it's a 4.4BSD file system, the type
 * of the file is in the directory entry.  Otherwise, we assume that the number
 * of subdirectories in a node is equal to the number of links to the parent.
 * The former skips all stat calls.  The latter skips stat calls in any leaf
 * directories and for any files after the subdirectories in the directory have
 * been found, cutting the stat calls by about 2/3.
 *
 * type is BREAD when called from fts_read, BCHILD or BNAMES when called
 * from fts_children.
 */
static FTSENT *
fts_build(sp, type)
	register FTS *sp;
	int type;
{
	register struct dirent *dp;
	register FTSENT *p, *head;
	register int nitems;
	FTSENT *cur, *tail;
	DIR *dirp;
	void *adjaddr;		/* non-NULL iff the path buffer was realloc'd */
	int cderrno, descend, len, level, maxlen, nlinks, oflag, saved_errno;
	char *cp;

	/* Set current node pointer. */
	cur = sp->fts_cur;

	/*
	 * Open the directory for reading.  If this fails, we're done.
	 * If being called from fts_read, set the fts_info field.
	 */
#ifdef FTS_WHITEOUT
	if (ISSET(FTS_WHITEOUT))
		oflag = DTF_NODUP|DTF_REWIND;
	else
		oflag = DTF_HIDEW|DTF_NODUP|DTF_REWIND;
#else
#define __opendir2(path, flag) opendir(path)
#endif
	if ((dirp = __opendir2(cur->fts_accpath, oflag)) == NULL) {
		if (type == BREAD) {
			cur->fts_info = FTS_DNR;
			cur->fts_errno = errno;
		}
		return (NULL);
	}

	/*
	 * Nlinks is the number of possible entries of type directory in the
	 * directory if we're cheating on stat calls, 0 if we're not doing
	 * any stat calls at all, -1 if we're doing stats on everything.
	 */
	if (type == BNAMES)
		nlinks = 0;
	else if (ISSET(FTS_NOSTAT) && ISSET(FTS_PHYSICAL))
		nlinks = cur->fts_nlink - (ISSET(FTS_SEEDOT) ? 0 : 2);
	else
		nlinks = -1;
#ifdef notdef
	(void)printf("nlinks == %d (cur: %d)\n", nlinks, cur->fts_nlink);
	(void)printf("NOSTAT %d PHYSICAL %d SEEDOT %d\n",
	    ISSET(FTS_NOSTAT), ISSET(FTS_PHYSICAL), ISSET(FTS_SEEDOT));
#endif
	/*
	 * If we're going to need to stat anything or we want to descend
	 * and stay in the directory, chdir.  If this fails we keep going,
	 * but set a flag so we don't chdir after the post-order visit.
	 * We won't be able to stat anything, but we can still return the
	 * names themselves.  Note, that since fts_read won't be able to
	 * chdir into the directory, it will have to return different path
	 * names than before, i.e. "a/b" instead of "b".  Since the node
	 * has already been visited in pre-order, have to wait until the
	 * post-order visit to return the error.  There is a special case
	 * here, if there was nothing to stat then it's not an error to
	 * not be able to stat.  This is all fairly nasty.  If a program
	 * needed sorted entries or stat information, they had better be
	 * checking FTS_NS on the returned nodes.
	 */
	cderrno = 0;
	if (nlinks || type == BREAD)
		if (FCHDIR(sp, dirfd(dirp))) {
			if (nlinks && type == BREAD)
				cur->fts_errno = errno;
			cur->fts_flags |= FTS_DONTCHDIR;
			descend = 0;
			cderrno = errno;
		} else
			descend = 1;
	else
		descend = 0;

	/*
	 * Figure out the max file name length that can be stored in the
	 * current path -- the inner loop allocates more path as necessary.
	 * We really wouldn't have to do the maxlen calculations here, we
	 * could do them in fts_read before returning the path, but it's a
	 * lot easier here since the length is part of the dirent structure.
	 *
	 * If not changing directories set a pointer so that can just append
	 * each new name into the path.
	 */
	maxlen = sp->fts_pathlen - cur->fts_pathlen - 1;
	len = NAPPEND(cur);
	if (ISSET(FTS_NOCHDIR)) {
		cp = sp->fts_path + len;
		*cp++ = '/';
	}

	level = cur->fts_level + 1;

	/* Read the directory, attaching each entry to the `link' pointer. */
	adjaddr = NULL;
	for (head = tail = NULL, nitems = 0; dp = readdir(dirp);) {
		if (!ISSET(FTS_SEEDOT) && ISDOT(dp->d_name))
			continue;

		if ((p = fts_alloc(sp, dp->d_name, (int)dp->d_namlen)) == NULL)
			goto mem1;
		if (dp->d_namlen > maxlen) {
			/* Name won't fit; grow the path buffer. */
			if (fts_palloc(sp, (size_t)dp->d_namlen)) {
				/*
				 * No more memory for path or structures.  Save
				 * errno, free up the current structure and the
				 * structures already allocated.
				 */
mem1:				saved_errno = errno;
				if (p)
					free(p);
				fts_lfree(head);
				(void)closedir(dirp);
				errno = saved_errno;
				cur->fts_info = FTS_ERR;
				SET(FTS_STOP);
				return (NULL);
			}
			adjaddr = sp->fts_path;
			maxlen = sp->fts_pathlen - sp->fts_cur->fts_pathlen - 1;
		}

		p->fts_pathlen = len + dp->d_namlen + 1;
		p->fts_parent = sp->fts_cur;
		p->fts_level = level;

#ifdef FTS_WHITEOUT
		if (dp->d_type == DT_WHT)
			p->fts_flags |= FTS_ISW;
#endif

		if (cderrno) {
			/* Couldn't chdir in; report FTS_NS only if stats
			 * were actually wanted (nlinks != 0). */
			if (nlinks) {
				p->fts_info = FTS_NS;
				p->fts_errno = cderrno;
			} else
				p->fts_info = FTS_NSOK;
			p->fts_accpath = cur->fts_accpath;
		} else if (nlinks == 0
#ifdef DT_DIR
		    || nlinks > 0 &&
		    dp->d_type != DT_DIR && dp->d_type != DT_UNKNOWN
#endif
		    ) {
			p->fts_accpath =
			    ISSET(FTS_NOCHDIR) ? p->fts_path : p->fts_name;
			p->fts_info = FTS_NSOK;
		} else {
			/* Build a file name for fts_stat to stat. */
			if (ISSET(FTS_NOCHDIR)) {
				p->fts_accpath = p->fts_path;
				memmove(cp, p->fts_name, p->fts_namelen + 1);
			} else
				p->fts_accpath = p->fts_name;
			/* Stat it. */
			p->fts_info = fts_stat(sp, p, 0);

			/* Decrement link count if applicable. */
			if (nlinks > 0 && (p->fts_info == FTS_D ||
			    p->fts_info == FTS_DC || p->fts_info == FTS_DOT))
				--nlinks;
		}

		/* We walk in directory order so "ls -f" doesn't get upset. */
		p->fts_link = NULL;
		if (head == NULL)
			head = tail = p;
		else {
			tail->fts_link = p;
			tail = p;
		}
		++nitems;
	}
	(void)closedir(dirp);

	/*
	 * If had to realloc the path, adjust the addresses for the rest
	 * of the tree.
	 */
	if (adjaddr)
		fts_padjust(sp, adjaddr);

	/*
	 * If not changing directories, reset the path back to original
	 * state.
	 */
	if (ISSET(FTS_NOCHDIR)) {
		if (cp - 1 > sp->fts_path)
			--cp;
		*cp = '\0';
	}

	/*
	 * If descended after called from fts_children or after called from
	 * fts_read and nothing found, get back.  At the root level we use
	 * the saved fd; if one of fts_open()'s arguments is a relative path
	 * to an empty directory, we wind up here with no other way back.  If
	 * can't get back, we're done.
	 */
	if (descend && (type == BCHILD || !nitems) &&
	    (cur->fts_level == FTS_ROOTLEVEL ?
	    FCHDIR(sp, sp->fts_rfd) : CHDIR(sp, ".."))) {
		cur->fts_info = FTS_ERR;
		SET(FTS_STOP);
		return (NULL);
	}

	/* If didn't find anything, return NULL. */
	if (!nitems) {
		if (type == BREAD)
			cur->fts_info = FTS_DP;
		return (NULL);
	}

	/* Sort the entries. */
	if (sp->fts_compar && nitems > 1)
		head = fts_sort(sp, head, nitems);
	return (head);
}
/*
 * fts_stat --
 *	Stat the file p refers to and classify it, returning one of the
 *	FTS_* info values.  If follow is set (logical walk or FTS_FOLLOW),
 *	stat(2) is tried first and a dangling symlink is reported as
 *	FTS_SLNONE; otherwise lstat(2) is used.  For directories the
 *	dev/ino/nlink values are remembered for mount-point checks and the
 *	stat-count optimization, and cycle detection is done by brute force
 *	against the chain of parents.
 */
static u_short
fts_stat(sp, p, follow)
	FTS *sp;
	register FTSENT *p;
	int follow;
{
	register FTSENT *t;
	register dev_t dev;
	register ino_t ino;
	struct stat *sbp, sb;
	int saved_errno;

	/* If user needs stat info, stat buffer already allocated. */
	sbp = ISSET(FTS_NOSTAT) ? &sb : p->fts_statp;

#ifdef FTS_WHITEOUT
	/* check for whiteout */
	if (p->fts_flags & FTS_ISW) {
		if (sbp != &sb) {
			memset(sbp, '\0', sizeof (*sbp));
			sbp->st_mode = S_IFWHT;
		}
		return (FTS_W);
	}
#endif

	/*
	 * If doing a logical walk, or application requested FTS_FOLLOW, do
	 * a stat(2).  If that fails, check for a non-existent symlink.  If
	 * fail, set the errno from the stat call.
	 */
	if (ISSET(FTS_LOGICAL) || follow) {
		if (stat(p->fts_accpath, sbp)) {
			saved_errno = errno;
			if (!lstat(p->fts_accpath, sbp)) {
				/* Dangling symlink: not an error per se. */
				errno = 0;
				return (FTS_SLNONE);
			}
			p->fts_errno = saved_errno;
			goto err;
		}
	} else if (lstat(p->fts_accpath, sbp)) {
		p->fts_errno = errno;
err:		memset(sbp, 0, sizeof(struct stat));
		return (FTS_NS);
	}

	if (S_ISDIR(sbp->st_mode)) {
		/*
		 * Set the device/inode.  Used to find cycles and check for
		 * crossing mount points.  Also remember the link count, used
		 * in fts_build to limit the number of stat calls.  It is
		 * understood that these fields are only referenced if fts_info
		 * is set to FTS_D.
		 */
		dev = p->fts_dev = sbp->st_dev;
		ino = p->fts_ino = sbp->st_ino;
		p->fts_nlink = sbp->st_nlink;

		if (ISDOT(p->fts_name))
			return (FTS_DOT);

		/*
		 * Cycle detection is done by brute force when the directory
		 * is first encountered.  If the tree gets deep enough or the
		 * number of symbolic links to directories is high enough,
		 * something faster might be worthwhile.
		 */
		for (t = p->fts_parent;
		    t->fts_level >= FTS_ROOTLEVEL; t = t->fts_parent)
			if (ino == t->fts_ino && dev == t->fts_dev) {
				p->fts_cycle = t;
				return (FTS_DC);
			}
		return (FTS_D);
	}
	if (S_ISLNK(sbp->st_mode))
		return (FTS_SL);
	if (S_ISREG(sbp->st_mode))
		return (FTS_F);
	return (FTS_DEFAULT);
}
/*
 * fts_sort --
 *	Sort the nitems entries on the list headed by head with the
 *	user's comparison function and return the new head.  If memory
 *	for the pointer array cannot be had, the entries are returned
 *	in their original (directory) order.
 */
static FTSENT *
fts_sort(sp, head, nitems)
	FTS *sp;
	FTSENT *head;
	register int nitems;
{
	register FTSENT **ap, *p;
	FTSENT **tmp;

	/*
	 * Construct an array of pointers to the structures and call qsort(3).
	 * Reassemble the array in the order returned by qsort.  If unable to
	 * sort for memory reasons, return the directory entries in their
	 * current order.  Allocate enough space for the current needs plus
	 * 40 so don't realloc one entry at a time.
	 */
	if (nitems > sp->fts_nitems) {
		/*
		 * Grow through a temporary so a realloc failure doesn't
		 * orphan the old array (the previous code stored NULL over
		 * fts_array, leaking it), and record the new capacity only
		 * once the allocation is known to have succeeded.
		 */
		if ((tmp = realloc(sp->fts_array,
		    (size_t)((nitems + 40) * sizeof(FTSENT *)))) == NULL)
			return (head);
		sp->fts_array = tmp;
		sp->fts_nitems = nitems + 40;
	}
	for (ap = sp->fts_array, p = head; p; p = p->fts_link)
		*ap++ = p;
	qsort((void *)sp->fts_array, nitems, sizeof(FTSENT *), sp->fts_compar);
	for (head = *(ap = sp->fts_array); --nitems; ++ap)
		ap[0]->fts_link = ap[1];
	ap[0]->fts_link = NULL;
	return (head);
}
static FTSENT *
fts_alloc(sp, name, namelen)
	FTS *sp;
	char *name;
	register int namelen;
{
	register FTSENT *ent;
	size_t nbytes;

	/*
	 * Carve the FTSENT, its variable-length file name, and (unless the
	 * user set FTS_NOSTAT) a stat structure out of a single allocation.
	 * The stat structure must be reasonably aligned; since fts_name is
	 * declared with size 1, the first candidate address for the stat
	 * structure is namelen + 2 bytes past the fts_name pointer.
	 */
	nbytes = sizeof(FTSENT) + namelen;
	if (!ISSET(FTS_NOSTAT))
		nbytes += sizeof(struct stat) + ALIGNBYTES;
	if ((ent = malloc(nbytes)) == NULL)
		return (NULL);

	/* Copy in the name, including the trailing NUL. */
	memmove(ent->fts_name, name, namelen + 1);
	if (!ISSET(FTS_NOSTAT))
		ent->fts_statp =
		    (struct stat *)ALIGN(ent->fts_name + namelen + 2);

	ent->fts_namelen = namelen;
	ent->fts_path = sp->fts_path;
	ent->fts_errno = 0;
	ent->fts_flags = 0;
	ent->fts_instr = FTS_NOINSTR;
	ent->fts_number = 0;
	ent->fts_pointer = NULL;
	return (ent);
}
static void
fts_lfree(head)
	register FTSENT *head;
{
	register FTSENT *next;

	/* Release every entry on a linked list of structures. */
	while (head != NULL) {
		next = head->fts_link;
		free(head);
		head = next;
	}
}
/*
 * Allow essentially unlimited paths; find, rm, ls should all work on any tree.
 * Most systems will allow creation of paths much longer than MAXPATHLEN, even
 * though the kernel won't resolve them.  Add the size (not just what's needed)
 * plus 256 bytes so don't realloc the path 2 bytes at a time.
 *
 * Returns 0 on success, non-zero if the buffer could not be grown (in
 * which case the old buffer and recorded length are left intact).
 */
static int
fts_palloc(sp, more)
	FTS *sp;
	size_t more;
{
	char *path;
	size_t newlen;

	/*
	 * Grow through locals so a realloc failure neither loses the old
	 * buffer (the previous code stored NULL over fts_path, leaking it)
	 * nor leaves fts_pathlen claiming space the buffer doesn't have.
	 */
	newlen = sp->fts_pathlen + more + 256;
	if ((path = realloc(sp->fts_path, newlen)) == NULL)
		return (1);
	sp->fts_path = path;
	sp->fts_pathlen = newlen;
	return (0);
}
/*
* When the path is realloc'd, have to fix all of the pointers in structures
* already returned.
*/
static void
fts_padjust(sp, addr)
FTS *sp;
void *addr;
{
FTSENT *p;
#define ADJUST(p) { \
(p)->fts_accpath = \
(char *)addr + ((p)->fts_accpath - (p)->fts_path); \
(p)->fts_path = addr; \
}
/* Adjust the current set of children. */
for (p = sp->fts_child; p; p = p->fts_link)
ADJUST(p);
/* Adjust the rest of the tree. */
for (p = sp->fts_cur; p->fts_level >= FTS_ROOTLEVEL;) {
ADJUST(p);
p = p->fts_link ? p->fts_link : p->fts_parent;
}
}
static size_t
fts_maxarglen(argv)
	char * const *argv;
{
	size_t longest;

	/* Return the length of the longest string in the NULL-terminated
	 * argument vector (0 for an empty vector). */
	longest = 0;
	for (; *argv != NULL; ++argv) {
		size_t cur = strlen(*argv);

		if (cur > longest)
			longest = cur;
	}
	return (longest);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1983, 1993, 1994
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -32,8 +32,7 @@
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char orig_sccsid[] = "@(#)opendir.c 8.2 (Berkeley) 2/12/94";
static char sccsid[] = "@(#)libc.opendir.c 8.1 (Berkeley) 2/15/94";
static char sccsid[] = "@(#)opendir.c 8.6 (Berkeley) 8/14/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/param.h>
@ -45,18 +44,27 @@ static char sccsid[] = "@(#)libc.opendir.c 8.1 (Berkeley) 2/15/94";
#include <unistd.h>
/*
* open a directory.
* Open a directory.
*/
DIR *
opendir(name)
const char *name;
{
return (__opendir2(name, DTF_HIDEW|DTF_NODUP));
}
DIR *
__opendir2(name, flags)
const char *name;
int flags;
{
DIR *dirp;
int fd;
int incr;
struct statfs sfb;
int unionstack;
if ((fd = open(name, 0)) == -1)
if ((fd = open(name, O_RDONLY)) == -1)
return (NULL);
if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1 ||
(dirp = (DIR *)malloc(sizeof(DIR))) == NULL) {
@ -68,24 +76,30 @@ opendir(name)
* If CLBYTES is an exact multiple of DIRBLKSIZ, use a CLBYTES
* buffer that it cluster boundary aligned.
* Hopefully this can be a big win someday by allowing page
* trades trade to user space to be done by getdirentries()
* trades to user space to be done by getdirentries()
*/
if ((CLBYTES % DIRBLKSIZ) == 0)
incr = CLBYTES;
else
incr = DIRBLKSIZ;
#ifdef MOUNT_UNION
/*
* Determine whether this directory is the top of a union stack.
*/
if (fstatfs(fd, &sfb) < 0) {
free(dirp);
close(fd);
return (NULL);
if (flags & DTF_NODUP) {
struct statfs sfb;
if (fstatfs(fd, &sfb) < 0) {
free(dirp);
close(fd);
return (NULL);
}
unionstack = (sfb.f_type == MOUNT_UNION);
} else {
unionstack = 0;
}
if (sfb.f_type == MOUNT_UNION) {
if (unionstack) {
int len = 0;
int space = 0;
char *buf = 0;
@ -100,11 +114,6 @@ opendir(name)
* number to zero.
*/
/*
* Fixup dd_loc to be non-zero to fake out readdir
*/
dirp->dd_loc = sizeof(void *);
do {
/*
* Always make at least DIRBLKSIZ bytes
@ -119,7 +128,7 @@ opendir(name)
close(fd);
return (NULL);
}
ddptr = buf + (len - space) + dirp->dd_loc;
ddptr = buf + (len - space);
}
n = getdirentries(fd, ddptr, space, &dirp->dd_seek);
@ -129,6 +138,24 @@ opendir(name)
}
} while (n > 0);
flags |= __DTF_READALL;
/*
* Re-open the directory.
* This has the effect of rewinding back to the
* top of the union stack and is needed by
* programs which plan to fchdir to a descriptor
* which has also been read -- see fts.c.
*/
if (flags & DTF_REWIND) {
(void) close(fd);
if ((fd = open(name, O_RDONLY)) == -1) {
free(buf);
free(dirp);
return (NULL);
}
}
/*
* There is now a buffer full of (possibly) duplicate
* names.
@ -144,7 +171,7 @@ opendir(name)
*/
for (dpv = 0;;) {
n = 0;
ddptr = buf + dirp->dd_loc;
ddptr = buf;
while (ddptr < buf + len) {
struct dirent *dp;
@ -166,10 +193,9 @@ opendir(name)
struct dirent *xp;
/*
* If and when whiteouts happen,
* this sort would need to be stable.
* This sort must be stable.
*/
heapsort(dpv, n, sizeof(*dpv), alphasort);
mergesort(dpv, n, sizeof(*dpv), alphasort);
dpv[n] = NULL;
xp = NULL;
@ -183,9 +209,13 @@ opendir(name)
struct dirent *dp = dpv[n];
if ((xp == NULL) ||
strcmp(dp->d_name, xp->d_name))
strcmp(dp->d_name, xp->d_name)) {
xp = dp;
else
} else {
dp->d_fileno = 0;
}
if (dp->d_type == DT_WHT &&
(flags & DTF_HIDEW))
dp->d_fileno = 0;
}
@ -200,9 +230,7 @@ opendir(name)
dirp->dd_len = len;
dirp->dd_size = ddptr - dirp->dd_buf;
} else
#endif /* MOUNT_UNION */
{
} else {
dirp->dd_len = incr;
dirp->dd_buf = malloc(dirp->dd_len);
if (dirp->dd_buf == NULL) {
@ -211,10 +239,12 @@ opendir(name)
return (NULL);
}
dirp->dd_seek = 0;
dirp->dd_loc = 0;
flags &= ~DTF_REWIND;
}
dirp->dd_loc = 0;
dirp->dd_fd = fd;
dirp->dd_flags = flags;
/*
* Set up seek point for rewinddir.

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)readdir.c 8.3 (Berkeley) 9/29/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/param.h>
#include <dirent.h>
/*
* get next entry in a directory.
*/
struct dirent *
readdir(dirp)
register DIR *dirp;
{
register struct dirent *dp;
for (;;) {
if (dirp->dd_loc >= dirp->dd_size) {
if (dirp->dd_flags & __DTF_READALL)
return (NULL);
dirp->dd_loc = 0;
}
if (dirp->dd_loc == 0 && !(dirp->dd_flags & __DTF_READALL)) {
dirp->dd_size = getdirentries(dirp->dd_fd,
dirp->dd_buf, dirp->dd_len, &dirp->dd_seek);
if (dirp->dd_size <= 0)
return (NULL);
}
dp = (struct dirent *)(dirp->dd_buf + dirp->dd_loc);
if ((int)dp & 03) /* bogus pointer check */
return (NULL);
if (dp->d_reclen <= 0 ||
dp->d_reclen > dirp->dd_len + 1 - dirp->dd_loc)
return (NULL);
dirp->dd_loc += dp->d_reclen;
if (dp->d_ino == 0)
continue;
if (dp->d_type == DT_WHT && (dirp->dd_flags & DTF_HIDEW))
continue;
return (dp);
}
}

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)union.h 8.2 (Berkeley) 2/17/94
* @(#)union.h 8.9 (Berkeley) 12/10/94
*/
struct union_args {
@ -74,10 +74,14 @@ struct union_node {
struct vnode *un_uppervp; /* overlaying object */
struct vnode *un_lowervp; /* underlying object */
struct vnode *un_dirvp; /* Parent dir of uppervp */
struct vnode *un_pvp; /* Parent vnode */
char *un_path; /* saved component name */
int un_hash; /* saved un_path hash value */
int un_openl; /* # of opens on lowervp */
int un_flags;
unsigned int un_flags;
struct vnode **un_dircache; /* cached union stack */
off_t un_uppersz; /* size of upper object */
off_t un_lowersz; /* size of lower object */
#ifdef DIAGNOSTIC
pid_t un_pid;
#endif
@ -87,15 +91,22 @@ struct union_node {
#define UN_LOCKED 0x02
#define UN_ULOCK 0x04 /* Upper node is locked */
#define UN_KLOCK 0x08 /* Keep upper node locked on vput */
#define UN_CACHED 0x10 /* In union cache */
extern int union_allocvp __P((struct vnode **, struct mount *,
struct vnode *, struct vnode *,
struct componentname *, struct vnode *,
struct vnode *));
extern int union_copyfile __P((struct proc *, struct ucred *,
struct vnode *, struct vnode *));
struct vnode *, int));
extern int union_copyfile __P((struct vnode *, struct vnode *,
struct ucred *, struct proc *));
extern int union_copyup __P((struct union_node *, int, struct ucred *,
struct proc *));
extern int union_dowhiteout __P((struct union_node *, struct ucred *,
struct proc *));
extern int union_mkshadow __P((struct union_mount *, struct vnode *,
struct componentname *, struct vnode **));
extern int union_mkwhiteout __P((struct union_mount *, struct vnode *,
struct componentname *, char *));
extern int union_vn_create __P((struct vnode **, struct union_node *,
struct proc *));
extern int union_cn_close __P((struct vnode *, int, struct ucred *,
@ -104,6 +115,7 @@ extern void union_removed_upper __P((struct union_node *un));
extern struct vnode *union_lowervp __P((struct vnode *));
extern void union_newlower __P((struct union_node *, struct vnode *));
extern void union_newupper __P((struct union_node *, struct vnode *));
extern void union_newsize __P((struct vnode *, off_t, off_t));
#define MOUNTTOUNIONMOUNT(mp) ((struct union_mount *)((mp)->mnt_data))
#define VTOUNION(vp) ((struct union_node *)(vp)->v_data)

View File

@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)union_subr.c 8.4 (Berkeley) 2/17/94
* @(#)union_subr.c 8.20 (Berkeley) 5/20/95
*/
#include <sys/param.h>
@ -47,6 +47,9 @@
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <vm/vm.h> /* for vnode_pager_setsize */
#include <miscfs/union/union.h>
#ifdef DIAGNOSTIC
@ -110,31 +113,38 @@ union_updatevp(un, uppervp, lowervp)
{
int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
int nhash = UNION_HASH(uppervp, lowervp);
int docache = (lowervp != NULLVP || uppervp != NULLVP);
int lhash, hhash, uhash;
if (ohash != nhash) {
/*
* Ensure locking is ordered from lower to higher
* to avoid deadlocks.
*/
if (nhash < ohash) {
int t = ohash;
ohash = nhash;
nhash = t;
}
while (union_list_lock(ohash))
continue;
while (union_list_lock(nhash))
continue;
LIST_REMOVE(un, un_cache);
union_list_unlock(ohash);
} else {
while (union_list_lock(nhash))
continue;
/*
* Ensure locking is ordered from lower to higher
* to avoid deadlocks.
*/
if (nhash < ohash) {
lhash = nhash;
uhash = ohash;
} else {
lhash = ohash;
uhash = nhash;
}
if (lhash != uhash)
while (union_list_lock(lhash))
continue;
while (union_list_lock(uhash))
continue;
if (ohash != nhash || !docache) {
if (un->un_flags & UN_CACHED) {
un->un_flags &= ~UN_CACHED;
LIST_REMOVE(un, un_cache);
}
}
if (ohash != nhash)
union_list_unlock(ohash);
if (un->un_lowervp != lowervp) {
if (un->un_lowervp) {
vrele(un->un_lowervp);
@ -148,6 +158,7 @@ union_updatevp(un, uppervp, lowervp)
}
}
un->un_lowervp = lowervp;
un->un_lowersz = VNOVAL;
}
if (un->un_uppervp != uppervp) {
@ -155,10 +166,13 @@ union_updatevp(un, uppervp, lowervp)
vrele(un->un_uppervp);
un->un_uppervp = uppervp;
un->un_uppersz = VNOVAL;
}
if (ohash != nhash)
if (docache && (ohash != nhash)) {
LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
un->un_flags |= UN_CACHED;
}
union_list_unlock(nhash);
}
@ -181,6 +195,47 @@ union_newupper(un, uppervp)
union_updatevp(un, uppervp, un->un_lowervp);
}
/*
* Keep track of size changes in the underlying vnodes.
* If the size changes, then callback to the vm layer
* giving priority to the upper layer size.
*/
void
union_newsize(vp, uppersz, lowersz)
struct vnode *vp;
off_t uppersz, lowersz;
{
struct union_node *un;
off_t sz;
/* only interested in regular files */
if (vp->v_type != VREG)
return;
un = VTOUNION(vp);
sz = VNOVAL;
if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
un->un_uppersz = uppersz;
if (sz == VNOVAL)
sz = un->un_uppersz;
}
if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
un->un_lowersz = lowersz;
if (sz == VNOVAL)
sz = un->un_lowersz;
}
if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC
printf("union: %s size now %ld\n",
uppersz != VNOVAL ? "upper" : "lower", (long) sz);
#endif
vnode_pager_setsize(vp, sz);
}
}
/*
* allocate a union_node/vnode pair. the vnode is
* referenced and locked. the new vnode is returned
@ -213,20 +268,23 @@ union_newupper(un, uppervp)
* the vnode free list.
*/
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp, docache)
struct vnode **vpp;
struct mount *mp;
struct vnode *undvp;
struct vnode *undvp; /* parent union vnode */
struct vnode *dvp; /* may be null */
struct componentname *cnp; /* may be null */
struct vnode *uppervp; /* may be null */
struct vnode *lowervp; /* may be null */
int docache;
{
int error;
struct union_node *un;
struct union_node **pp;
struct vnode *xlowervp = NULLVP;
struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
int hash;
int vflag;
int try;
if (uppervp == NULLVP && lowervp == NULLVP)
@ -237,8 +295,22 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
lowervp = NULLVP;
}
/* detect the root vnode (and aliases) */
vflag = 0;
if ((uppervp == um->um_uppervp) &&
((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
if (lowervp == NULLVP) {
lowervp = um->um_lowervp;
if (lowervp != NULLVP)
VREF(lowervp);
}
vflag = VROOT;
}
loop:
for (try = 0; try < 3; try++) {
if (!docache) {
un = 0;
} else for (try = 0; try < 3; try++) {
switch (try) {
case 0:
if (lowervp == NULLVP)
@ -269,7 +341,8 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
(un->un_uppervp == uppervp ||
un->un_uppervp == NULLVP) &&
(UNIONTOV(un)->v_mount == mp)) {
if (vget(UNIONTOV(un), 0)) {
if (vget(UNIONTOV(un), 0,
cnp ? cnp->cn_proc : NULL)) {
union_list_unlock(hash);
goto loop;
}
@ -352,8 +425,7 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
*/
if (lowervp != un->un_lowervp) {
union_newlower(un, lowervp);
if (cnp && (lowervp != NULLVP) &&
(lowervp->v_type == VREG)) {
if (cnp && (lowervp != NULLVP)) {
un->un_hash = cnp->cn_hash;
un->un_path = malloc(cnp->cn_namelen+1,
M_TEMP, M_WAITOK);
@ -370,14 +442,16 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
return (0);
}
/*
* otherwise lock the vp list while we call getnewvnode
* since that can block.
*/
hash = UNION_HASH(uppervp, lowervp);
if (docache) {
/*
* otherwise lock the vp list while we call getnewvnode
* since that can block.
*/
hash = UNION_HASH(uppervp, lowervp);
if (union_list_lock(hash))
goto loop;
if (union_list_lock(hash))
goto loop;
}
error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
if (error) {
@ -396,6 +470,7 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
M_TEMP, M_WAITOK);
(*vpp)->v_flag |= vflag;
if (uppervp)
(*vpp)->v_type = uppervp->v_type;
else
@ -403,7 +478,13 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
un = VTOUNION(*vpp);
un->un_vnode = *vpp;
un->un_uppervp = uppervp;
un->un_uppersz = VNOVAL;
un->un_lowervp = lowervp;
un->un_lowersz = VNOVAL;
un->un_pvp = undvp;
if (undvp != NULLVP)
VREF(undvp);
un->un_dircache = 0;
un->un_openl = 0;
un->un_flags = UN_LOCKED;
if (un->un_uppervp)
@ -414,7 +495,7 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
else
un->un_pid = -1;
#endif
if (cnp && (lowervp != NULLVP) && (lowervp->v_type == VREG)) {
if (cnp && (lowervp != NULLVP)) {
un->un_hash = cnp->cn_hash;
un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
@ -427,13 +508,17 @@ union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
un->un_dirvp = 0;
}
LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
if (docache) {
LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
un->un_flags |= UN_CACHED;
}
if (xlowervp)
vrele(xlowervp);
out:
union_list_unlock(hash);
if (docache)
union_list_unlock(hash);
return (error);
}
@ -444,13 +529,18 @@ union_freevp(vp)
{
struct union_node *un = VTOUNION(vp);
LIST_REMOVE(un, un_cache);
if (un->un_flags & UN_CACHED) {
un->un_flags &= ~UN_CACHED;
LIST_REMOVE(un, un_cache);
}
if (un->un_uppervp)
if (un->un_pvp != NULLVP)
vrele(un->un_pvp);
if (un->un_uppervp != NULLVP)
vrele(un->un_uppervp);
if (un->un_lowervp)
if (un->un_lowervp != NULLVP)
vrele(un->un_lowervp);
if (un->un_dirvp)
if (un->un_dirvp != NULLVP)
vrele(un->un_dirvp);
if (un->un_path)
free(un->un_path, M_TEMP);
@ -467,11 +557,11 @@ union_freevp(vp)
* and (tvp) are locked on entry and exit.
*/
int
union_copyfile(p, cred, fvp, tvp)
struct proc *p;
struct ucred *cred;
union_copyfile(fvp, tvp, cred, p)
struct vnode *fvp;
struct vnode *tvp;
struct ucred *cred;
struct proc *p;
{
char *buf;
struct uio uio;
@ -490,12 +580,12 @@ union_copyfile(p, cred, fvp, tvp)
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_offset = 0;
VOP_UNLOCK(fvp); /* XXX */
LEASE_CHECK(fvp, p, cred, LEASE_READ);
VOP_LOCK(fvp); /* XXX */
VOP_UNLOCK(tvp); /* XXX */
LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
VOP_LOCK(tvp); /* XXX */
VOP_UNLOCK(fvp, 0, p); /* XXX */
VOP_LEASE(fvp, p, cred, LEASE_READ);
vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */
VOP_UNLOCK(tvp, 0, p); /* XXX */
VOP_LEASE(tvp, p, cred, LEASE_WRITE);
vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */
buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
@ -534,6 +624,123 @@ union_copyfile(p, cred, fvp, tvp)
return (error);
}
/*
* (un) is assumed to be locked on entry and remains
* locked on exit.
*/
int
union_copyup(un, docopy, cred, p)
struct union_node *un;
int docopy;
struct ucred *cred;
struct proc *p;
{
int error;
struct vnode *lvp, *uvp;
error = union_vn_create(&uvp, un, p);
if (error)
return (error);
/* at this point, uppervp is locked */
union_newupper(un, uvp);
un->un_flags |= UN_ULOCK;
lvp = un->un_lowervp;
if (docopy) {
/*
* XX - should not ignore errors
* from VOP_CLOSE
*/
vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, p);
error = VOP_OPEN(lvp, FREAD, cred, p);
if (error == 0) {
error = union_copyfile(lvp, uvp, cred, p);
VOP_UNLOCK(lvp, 0, p);
(void) VOP_CLOSE(lvp, FREAD, cred, p);
}
#ifdef UNION_DIAGNOSTIC
if (error == 0)
uprintf("union: copied up %s\n", un->un_path);
#endif
}
un->un_flags &= ~UN_ULOCK;
VOP_UNLOCK(uvp, 0, p);
union_vn_close(uvp, FWRITE, cred, p);
vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY, p);
un->un_flags |= UN_ULOCK;
/*
* Subsequent IOs will go to the top layer, so
* call close on the lower vnode and open on the
* upper vnode to ensure that the filesystem keeps
* its references counts right. This doesn't do
* the right thing with (cred) and (FREAD) though.
* Ignoring error returns is not right, either.
*/
if (error == 0) {
int i;
for (i = 0; i < un->un_openl; i++) {
(void) VOP_CLOSE(lvp, FREAD, cred, p);
(void) VOP_OPEN(uvp, FREAD, cred, p);
}
un->un_openl = 0;
}
return (error);
}
static int
union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
struct union_mount *um;
struct vnode *dvp;
struct vnode **vpp;
struct componentname *cnp;
struct componentname *cn;
char *path;
int pathlen;
{
int error;
/*
* A new componentname structure must be faked up because
* there is no way to know where the upper level cnp came
* from or what it is being used for. This must duplicate
* some of the work done by NDINIT, some of the work done
* by namei, some of the work done by lookup and some of
* the work done by VOP_LOOKUP when given a CREATE flag.
* Conclusion: Horrible.
*
* The pathname buffer will be FREEed by VOP_MKDIR.
*/
cn->cn_namelen = pathlen;
cn->cn_pnbuf = malloc(cn->cn_namelen+1, M_NAMEI, M_WAITOK);
bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
cn->cn_pnbuf[cn->cn_namelen] = '\0';
cn->cn_nameiop = CREATE;
cn->cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
cn->cn_proc = cnp->cn_proc;
if (um->um_op == UNMNT_ABOVE)
cn->cn_cred = cnp->cn_cred;
else
cn->cn_cred = um->um_cred;
cn->cn_nameptr = cn->cn_pnbuf;
cn->cn_hash = cnp->cn_hash;
cn->cn_consume = cnp->cn_consume;
VREF(dvp);
error = relookup(dvp, vpp, cn);
if (!error)
vrele(dvp);
return (error);
}
/*
* Create a shadow directory in the upper layer.
* The new vnode is returned locked.
@ -558,6 +765,19 @@ union_mkshadow(um, dvp, cnp, vpp)
struct proc *p = cnp->cn_proc;
struct componentname cn;
error = union_relookup(um, dvp, vpp, cnp, &cn,
cnp->cn_nameptr, cnp->cn_namelen);
if (error)
return (error);
if (*vpp) {
VOP_ABORTOP(dvp, &cn);
VOP_UNLOCK(dvp, 0, p);
vrele(*vpp);
*vpp = NULLVP;
return (EEXIST);
}
/*
* policy: when creating the shadow directory in the
* upper layer, create it owned by the user who did
@ -566,57 +786,65 @@ union_mkshadow(um, dvp, cnp, vpp)
* mkdir syscall). (jsp, kb)
*/
/*
* A new componentname structure must be faked up because
* there is no way to know where the upper level cnp came
* from or what it is being used for. This must duplicate
* some of the work done by NDINIT, some of the work done
* by namei, some of the work done by lookup and some of
* the work done by VOP_LOOKUP when given a CREATE flag.
* Conclusion: Horrible.
*
* The pathname buffer will be FREEed by VOP_MKDIR.
*/
cn.cn_pnbuf = malloc(cnp->cn_namelen+1, M_NAMEI, M_WAITOK);
bcopy(cnp->cn_nameptr, cn.cn_pnbuf, cnp->cn_namelen);
cn.cn_pnbuf[cnp->cn_namelen] = '\0';
cn.cn_nameiop = CREATE;
cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
cn.cn_proc = cnp->cn_proc;
if (um->um_op == UNMNT_ABOVE)
cn.cn_cred = cnp->cn_cred;
else
cn.cn_cred = um->um_cred;
cn.cn_nameptr = cn.cn_pnbuf;
cn.cn_namelen = cnp->cn_namelen;
cn.cn_hash = cnp->cn_hash;
cn.cn_consume = cnp->cn_consume;
VREF(dvp);
if (error = relookup(dvp, vpp, &cn))
return (error);
vrele(dvp);
if (*vpp) {
VOP_ABORTOP(dvp, &cn);
VOP_UNLOCK(dvp);
vrele(*vpp);
*vpp = NULLVP;
return (EEXIST);
}
VATTR_NULL(&va);
va.va_type = VDIR;
va.va_mode = um->um_cmode;
/* LEASE_CHECK: dvp is locked */
LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);
/* VOP_LEASE: dvp is locked */
VOP_LEASE(dvp, p, cn.cn_cred, LEASE_WRITE);
error = VOP_MKDIR(dvp, vpp, &cn, &va);
return (error);
}
/*
* Create a whiteout entry in the upper layer.
*
* (um) points to the union mount structure for access to the
* the mounting process's credentials.
* (dvp) is the directory in which to create the whiteout.
* it is locked on entry and exit.
* (cnp) is the componentname to be created.
*/
int
union_mkwhiteout(um, dvp, cnp, path)
struct union_mount *um;
struct vnode *dvp;
struct componentname *cnp;
char *path;
{
int error;
struct vattr va;
struct proc *p = cnp->cn_proc;
struct vnode *wvp;
struct componentname cn;
VOP_UNLOCK(dvp, 0, p);
error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
if (error) {
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
return (error);
}
if (wvp) {
VOP_ABORTOP(dvp, &cn);
vrele(dvp);
vrele(wvp);
return (EEXIST);
}
/* VOP_LEASE: dvp is locked */
VOP_LEASE(dvp, p, p->p_ucred, LEASE_WRITE);
error = VOP_WHITEOUT(dvp, &cn, CREATE);
if (error)
VOP_ABORTOP(dvp, &cn);
vrele(dvp);
return (error);
}
/*
* union_vn_create: creates and opens a new shadow file
* on the upper union layer. this function is similar
@ -691,7 +919,7 @@ union_vn_create(vpp, un, p)
VATTR_NULL(vap);
vap->va_type = VREG;
vap->va_mode = cmode;
LEASE_CHECK(un->un_dirvp, p, cred, LEASE_WRITE);
VOP_LEASE(un->un_dirvp, p, cred, LEASE_WRITE);
if (error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap))
return (error);
@ -712,33 +940,147 @@ union_vn_close(vp, fmode, cred, p)
struct ucred *cred;
struct proc *p;
{
if (fmode & FWRITE)
--vp->v_writecount;
return (VOP_CLOSE(vp, fmode));
return (VOP_CLOSE(vp, fmode, cred, p));
}
void
union_removed_upper(un)
struct union_node *un;
{
if (un->un_flags & UN_ULOCK) {
un->un_flags &= ~UN_ULOCK;
VOP_UNLOCK(un->un_uppervp);
}
struct proc *p = curproc; /* XXX */
union_newupper(un, NULLVP);
if (un->un_flags & UN_CACHED) {
un->un_flags &= ~UN_CACHED;
LIST_REMOVE(un, un_cache);
}
if (un->un_flags & UN_ULOCK) {
un->un_flags &= ~UN_ULOCK;
VOP_UNLOCK(un->un_uppervp, 0, p);
}
}
#if 0
struct vnode *
union_lowervp(vp)
struct vnode *vp;
{
struct union_node *un = VTOUNION(vp);
if (un->un_lowervp && (vp->v_type == un->un_lowervp->v_type)) {
if (vget(un->un_lowervp, 0))
return (NULLVP);
if ((un->un_lowervp != NULLVP) &&
(vp->v_type == un->un_lowervp->v_type)) {
if (vget(un->un_lowervp, 0) == 0)
return (un->un_lowervp);
}
return (un->un_lowervp);
return (NULLVP);
}
#endif
/*
* determine whether a whiteout is needed
* during a remove/rmdir operation.
*/
int
union_dowhiteout(un, cred, p)
struct union_node *un;
struct ucred *cred;
struct proc *p;
{
struct vattr va;
if (un->un_lowervp != NULLVP)
return (1);
if (VOP_GETATTR(un->un_uppervp, &va, cred, p) == 0 &&
(va.va_flags & OPAQUE))
return (1);
return (0);
}
static void
union_dircache_r(vp, vppp, cntp)
struct vnode *vp;
struct vnode ***vppp;
int *cntp;
{
struct union_node *un;
if (vp->v_op != union_vnodeop_p) {
if (vppp) {
VREF(vp);
*(*vppp)++ = vp;
if (--(*cntp) == 0)
panic("union: dircache table too small");
} else {
(*cntp)++;
}
return;
}
un = VTOUNION(vp);
if (un->un_uppervp != NULLVP)
union_dircache_r(un->un_uppervp, vppp, cntp);
if (un->un_lowervp != NULLVP)
union_dircache_r(un->un_lowervp, vppp, cntp);
}
struct vnode *
union_dircache(vp, p)
struct vnode *vp;
struct proc *p;
{
int cnt;
struct vnode *nvp;
struct vnode **vpp;
struct vnode **dircache;
struct union_node *un;
int error;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
dircache = VTOUNION(vp)->un_dircache;
nvp = NULLVP;
if (dircache == 0) {
cnt = 0;
union_dircache_r(vp, 0, &cnt);
cnt++;
dircache = (struct vnode **)
malloc(cnt * sizeof(struct vnode *),
M_TEMP, M_WAITOK);
vpp = dircache;
union_dircache_r(vp, &vpp, &cnt);
*vpp = NULLVP;
vpp = dircache + 1;
} else {
vpp = dircache;
do {
if (*vpp++ == VTOUNION(vp)->un_uppervp)
break;
} while (*vpp != NULLVP);
}
if (*vpp == NULLVP)
goto out;
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, p);
VREF(*vpp);
error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0);
if (error)
goto out;
VTOUNION(vp)->un_dircache = 0;
un = VTOUNION(nvp);
un->un_dircache = dircache;
out:
VOP_UNLOCK(vp, 0, p);
return (nvp);
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1994 The Regents of the University of California.
* Copyright (c) 1994 Jan-Simon Pendry.
* Copyright (c) 1994, 1995 The Regents of the University of California.
* Copyright (c) 1994, 1995 Jan-Simon Pendry.
* All rights reserved.
*
* This code is derived from software donated to Berkeley by
@ -34,7 +34,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)union_vfsops.c 8.7 (Berkeley) 3/5/94
* @(#)union_vfsops.c 8.20 (Berkeley) 5/20/95
*/
/*
@ -69,7 +69,7 @@ union_mount(mp, path, data, ndp, p)
struct union_args args;
struct vnode *lowerrootvp = NULLVP;
struct vnode *upperrootvp = NULLVP;
struct union_mount *um;
struct union_mount *um = 0;
struct ucred *cred = 0;
struct ucred *scred;
struct vattr va;
@ -94,34 +94,6 @@ union_mount(mp, path, data, ndp, p)
goto bad;
}
/*
* Take a copy of the process's credentials. This isn't
* quite right since the euid will always be zero and we
* want to get the "real" users credentials. So fix up
* the uid field after taking the copy.
*/
cred = crdup(p->p_ucred);
cred->cr_uid = p->p_cred->p_ruid;
/*
* Ensure the *real* user has write permission on the
* mounted-on directory. This allows the mount_union
* command to be made setuid root so allowing anyone
* to do union mounts onto any directory on which they
* have write permission and which they also own.
*/
error = VOP_GETATTR(mp->mnt_vnodecovered, &va, cred, p);
if (error)
goto bad;
if ((va.va_uid != cred->cr_uid) &&
(cred->cr_uid != 0)) {
error = EACCES;
goto bad;
}
error = VOP_ACCESS(mp->mnt_vnodecovered, VWRITE, cred, p);
if (error)
goto bad;
/*
* Get argument
*/
@ -132,18 +104,10 @@ union_mount(mp, path, data, ndp, p)
VREF(lowerrootvp);
/*
* Find upper node. Use the real process credentials,
* not the effective ones since this will have come
* through a setuid process (mount_union). All this
* messing around with permissions is entirely bogus
* and should be removed by allowing any user straight
* past the mount system call.
* Find upper node.
*/
scred = p->p_ucred;
p->p_ucred = cred;
NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT,
UIO_USERSPACE, args.target, p);
p->p_ucred = scred;
if (error = namei(ndp))
goto bad;
@ -193,7 +157,18 @@ union_mount(mp, path, data, ndp, p)
goto bad;
}
um->um_cred = cred;
/*
* Unless the mount is readonly, ensure that the top layer
* supports whiteout operations
*/
if ((mp->mnt_flag & MNT_RDONLY) == 0) {
error = VOP_WHITEOUT(um->um_uppervp, (struct componentname *) 0, LOOKUP);
if (error)
goto bad;
}
um->um_cred = p->p_ucred;
crhold(um->um_cred);
um->um_cmode = UN_DIRMODE &~ p->p_fd->fd_cmask;
/*
@ -221,24 +196,18 @@ union_mount(mp, path, data, ndp, p)
*/
mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);
/*
* This is a user mount. Privilege check for unmount
* will be done in union_unmount.
*/
mp->mnt_flag |= MNT_USER;
mp->mnt_data = (qaddr_t) um;
getnewfsid(mp, MOUNT_UNION);
vfs_getnewfsid(mp);
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
switch (um->um_op) {
case UNMNT_ABOVE:
cp = "<above>";
cp = "<above>:";
break;
case UNMNT_BELOW:
cp = "<below>";
cp = "<below>:";
break;
case UNMNT_REPLACE:
cp = "";
@ -260,6 +229,8 @@ union_mount(mp, path, data, ndp, p)
return (0);
bad:
if (um)
free(um, M_UFSMNT);
if (cred)
crfree(cred);
if (upperrootvp)
@ -296,38 +267,54 @@ union_unmount(mp, mntflags, p)
struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
struct vnode *um_rootvp;
int error;
int freeing;
int flags = 0;
extern int doforce;
#ifdef UNION_DIAGNOSTIC
printf("union_unmount(mp = %x)\n", mp);
#endif
/* only the mounter, or superuser can unmount */
if ((p->p_cred->p_ruid != um->um_cred->cr_uid) &&
(error = suser(p->p_ucred, &p->p_acflag)))
return (error);
if (mntflags & MNT_FORCE) {
/* union can never be rootfs so don't check for it */
if (!doforce)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
if (error = union_root(mp, &um_rootvp))
return (error);
/*
* Keep flushing vnodes from the mount list.
* This is needed because of the un_pvp held
* reference to the parent vnode.
* If more vnodes have been freed on a given pass,
* the try again. The loop will iterate at most
* (d) times, where (d) is the maximum tree depth
* in the filesystem.
*/
for (freeing = 0; vflush(mp, um_rootvp, flags) != 0;) {
struct vnode *vp;
int n;
/* count #vnodes held on mount list */
for (n = 0, vp = mp->mnt_vnodelist.lh_first;
vp != NULLVP;
vp = vp->v_mntvnodes.le_next)
n++;
/* if this is unchanged then stop */
if (n == freeing)
break;
/* otherwise try once more time */
freeing = n;
}
/* At this point the root vnode should have a single reference */
if (um_rootvp->v_usecount > 1) {
vput(um_rootvp);
return (EBUSY);
}
if (error = vflush(mp, um_rootvp, flags)) {
vput(um_rootvp);
return (error);
}
#ifdef UNION_DIAGNOSTIC
vprint("alias root of lower", um_rootvp);
vprint("union root", um_rootvp);
#endif
/*
* Discard references to upper and lower target vnodes.
@ -357,16 +344,11 @@ union_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct proc *p = curproc; /* XXX */
struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
int error;
int loselock;
#ifdef UNION_DIAGNOSTIC
printf("union_root(mp = %x, lvp = %x, uvp = %x)\n", mp,
um->um_lowervp,
um->um_uppervp);
#endif
/*
* Return locked reference to root.
*/
@ -375,7 +357,7 @@ union_root(mp, vpp)
VOP_ISLOCKED(um->um_uppervp)) {
loselock = 1;
} else {
VOP_LOCK(um->um_uppervp);
vn_lock(um->um_uppervp, LK_EXCLUSIVE | LK_RETRY, p);
loselock = 0;
}
if (um->um_lowervp)
@ -385,16 +367,17 @@ union_root(mp, vpp)
(struct vnode *) 0,
(struct componentname *) 0,
um->um_uppervp,
um->um_lowervp);
um->um_lowervp,
1);
if (error) {
if (!loselock)
VOP_UNLOCK(um->um_uppervp);
vrele(um->um_uppervp);
if (loselock)
vrele(um->um_uppervp);
else
vput(um->um_uppervp);
if (um->um_lowervp)
vrele(um->um_lowervp);
} else {
(*vpp)->v_flag |= VROOT;
if (loselock)
VTOUNION(*vpp)->un_flags &= ~UN_ULOCK;
}
@ -402,18 +385,6 @@ union_root(mp, vpp)
return (error);
}
int
union_quotactl(mp, cmd, uid, arg, p)
struct mount *mp;
int cmd;
uid_t uid;
caddr_t arg;
struct proc *p;
{
return (EOPNOTSUPP);
}
int
union_statfs(mp, sbp, p)
struct mount *mp;
@ -457,7 +428,6 @@ union_statfs(mp, sbp, p)
if (error)
return (error);
sbp->f_type = MOUNT_UNION;
sbp->f_flags = mstat.f_flags;
sbp->f_bsize = mstat.f_bsize;
sbp->f_iosize = mstat.f_iosize;
@ -468,18 +438,23 @@ union_statfs(mp, sbp, p)
* kind of sense. none of this makes sense though.
*/
if (mstat.f_bsize != lbsize) {
if (mstat.f_bsize != lbsize)
sbp->f_blocks = sbp->f_blocks * lbsize / mstat.f_bsize;
sbp->f_bfree = sbp->f_bfree * lbsize / mstat.f_bsize;
sbp->f_bavail = sbp->f_bavail * lbsize / mstat.f_bsize;
}
/*
* The "total" fields count total resources in all layers,
* the "free" fields count only those resources which are
* free in the upper layer (since only the upper layer
* is writeable).
*/
sbp->f_blocks += mstat.f_blocks;
sbp->f_bfree += mstat.f_bfree;
sbp->f_bavail += mstat.f_bavail;
sbp->f_bfree = mstat.f_bfree;
sbp->f_bavail = mstat.f_bavail;
sbp->f_files += mstat.f_files;
sbp->f_ffree += mstat.f_ffree;
sbp->f_ffree = mstat.f_ffree;
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
@ -487,53 +462,22 @@ union_statfs(mp, sbp, p)
return (0);
}
int
union_sync(mp, waitfor, cred, p)
struct mount *mp;
int waitfor;
struct ucred *cred;
struct proc *p;
{
/*
* XXX - Assumes no data cached at union layer.
*/
#define union_sync ((int (*) __P((struct mount *, int, struct ucred *, \
struct proc *)))nullop)
/*
* XXX - Assumes no data cached at union layer.
*/
return (0);
}
int
union_vget(mp, ino, vpp)
struct mount *mp;
ino_t ino;
struct vnode **vpp;
{
return (EOPNOTSUPP);
}
int
union_fhtovp(mp, fidp, nam, vpp, exflagsp, credanonp)
struct mount *mp;
struct fid *fidp;
struct mbuf *nam;
struct vnode **vpp;
int *exflagsp;
struct ucred **credanonp;
{
return (EOPNOTSUPP);
}
int
union_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
return (EOPNOTSUPP);
}
int union_init __P((void));
#define union_fhtovp ((int (*) __P((struct mount *, struct fid *, \
struct mbuf *, struct vnode **, int *, struct ucred **)))eopnotsupp)
int union_init __P((struct vfsconf *));
#define union_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \
struct proc *)))eopnotsupp)
#define union_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
#define union_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \
eopnotsupp)
#define union_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp)
struct vfsops union_vfsops = {
union_mount,
@ -547,4 +491,5 @@ struct vfsops union_vfsops = {
union_fhtovp,
union_vptofh,
union_init,
union_sysctl,
};

File diff suppressed because it is too large Load Diff