1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
* (c) UNIX System Laboratories, Inc.
|
|
|
|
* All or some portions of this file are derived from material licensed
|
|
|
|
* to the University of California by American Telephone and Telegraph
|
|
|
|
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
|
|
|
|
* the permission of UNIX System Laboratories, Inc.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the University of
|
|
|
|
* California, Berkeley and its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
|
1999-08-28 01:08:13 +00:00
|
|
|
* $FreeBSD$
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
|
1997-12-16 17:40:42 +00:00
|
|
|
/* For 4.3 integer FS ID compatibility */
|
|
|
|
#include "opt_compat.h"
|
2000-07-03 13:26:54 +00:00
|
|
|
#include "opt_ffs.h"
|
1997-12-16 17:40:42 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2000-05-05 09:59:14 +00:00
|
|
|
#include <sys/bio.h>
|
1999-02-25 15:54:06 +00:00
|
|
|
#include <sys/buf.h>
|
1997-02-10 02:22:35 +00:00
|
|
|
#include <sys/sysent.h>
|
1999-09-11 00:46:08 +00:00
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mount.h>
|
2000-10-20 07:58:15 +00:00
|
|
|
#include <sys/mutex.h>
|
1995-11-12 06:43:28 +00:00
|
|
|
#include <sys/sysproto.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/filedesc.h>
|
|
|
|
#include <sys/kernel.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/file.h>
|
1998-11-03 14:29:09 +00:00
|
|
|
#include <sys/linker.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/stat.h>
|
1996-09-03 14:25:27 +00:00
|
|
|
#include <sys/unistd.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/dirent.h>
|
1999-12-19 06:08:07 +00:00
|
|
|
#include <sys/extattr.h>
|
2001-02-21 06:39:57 +00:00
|
|
|
#include <sys/jail.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-12-19 06:08:07 +00:00
|
|
|
#include <machine/limits.h>
|
1995-03-16 18:17:34 +00:00
|
|
|
#include <miscfs/union/union.h>
|
1999-12-19 06:08:07 +00:00
|
|
|
#include <sys/sysctl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <vm/vm.h>
|
1995-12-07 12:48:31 +00:00
|
|
|
#include <vm/vm_object.h>
|
1997-12-27 02:56:39 +00:00
|
|
|
#include <vm/vm_zone.h>
|
2000-11-18 21:01:04 +00:00
|
|
|
#include <vm/vm_page.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/* Forward declarations of file-local helpers defined later in this file. */
static int change_dir __P((struct nameidata *ndp, struct proc *p));
static void checkdirs __P((struct vnode *olddp, struct vnode *newdp));
static int chroot_refuse_vdir_fds __P((struct filedesc *fdp));
static int getutimes __P((const struct timeval *, struct timespec *));
static int setfown __P((struct proc *, struct vnode *, uid_t, gid_t));
static int setfmode __P((struct proc *, struct vnode *, int));
static int setfflags __P((struct proc *, struct vnode *, int));
static int setutimes __P((struct proc *, struct vnode *,
    const struct timespec *, int));

static int usermount = 0;	/* if 1, non-root can mount fs. */

/*
 * Hook function pointer; NOTE(review): presumably filled in by the
 * union file system module (miscfs/union) -- confirm against that code.
 */
int (*union_dircheckp) __P((struct proc *, struct vnode **, struct file *));

/* Expose `usermount' as the read-write sysctl vfs.usermount. */
SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Virtual File System System Calls
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mount a file system.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
/*
 * Argument structure for the mount(2) system call; defined here only
 * when <sys/sysproto.h> has not already provided it.
 */
struct mount_args {
	char *type;	/* file system type name */
	char *path;	/* path of the directory to mount on */
	int flags;	/* mount flags (MNT_*) */
	caddr_t data;	/* file-system-specific argument structure */
};
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
mount(p, uap)
|
2001-02-16 14:31:49 +00:00
|
|
|
struct proc *p;
|
|
|
|
struct mount_args /* {
|
1997-02-10 02:22:35 +00:00
|
|
|
syscallarg(char *) type;
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
syscallarg(caddr_t) data;
|
|
|
|
} */ *uap;
|
2001-03-01 21:00:17 +00:00
|
|
|
{
|
|
|
|
char *fstype;
|
|
|
|
char *fspath;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK | M_ZERO);
|
|
|
|
fspath = malloc(MNAMELEN, M_TEMP, M_WAITOK | M_ZERO);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vfs_mount() actually takes a kernel string for `type' and
|
|
|
|
* `path' now, so extract them.
|
|
|
|
*/
|
|
|
|
error = copyinstr(SCARG(uap, type), fstype, MFSNAMELEN, NULL);
|
|
|
|
if (error)
|
|
|
|
goto finish;
|
|
|
|
error = copyinstr(SCARG(uap, path), fspath, MNAMELEN, NULL);
|
|
|
|
if (error)
|
|
|
|
goto finish;
|
|
|
|
error = vfs_mount(p, fstype, fspath, SCARG(uap, flags),
|
|
|
|
SCARG(uap, data));
|
|
|
|
finish:
|
|
|
|
free(fstype, M_TEMP);
|
|
|
|
free(fspath, M_TEMP);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vfs_mount(): actually attempt a filesystem mount.
|
|
|
|
*
|
|
|
|
* This routine is designed to be a "generic" entry point for routines
|
|
|
|
* that wish to mount a filesystem. All parameters except `fsdata' are
|
|
|
|
* pointers into kernel space. `fsdata' is currently still a pointer
|
|
|
|
* into userspace.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
vfs_mount(p, fstype, fspath, fsflags, fsdata)
|
|
|
|
struct proc *p;
|
|
|
|
char *fstype;
|
|
|
|
char *fspath;
|
|
|
|
int fsflags;
|
|
|
|
void *fsdata;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct mount *mp;
|
|
|
|
struct vfsconf *vfsp;
|
1997-11-12 05:42:33 +00:00
|
|
|
int error, flag = 0, flag2 = 0;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct vattr va;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata nd;
|
2001-03-01 21:00:17 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Be ultra-paranoid about making sure the type and fspath
|
|
|
|
* variables will fit in our mp buffers, including the
|
|
|
|
* terminating NUL.
|
|
|
|
*/
|
2001-03-02 14:05:49 +00:00
|
|
|
if ((strlen(fstype) >= MFSNAMELEN - 1) ||
|
|
|
|
(strlen(fspath) >= MNAMELEN - 1))
|
2001-03-01 21:00:17 +00:00
|
|
|
return (ENAMETOOLONG);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-04-27 11:18:52 +00:00
|
|
|
if (usermount == 0 && (error = suser(p)))
|
1997-10-23 09:29:09 +00:00
|
|
|
return (error);
|
1999-01-30 12:27:00 +00:00
|
|
|
/*
|
|
|
|
* Do not allow NFS export by non-root users.
|
|
|
|
*/
|
2001-03-01 21:00:17 +00:00
|
|
|
if (fsflags & MNT_EXPORTED) {
|
1999-04-27 11:18:52 +00:00
|
|
|
error = suser(p);
|
1999-01-30 12:27:00 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
|
|
|
|
*/
|
This implements the mumbled-about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison; this is what
it was developed for in fact: "real virtual servers".
Each prison has an IP number associated with it, which all IP
communications will be coerced to use, and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
Mount a /proc in each jail; it will make ps more usable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no provisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
if (suser_xxx(p->p_ucred, 0, 0))
|
2001-03-01 21:00:17 +00:00
|
|
|
fsflags |= MNT_NOSUID | MNT_NODEV;
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Get vnode to be covered
|
|
|
|
*/
|
2001-03-01 21:00:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspath, p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp = nd.ni_vp;
|
2001-03-01 21:00:17 +00:00
|
|
|
if (fsflags & MNT_UPDATE) {
|
1994-05-24 10:09:53 +00:00
|
|
|
if ((vp->v_flag & VROOT) == 0) {
|
|
|
|
vput(vp);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
mp = vp->v_mount;
|
|
|
|
flag = mp->mnt_flag;
|
1997-11-12 05:42:33 +00:00
|
|
|
flag2 = mp->mnt_kern_flag;
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* We only allow the filesystem to be reloaded if it
|
|
|
|
* is currently mounted read-only.
|
|
|
|
*/
|
2001-03-01 21:00:17 +00:00
|
|
|
if ((fsflags & MNT_RELOAD) &&
|
1994-05-24 10:09:53 +00:00
|
|
|
((mp->mnt_flag & MNT_RDONLY) == 0)) {
|
|
|
|
vput(vp);
|
|
|
|
return (EOPNOTSUPP); /* Needs translation */
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* Only root, or the user that did the original mount is
|
|
|
|
* permitted to update it.
|
|
|
|
*/
|
|
|
|
if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
|
1999-04-27 11:18:52 +00:00
|
|
|
(error = suser(p))) {
|
1997-02-10 02:22:35 +00:00
|
|
|
vput(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
if (vfs_busy(mp, LK_NOWAIT, 0, p)) {
|
|
|
|
vput(vp);
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&vp->v_interlock);
|
2000-08-09 01:57:11 +00:00
|
|
|
if ((vp->v_flag & VMOUNT) != 0 ||
|
|
|
|
vp->v_mountedhere != NULL) {
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&vp->v_interlock);
|
2000-08-09 01:57:11 +00:00
|
|
|
vfs_unbusy(mp, p);
|
|
|
|
vput(vp);
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
vp->v_flag |= VMOUNT;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&vp->v_interlock);
|
2001-03-01 21:00:17 +00:00
|
|
|
mp->mnt_flag |= fsflags &
|
2000-08-09 01:57:11 +00:00
|
|
|
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto update;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* If the user is not root, ensure that they own the directory
|
|
|
|
* onto which we are attempting to mount.
|
|
|
|
*/
|
|
|
|
if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) ||
|
|
|
|
(va.va_uid != p->p_ucred->cr_uid &&
|
1999-04-27 11:18:52 +00:00
|
|
|
(error = suser(p)))) {
|
1997-02-10 02:22:35 +00:00
|
|
|
vput(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
2000-08-09 01:57:11 +00:00
|
|
|
if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, p, 0, 0)) != 0) {
|
|
|
|
vput(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2000-08-09 01:57:11 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type != VDIR) {
|
|
|
|
vput(vp);
|
|
|
|
return (ENOTDIR);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
|
2001-03-01 21:00:17 +00:00
|
|
|
if (!strcmp(vfsp->vfc_name, fstype))
|
1997-02-10 02:22:35 +00:00
|
|
|
break;
|
|
|
|
if (vfsp == NULL) {
|
1998-11-03 14:29:09 +00:00
|
|
|
linker_file_t lf;
|
|
|
|
|
|
|
|
/* Only load modules for root (very important!) */
|
1999-04-27 11:18:52 +00:00
|
|
|
if ((error = suser(p)) != 0) {
|
1998-11-03 14:29:09 +00:00
|
|
|
vput(vp);
|
|
|
|
return error;
|
|
|
|
}
|
2001-03-01 21:00:17 +00:00
|
|
|
error = linker_load_file(fstype, &lf);
|
1998-11-03 14:29:09 +00:00
|
|
|
if (error || lf == NULL) {
|
|
|
|
vput(vp);
|
|
|
|
if (lf == NULL)
|
|
|
|
error = ENODEV;
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
lf->userrefs++;
|
|
|
|
/* lookup again, see if the VFS was loaded */
|
|
|
|
for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
|
2001-03-01 21:00:17 +00:00
|
|
|
if (!strcmp(vfsp->vfc_name, fstype))
|
1998-11-03 14:29:09 +00:00
|
|
|
break;
|
|
|
|
if (vfsp == NULL) {
|
|
|
|
lf->userrefs--;
|
|
|
|
linker_file_unload(lf);
|
|
|
|
vput(vp);
|
|
|
|
return (ENODEV);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&vp->v_interlock);
|
1998-09-10 02:27:52 +00:00
|
|
|
if ((vp->v_flag & VMOUNT) != 0 ||
|
|
|
|
vp->v_mountedhere != NULL) {
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&vp->v_interlock);
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(vp);
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
1998-09-10 02:27:52 +00:00
|
|
|
vp->v_flag |= VMOUNT;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&vp->v_interlock);
|
1997-02-10 02:22:35 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate and initialize the filesystem.
|
|
|
|
*/
|
2000-12-08 21:51:06 +00:00
|
|
|
mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
|
This mega-commit is meant to fix numerous interrelated problems. There
has been some bitrot and incorrect assumptions in the vfs_bio code. These
problems have manifest themselves worse on NFS type filesystems, but can
still affect local filesystems under certain circumstances. Most of
the problems have involved mmap consistancy, and as a side-effect broke
the vfs.ioopt code. This code might have been committed seperately, but
almost everything is interrelated.
1) Allow (pmap_object_init_pt) prefaulting of buffer-busy pages that
are fully valid.
2) Rather than deactivating erroneously read initial (header) pages in
kern_exec, we now free them.
3) Fix the rundown of non-VMIO buffers that are in an inconsistent
(missing vp) state.
4) Fix the disassociation of pages from buffers in brelse. The previous
code had rotted and was faulty in a couple of important circumstances.
5) Remove a gratuitious buffer wakeup in vfs_vmio_release.
6) Remove a crufty and currently unused cluster mechanism for VBLK
files in vfs_bio_awrite. When the code is functional, I'll add back
a cleaner version.
7) The page busy count wakeups assocated with the buffer cache usage were
incorrectly cleaned up in a previous commit by me. Revert to the
original, correct version, but with a cleaner implementation.
8) The cluster read code now tries to keep data associated with buffers
more aggressively (without breaking the heuristics) when it is presumed
that the read data (buffers) will be soon needed.
9) Change to filesystem lockmgr locks so that they use LK_NOPAUSE. The
delay loop waiting is not useful for filesystem locks, due to the
length of the time intervals.
10) Correct and clean-up spec_getpages.
11) Implement a fully functional nfs_getpages, nfs_putpages.
12) Fix nfs_write so that modifications are coherent with the NFS data on
the server disk (at least as well as NFS seems to allow.)
13) Properly support MS_INVALIDATE on NFS.
14) Properly pass down MS_INVALIDATE to lower levels of the VM code from
vm_map_clean.
15) Better support the notion of pages being busy but valid, so that
fewer in-transit waits occur. (use p->busy more for pageouts instead
of PG_BUSY.) Since the page is fully valid, it is still usable for
reads.
16) It is possible (in error) for cached pages to be busy. Make the
page allocation code handle that case correctly. (It should probably
be a printf or panic, but I want the system to handle coding errors
robustly. I'll probably add a printf.)
17) Correct the design and usage of vm_page_sleep. It didn't handle
consistancy problems very well, so make the design a little less
lofty. After vm_page_sleep, if it ever blocked, it is still important
to relookup the page (if the object generation count changed), and
verify it's status (always.)
18) In vm_pageout.c, vm_pageout_clean had rotted, so clean that up.
19) Push the page busy for writes and VM_PROT_READ into vm_pageout_flush.
20) Fix vm_pager_put_pages and it's descendents to support an int flag
instead of a boolean, so that we can pass down the invalidate bit.
1998-03-07 21:37:31 +00:00
|
|
|
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
|
1997-02-10 02:22:35 +00:00
|
|
|
(void)vfs_busy(mp, LK_NOWAIT, 0, p);
|
|
|
|
mp->mnt_op = vfsp->vfc_vfsops;
|
|
|
|
mp->mnt_vfc = vfsp;
|
|
|
|
vfsp->vfc_refcount++;
|
|
|
|
mp->mnt_stat.f_type = vfsp->vfc_typenum;
|
|
|
|
mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
|
2001-03-01 21:00:17 +00:00
|
|
|
strncpy(mp->mnt_stat.f_fstypename, fstype, MFSNAMELEN);
|
|
|
|
mp->mnt_stat.f_fstypename[MFSNAMELEN - 1] = '\0';
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_vnodecovered = vp;
|
1997-02-10 02:22:35 +00:00
|
|
|
mp->mnt_stat.f_owner = p->p_ucred->cr_uid;
|
2001-03-01 21:00:17 +00:00
|
|
|
strncpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
|
|
|
|
mp->mnt_stat.f_mntonname[MNAMELEN - 1] = '\0';
|
1999-09-29 20:05:33 +00:00
|
|
|
mp->mnt_iosize_max = DFLTPHYS;
|
1998-09-10 02:27:52 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
update:
|
|
|
|
/*
|
|
|
|
* Set the mount level flags.
|
|
|
|
*/
|
2001-03-01 21:00:17 +00:00
|
|
|
if (fsflags & MNT_RDONLY)
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_flag |= MNT_RDONLY;
|
|
|
|
else if (mp->mnt_flag & MNT_RDONLY)
|
1997-11-12 05:42:33 +00:00
|
|
|
mp->mnt_kern_flag |= MNTK_WANTRDWR;
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
|
1997-09-27 13:40:20 +00:00
|
|
|
MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
|
1999-11-01 04:57:43 +00:00
|
|
|
MNT_NOSYMFOLLOW | MNT_IGNORE |
|
1997-11-13 00:28:51 +00:00
|
|
|
MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
|
2001-03-01 21:00:17 +00:00
|
|
|
mp->mnt_flag |= fsflags & (MNT_NOSUID | MNT_NOEXEC |
|
1997-02-10 02:22:35 +00:00
|
|
|
MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
|
1999-11-01 04:57:43 +00:00
|
|
|
MNT_NOSYMFOLLOW | MNT_IGNORE |
|
1997-11-13 00:28:51 +00:00
|
|
|
MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Mount the filesystem.
|
1999-12-15 23:02:35 +00:00
|
|
|
* XXX The final recipients of VFS_MOUNT just overwrite the ndp they
|
|
|
|
* get. No freeing of cn_pnbuf.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2001-03-01 21:00:17 +00:00
|
|
|
error = VFS_MOUNT(mp, fspath, fsdata, &nd, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (mp->mnt_flag & MNT_UPDATE) {
|
1997-11-12 05:42:33 +00:00
|
|
|
if (mp->mnt_kern_flag & MNTK_WANTRDWR)
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_flag &= ~MNT_RDONLY;
|
2000-07-11 22:07:57 +00:00
|
|
|
mp->mnt_flag &=~
|
|
|
|
(MNT_UPDATE | MNT_RELOAD | MNT_FORCE | MNT_SNAPSHOT);
|
1997-11-22 06:10:36 +00:00
|
|
|
mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
|
|
|
|
if (error) {
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_flag = flag;
|
1997-11-12 05:42:33 +00:00
|
|
|
mp->mnt_kern_flag = flag2;
|
1997-11-22 06:10:36 +00:00
|
|
|
}
|
1998-03-08 09:59:44 +00:00
|
|
|
if ((mp->mnt_flag & MNT_RDONLY) == 0) {
|
|
|
|
if (mp->mnt_syncer == NULL)
|
|
|
|
error = vfs_allocate_syncvnode(mp);
|
|
|
|
} else {
|
|
|
|
if (mp->mnt_syncer != NULL)
|
|
|
|
vrele(mp->mnt_syncer);
|
|
|
|
mp->mnt_syncer = NULL;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
vfs_unbusy(mp, p);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&vp->v_interlock);
|
2000-08-09 01:57:11 +00:00
|
|
|
vp->v_flag &= ~VMOUNT;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&vp->v_interlock);
|
2000-08-09 01:57:11 +00:00
|
|
|
vrele(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
1998-09-10 02:27:52 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Put the new filesystem on the mount list after root.
|
|
|
|
*/
|
|
|
|
cache_purge(vp);
|
|
|
|
if (!error) {
|
2001-02-28 20:54:28 +00:00
|
|
|
struct vnode *newdp;
|
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&vp->v_interlock);
|
1998-09-10 02:27:52 +00:00
|
|
|
vp->v_flag &= ~VMOUNT;
|
|
|
|
vp->v_mountedhere = mp;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&vp->v_interlock);
|
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&mountlist_mtx);
|
2001-02-28 20:54:28 +00:00
|
|
|
if (VFS_ROOT(mp, &newdp))
|
|
|
|
panic("mount: lost mount");
|
|
|
|
checkdirs(vp, newdp);
|
|
|
|
vput(newdp);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1998-03-08 09:59:44 +00:00
|
|
|
if ((mp->mnt_flag & MNT_RDONLY) == 0)
|
|
|
|
error = vfs_allocate_syncvnode(mp);
|
1997-02-10 02:22:35 +00:00
|
|
|
vfs_unbusy(mp, p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = VFS_START(mp, 0, p)) != 0)
|
1995-08-11 11:31:18 +00:00
|
|
|
vrele(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
} else {
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&vp->v_interlock);
|
1998-09-10 02:27:52 +00:00
|
|
|
vp->v_flag &= ~VMOUNT;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&vp->v_interlock);
|
1997-02-10 02:22:35 +00:00
|
|
|
mp->mnt_vfc->vfc_refcount--;
|
|
|
|
vfs_unbusy(mp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
free((caddr_t)mp, M_MOUNT);
|
|
|
|
vput(vp);
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* Scan all active processes to see if any of them have a current
|
2001-02-28 20:54:28 +00:00
|
|
|
* or root directory of `olddp'. If so, replace them with the new
|
|
|
|
* mount point.
|
1997-02-10 02:22:35 +00:00
|
|
|
*/
|
|
|
|
static void
|
2001-02-28 20:54:28 +00:00
|
|
|
checkdirs(olddp, newdp)
|
|
|
|
struct vnode *olddp, *newdp;
|
1997-02-10 02:22:35 +00:00
|
|
|
{
|
|
|
|
struct filedesc *fdp;
|
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
if (olddp->v_usecount == 1)
|
|
|
|
return;
|
2000-12-13 00:17:05 +00:00
|
|
|
ALLPROC_LOCK(AP_SHARED);
|
1999-11-16 16:28:58 +00:00
|
|
|
LIST_FOREACH(p, &allproc, p_list) {
|
1997-02-10 02:22:35 +00:00
|
|
|
fdp = p->p_fd;
|
2001-03-07 02:25:13 +00:00
|
|
|
if (fdp == NULL)
|
|
|
|
continue;
|
1997-02-10 02:22:35 +00:00
|
|
|
if (fdp->fd_cdir == olddp) {
|
|
|
|
vrele(fdp->fd_cdir);
|
|
|
|
VREF(newdp);
|
|
|
|
fdp->fd_cdir = newdp;
|
|
|
|
}
|
|
|
|
if (fdp->fd_rdir == olddp) {
|
|
|
|
vrele(fdp->fd_rdir);
|
|
|
|
VREF(newdp);
|
|
|
|
fdp->fd_rdir = newdp;
|
|
|
|
}
|
|
|
|
}
|
2000-12-13 00:17:05 +00:00
|
|
|
ALLPROC_LOCK(AP_RELEASE);
|
1997-02-10 02:22:35 +00:00
|
|
|
if (rootvnode == olddp) {
|
|
|
|
vrele(rootvnode);
|
|
|
|
VREF(newdp);
|
|
|
|
rootvnode = newdp;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Unmount a file system.
|
|
|
|
*
|
|
|
|
* Note: unmount takes a path to the vnode mounted on as argument,
|
|
|
|
* not special file (as before).
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct unmount_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
unmount(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct unmount_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct vnode *vp;
|
|
|
|
struct mount *mp;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1997-02-10 02:22:35 +00:00
|
|
|
mp = vp->v_mount;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
1997-02-10 02:22:35 +00:00
|
|
|
* Only root, or the user that did the original mount is
|
|
|
|
* permitted to unmount this filesystem.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1997-02-10 02:22:35 +00:00
|
|
|
if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
|
1999-04-27 11:18:52 +00:00
|
|
|
(error = suser(p))) {
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1997-02-10 02:22:35 +00:00
|
|
|
* Don't allow unmounting the root file system.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1997-02-10 02:22:35 +00:00
|
|
|
if (mp->mnt_flag & MNT_ROOTFS) {
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(vp);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
1994-08-20 16:03:26 +00:00
|
|
|
|
|
|
|
/*
|
1997-02-10 02:22:35 +00:00
|
|
|
* Must be the root of the filesystem
|
1994-08-20 16:03:26 +00:00
|
|
|
*/
|
1997-02-10 02:22:35 +00:00
|
|
|
if ((vp->v_flag & VROOT) == 0) {
|
|
|
|
vput(vp);
|
1994-08-20 16:03:26 +00:00
|
|
|
return (EINVAL);
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
|
|
|
vput(vp);
|
|
|
|
return (dounmount(mp, SCARG(uap, flags), p));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do the actual file system unmount.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1994-05-24 10:09:53 +00:00
|
|
|
dounmount(mp, flags, p)
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int flags;
|
|
|
|
struct proc *p;
|
|
|
|
{
|
2001-02-28 20:54:28 +00:00
|
|
|
struct vnode *coveredvp, *fsrootvp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
1998-07-03 03:47:24 +00:00
|
|
|
int async_flag;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1997-11-12 05:42:33 +00:00
|
|
|
mp->mnt_kern_flag |= MNTK_UNMOUNT;
|
2000-10-04 01:29:17 +00:00
|
|
|
lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_start_write(NULL, &mp, V_WAIT);
|
1997-07-17 07:17:33 +00:00
|
|
|
|
|
|
|
if (mp->mnt_flag & MNT_EXPUBLIC)
|
|
|
|
vfs_setpublicfs(NULL, NULL, NULL);
|
|
|
|
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
vfs_msync(mp, MNT_WAIT);
|
1998-07-03 03:47:24 +00:00
|
|
|
async_flag = mp->mnt_flag & MNT_ASYNC;
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_flag &=~ MNT_ASYNC;
|
|
|
|
cache_purgevfs(mp); /* remove cache entries for this file sys */
|
1998-03-08 09:59:44 +00:00
|
|
|
if (mp->mnt_syncer != NULL)
|
|
|
|
vrele(mp->mnt_syncer);
|
2001-02-28 20:54:28 +00:00
|
|
|
/* Move process cdir/rdir refs on fs root to underlying vnode. */
|
|
|
|
if (VFS_ROOT(mp, &fsrootvp) == 0) {
|
|
|
|
if (mp->mnt_vnodecovered != NULL)
|
|
|
|
checkdirs(fsrootvp, mp->mnt_vnodecovered);
|
|
|
|
if (fsrootvp == rootvnode) {
|
|
|
|
vrele(rootvnode);
|
|
|
|
rootvnode = NULL;
|
|
|
|
}
|
|
|
|
vput(fsrootvp);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
if (((mp->mnt_flag & MNT_RDONLY) ||
|
|
|
|
(error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) ||
|
2000-07-11 22:07:57 +00:00
|
|
|
(flags & MNT_FORCE)) {
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VFS_UNMOUNT(mp, flags, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
}
|
|
|
|
vn_finished_write(mp);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error) {
|
2001-02-28 20:54:28 +00:00
|
|
|
/* Undo cdir/rdir and rootvnode changes made above. */
|
|
|
|
if (VFS_ROOT(mp, &fsrootvp) == 0) {
|
|
|
|
if (mp->mnt_vnodecovered != NULL)
|
|
|
|
checkdirs(mp->mnt_vnodecovered, fsrootvp);
|
|
|
|
if (rootvnode == NULL) {
|
|
|
|
rootvnode = fsrootvp;
|
|
|
|
vref(rootvnode);
|
|
|
|
}
|
|
|
|
vput(fsrootvp);
|
|
|
|
}
|
1998-03-08 09:59:44 +00:00
|
|
|
if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
|
|
|
|
(void) vfs_allocate_syncvnode(mp);
|
1997-11-12 05:42:33 +00:00
|
|
|
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
|
1998-07-03 03:47:24 +00:00
|
|
|
mp->mnt_flag |= async_flag;
|
1997-02-10 02:22:35 +00:00
|
|
|
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
|
2000-10-04 01:29:17 +00:00
|
|
|
&mountlist_mtx, p);
|
1998-12-12 21:07:09 +00:00
|
|
|
if (mp->mnt_kern_flag & MNTK_MWAIT)
|
|
|
|
wakeup((caddr_t)mp);
|
1997-02-10 02:22:35 +00:00
|
|
|
return (error);
|
|
|
|
}
|
1999-11-20 10:00:46 +00:00
|
|
|
TAILQ_REMOVE(&mountlist, mp, mnt_list);
|
1997-02-10 02:22:35 +00:00
|
|
|
if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
|
|
|
|
coveredvp->v_mountedhere = (struct mount *)0;
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(coveredvp);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
mp->mnt_vfc->vfc_refcount--;
|
1999-11-16 16:28:58 +00:00
|
|
|
if (!LIST_EMPTY(&mp->mnt_vnodelist))
|
1997-02-10 02:22:35 +00:00
|
|
|
panic("unmount: dangling vnode");
|
2000-10-04 01:29:17 +00:00
|
|
|
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, p);
|
|
|
|
lockdestroy(&mp->mnt_lock);
|
1997-11-12 05:42:33 +00:00
|
|
|
if (mp->mnt_kern_flag & MNTK_MWAIT)
|
1997-02-10 02:22:35 +00:00
|
|
|
wakeup((caddr_t)mp);
|
|
|
|
free((caddr_t)mp, M_MOUNT);
|
|
|
|
return (0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sync each mounted filesystem.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1995-10-08 00:06:22 +00:00
|
|
|
struct sync_args {
|
|
|
|
int dummy;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1995-10-08 00:06:22 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
#ifdef DEBUG
|
1997-11-22 06:41:21 +00:00
|
|
|
static int syncprt = 0;
|
|
|
|
SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
|
1997-02-10 02:22:35 +00:00
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
sync(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
|
|
|
struct sync_args *uap;
|
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp, *nmp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int asyncflag;
|
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
|
2000-10-04 01:29:17 +00:00
|
|
|
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
1997-02-10 02:22:35 +00:00
|
|
|
continue;
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
|
|
|
|
vn_start_write(NULL, &mp, V_NOWAIT) == 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
asyncflag = mp->mnt_flag & MNT_ASYNC;
|
|
|
|
mp->mnt_flag &= ~MNT_ASYNC;
|
1995-05-21 21:39:31 +00:00
|
|
|
vfs_msync(mp, MNT_NOWAIT);
|
1998-03-08 09:59:44 +00:00
|
|
|
VFS_SYNC(mp, MNT_NOWAIT,
|
2000-07-11 22:07:57 +00:00
|
|
|
((p != NULL) ? p->p_ucred : NOCRED), p);
|
1998-03-08 09:59:44 +00:00
|
|
|
mp->mnt_flag |= asyncflag;
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
1997-02-10 02:22:35 +00:00
|
|
|
vfs_unbusy(mp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&mountlist_mtx);
|
1997-02-12 06:46:11 +00:00
|
|
|
#if 0
|
|
|
|
/*
|
|
|
|
* XXX don't call vfs_bufstats() yet because that routine
|
|
|
|
* was not imported in the Lite2 merge.
|
|
|
|
*/
|
1997-02-10 02:22:35 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (syncprt)
|
|
|
|
vfs_bufstats();
|
|
|
|
#endif /* DIAGNOSTIC */
|
1997-02-12 06:46:11 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no privisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
/* XXX PRISON: could be per prison flag */
|
|
|
|
static int prison_quotas;
|
|
|
|
#if 0
|
|
|
|
SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
|
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Change filesystem quotas.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct quotactl_args {
|
|
|
|
char *path;
|
|
|
|
int cmd;
|
|
|
|
int uid;
|
|
|
|
caddr_t arg;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
quotactl(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct quotactl_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) cmd;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(caddr_t) arg;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2001-02-21 06:39:57 +00:00
|
|
|
if (jailed(p->p_ucred) && !prison_quotas)
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no privisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
return (EPERM);
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2000-07-11 22:07:57 +00:00
|
|
|
error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
|
|
|
|
SCARG(uap, arg), p);
|
|
|
|
vn_finished_write(mp);
|
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get filesystem statistics.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct statfs_args {
|
|
|
|
char *path;
|
|
|
|
struct statfs *buf;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
statfs(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct statfs_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct statfs *) buf;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct mount *mp;
|
|
|
|
register struct statfs *sp;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
1997-03-23 20:08:22 +00:00
|
|
|
struct statfs sb;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
mp = nd.ni_vp->v_mount;
|
|
|
|
sp = &mp->mnt_stat;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
1994-10-02 17:35:40 +00:00
|
|
|
error = VFS_STATFS(mp, sp, p);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no privisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
if (suser_xxx(p->p_ucred, 0, 0)) {
|
1997-03-23 20:08:22 +00:00
|
|
|
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
|
|
|
|
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
|
|
|
|
sp = &sb;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get filesystem statistics.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fstatfs_args {
|
|
|
|
int fd;
|
|
|
|
struct statfs *buf;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
fstatfs(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fstatfs_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(struct statfs *) buf;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
struct mount *mp;
|
|
|
|
register struct statfs *sp;
|
|
|
|
int error;
|
1997-03-23 20:08:22 +00:00
|
|
|
struct statfs sb;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
mp = ((struct vnode *)fp->f_data)->v_mount;
|
|
|
|
sp = &mp->mnt_stat;
|
1994-10-02 17:35:40 +00:00
|
|
|
error = VFS_STATFS(mp, sp, p);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no privisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
if (suser_xxx(p->p_ucred, 0, 0)) {
|
1997-03-23 20:08:22 +00:00
|
|
|
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
|
|
|
|
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
|
|
|
|
sp = &sb;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get statistics on all filesystems.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct getfsstat_args {
|
|
|
|
struct statfs *buf;
|
|
|
|
long bufsize;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
getfsstat(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct getfsstat_args /* {
|
|
|
|
syscallarg(struct statfs *) buf;
|
|
|
|
syscallarg(long) bufsize;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct mount *mp, *nmp;
|
|
|
|
register struct statfs *sp;
|
|
|
|
caddr_t sfsp;
|
|
|
|
long count, maxcount, error;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
|
|
|
|
sfsp = (caddr_t)SCARG(uap, buf);
|
1995-08-11 11:31:18 +00:00
|
|
|
count = 0;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
|
2000-10-04 01:29:17 +00:00
|
|
|
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
1996-01-16 13:07:14 +00:00
|
|
|
continue;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
if (sfsp && count < maxcount) {
|
1994-05-24 10:09:53 +00:00
|
|
|
sp = &mp->mnt_stat;
|
|
|
|
/*
|
1998-03-08 09:59:44 +00:00
|
|
|
* If MNT_NOWAIT or MNT_LAZY is specified, do not
|
|
|
|
* refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
|
|
|
|
* overrides MNT_WAIT.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1998-03-08 09:59:44 +00:00
|
|
|
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
|
1997-02-10 02:22:35 +00:00
|
|
|
(SCARG(uap, flags) & MNT_WAIT)) &&
|
1996-01-16 13:07:14 +00:00
|
|
|
(error = VFS_STATFS(mp, sp, p))) {
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
1997-02-10 02:22:35 +00:00
|
|
|
vfs_unbusy(mp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
continue;
|
1996-01-16 13:07:14 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
|
1994-10-08 22:33:43 +00:00
|
|
|
error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
|
1996-01-16 13:07:14 +00:00
|
|
|
if (error) {
|
1997-02-10 02:22:35 +00:00
|
|
|
vfs_unbusy(mp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1996-01-16 13:07:14 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
sfsp += sizeof(*sp);
|
|
|
|
}
|
|
|
|
count++;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mountlist_mtx);
|
1999-11-20 10:00:46 +00:00
|
|
|
nmp = TAILQ_NEXT(mp, mnt_list);
|
1997-02-10 02:22:35 +00:00
|
|
|
vfs_unbusy(mp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&mountlist_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (sfsp && count > maxcount)
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = maxcount;
|
1994-05-24 10:09:53 +00:00
|
|
|
else
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = count;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change current working directory to a given file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchdir_args {
|
|
|
|
int fd;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
fchdir(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct fchdir_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct filedesc *fdp = p->p_fd;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct vnode *vp, *tdp;
|
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
1997-02-10 02:22:35 +00:00
|
|
|
VREF(vp);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
error = ENOTDIR;
|
|
|
|
else
|
|
|
|
error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
|
1997-02-10 02:22:35 +00:00
|
|
|
while (!error && (mp = vp->v_mountedhere) != NULL) {
|
|
|
|
if (vfs_busy(mp, 0, 0, p))
|
|
|
|
continue;
|
|
|
|
error = VFS_ROOT(mp, &tdp);
|
|
|
|
vfs_unbusy(mp, p);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
vput(vp);
|
|
|
|
vp = tdp;
|
|
|
|
}
|
|
|
|
if (error) {
|
|
|
|
vput(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(fdp->fd_cdir);
|
|
|
|
fdp->fd_cdir = vp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change current working directory (``.'').
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chdir_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
chdir(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct chdir_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct filedesc *fdp = p->p_fd;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = change_dir(&nd, p)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(fdp->fd_cdir);
|
|
|
|
fdp->fd_cdir = nd.ni_vp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1999-03-23 14:26:40 +00:00
|
|
|
/*
|
|
|
|
* Helper function for raised chroot(2) security function: Refuse if
|
|
|
|
* any filedescriptors are open directories.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
chroot_refuse_vdir_fds(fdp)
|
|
|
|
struct filedesc *fdp;
|
|
|
|
{
|
|
|
|
struct vnode *vp;
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
|
|
|
|
error = getvnode(fdp, fd, &fp);
|
|
|
|
if (error)
|
|
|
|
continue;
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
continue;
|
|
|
|
return(EPERM);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This sysctl determines if we will allow a process to chroot(2) if it
|
|
|
|
* has a directory open:
|
|
|
|
* 0: disallowed for all processes.
|
|
|
|
* 1: allowed for processes that were not already chroot(2)'ed.
|
|
|
|
* 2: allowed for all processes.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int chroot_allow_open_directories = 1;
|
|
|
|
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
|
|
|
|
&chroot_allow_open_directories, 0, "");
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Change notion of root (``/'') directory.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chroot_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
chroot(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct chroot_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct filedesc *fdp = p->p_fd;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no privisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
error = suser_xxx(0, p, PRISON_ROOT);
|
1999-03-23 14:26:40 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (chroot_allow_open_directories == 0 ||
|
|
|
|
(chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode))
|
|
|
|
error = chroot_refuse_vdir_fds(fdp);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = change_dir(&nd, p)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-02-15 04:17:09 +00:00
|
|
|
vrele(fdp->fd_rdir);
|
1994-05-24 10:09:53 +00:00
|
|
|
fdp->fd_rdir = nd.ni_vp;
|
1999-09-25 14:14:21 +00:00
|
|
|
if (!fdp->fd_jdir) {
|
|
|
|
fdp->fd_jdir = nd.ni_vp;
|
|
|
|
VREF(fdp->fd_jdir);
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Common routine for chroot and chdir.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
change_dir(ndp, p)
|
|
|
|
register struct nameidata *ndp;
|
|
|
|
struct proc *p;
|
|
|
|
{
|
|
|
|
struct vnode *vp;
|
|
|
|
int error;
|
|
|
|
|
1994-10-02 17:35:40 +00:00
|
|
|
error = namei(ndp);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = ndp->ni_vp;
|
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
error = ENOTDIR;
|
|
|
|
else
|
|
|
|
error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
|
|
|
|
if (error)
|
1997-02-10 02:22:35 +00:00
|
|
|
vput(vp);
|
|
|
|
else
|
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check permissions, allocate an open file structure,
|
|
|
|
* and call the device open routine if any.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct open_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
open(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct open_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-04 03:34:11 +00:00
|
|
|
struct filedesc *fdp = p->p_fd;
|
|
|
|
struct file *fp;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct vattr vat;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1997-10-28 10:29:55 +00:00
|
|
|
int cmode, flags, oflags;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *nfp;
|
|
|
|
int type, indx, error;
|
|
|
|
struct flock lf;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-10-28 10:29:55 +00:00
|
|
|
oflags = SCARG(uap, flags);
|
|
|
|
if ((oflags & O_ACCMODE) == O_ACCMODE)
|
1997-10-22 07:28:51 +00:00
|
|
|
return (EINVAL);
|
1997-10-28 10:29:55 +00:00
|
|
|
flags = FFLAGS(oflags);
|
1994-10-02 17:35:40 +00:00
|
|
|
error = falloc(p, &nfp, &indx);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
fp = nfp;
|
1997-02-10 02:22:35 +00:00
|
|
|
cmode = ((SCARG(uap, mode) &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
|
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1994-05-24 10:09:53 +00:00
|
|
|
p->p_dupfd = -indx - 1; /* XXX check for fdopen */
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* Bump the ref count to prevent another process from closing
|
|
|
|
* the descriptor while we are blocked in vn_open()
|
|
|
|
*/
|
|
|
|
fhold(fp);
|
2000-07-04 03:34:11 +00:00
|
|
|
error = vn_open(&nd, &flags, cmode);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error) {
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* release our own reference
|
|
|
|
*/
|
|
|
|
fdrop(fp, p);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* handle special fdopen() case. bleh. dupfdopen() is
|
|
|
|
* responsible for dropping the old contents of ofiles[indx]
|
|
|
|
* if it succeeds.
|
|
|
|
*/
|
1994-05-24 10:09:53 +00:00
|
|
|
if ((error == ENODEV || error == ENXIO) &&
|
1997-02-10 02:22:35 +00:00
|
|
|
p->p_dupfd >= 0 && /* XXX from fdopen */
|
1994-05-24 10:09:53 +00:00
|
|
|
(error =
|
2000-11-18 21:01:04 +00:00
|
|
|
dupfdopen(p, fdp, indx, p->p_dupfd, flags, error)) == 0) {
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = indx;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* Clean up the descriptor, but only if another thread hadn't
|
|
|
|
* replaced or closed it.
|
|
|
|
*/
|
|
|
|
if (fdp->fd_ofiles[indx] == fp) {
|
|
|
|
fdp->fd_ofiles[indx] = NULL;
|
|
|
|
fdrop(fp, p);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
p->p_dupfd = 0;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp = nd.ni_vp;
|
1996-12-19 19:42:37 +00:00
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* There should be 2 references on the file, one from the descriptor
|
|
|
|
* table, and one for us.
|
|
|
|
*
|
|
|
|
* Handle the case where someone closed the file (via its file
|
|
|
|
* descriptor) while we were blocked. The end result should look
|
|
|
|
* like opening the file succeeded but it was immediately closed.
|
|
|
|
*/
|
|
|
|
if (fp->f_count == 1) {
|
|
|
|
KASSERT(fdp->fd_ofiles[indx] != fp,
|
|
|
|
("Open file descriptor lost all refs"));
|
|
|
|
VOP_UNLOCK(vp, 0, p);
|
|
|
|
vn_close(vp, flags & FMASK, fp->f_cred, p);
|
|
|
|
fdrop(fp, p);
|
|
|
|
p->p_retval[0] = indx;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_data = (caddr_t)vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag = flags & FMASK;
|
|
|
|
fp->f_ops = &vnops;
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
|
2000-07-04 03:34:11 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (flags & (O_EXLOCK | O_SHLOCK)) {
|
|
|
|
lf.l_whence = SEEK_SET;
|
|
|
|
lf.l_start = 0;
|
|
|
|
lf.l_len = 0;
|
|
|
|
if (flags & O_EXLOCK)
|
|
|
|
lf.l_type = F_WRLCK;
|
|
|
|
else
|
|
|
|
lf.l_type = F_RDLCK;
|
|
|
|
type = F_FLOCK;
|
|
|
|
if ((flags & FNONBLOCK) == 0)
|
|
|
|
type |= F_WAIT;
|
2000-07-04 03:34:11 +00:00
|
|
|
if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0)
|
|
|
|
goto bad;
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag |= FHASLOCK;
|
|
|
|
}
|
2000-07-04 03:34:11 +00:00
|
|
|
if (flags & O_TRUNC) {
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
goto bad;
|
2000-07-04 03:34:11 +00:00
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
|
|
|
VATTR_NULL(&vat);
|
|
|
|
vat.va_size = 0;
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
|
|
|
error = VOP_SETATTR(vp, &vat, p->p_ucred, p);
|
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
2000-07-04 03:34:11 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
}
|
1999-08-12 20:38:32 +00:00
|
|
|
/* assert that vn_open created a backing object if one is needed */
|
2000-09-12 09:49:08 +00:00
|
|
|
KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
|
1999-08-12 20:38:32 +00:00
|
|
|
("open: vmio vnode has no backing object after vn_open"));
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* Release our private reference, leaving the one associated with
|
|
|
|
* the descriptor table intact.
|
|
|
|
*/
|
|
|
|
fdrop(fp, p);
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = indx;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
2000-07-04 03:34:11 +00:00
|
|
|
bad:
|
2000-11-18 21:01:04 +00:00
|
|
|
if (fdp->fd_ofiles[indx] == fp) {
|
|
|
|
fdp->fd_ofiles[indx] = NULL;
|
|
|
|
fdrop(fp, p);
|
|
|
|
}
|
|
|
|
fdrop(fp, p);
|
2000-07-04 03:34:11 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef COMPAT_43
/*
 * Create a file.
 *
 * Historic creat(2): implemented by rewriting the arguments into an
 * equivalent open(2) with O_WRONLY|O_CREAT|O_TRUNC.
 */
#ifndef _SYS_SYSPROTO_H_
struct ocreat_args {
	char	*path;
	int	mode;
};
#endif
int
ocreat(p, uap)
	struct proc *p;
	register struct ocreat_args /* {
		syscallarg(char *) path;
		syscallarg(int) mode;
	} */ *uap;
{
	struct open_args /* {
		syscallarg(char *) path;
		syscallarg(int) flags;
		syscallarg(int) mode;
	} */ nuap;

	SCARG(&nuap, path) = SCARG(uap, path);
	SCARG(&nuap, mode) = SCARG(uap, mode);
	SCARG(&nuap, flags) = O_WRONLY | O_CREAT | O_TRUNC;
	return (open(p, &nuap));
}
#endif /* COMPAT_43 */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a special file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mknod_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
int dev;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
mknod(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct mknod_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
syscallarg(int) dev;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
1998-06-07 17:13:14 +00:00
|
|
|
int whiteout = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no privisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
switch (SCARG(uap, mode) & S_IFMT) {
|
|
|
|
case S_IFCHR:
|
|
|
|
case S_IFBLK:
|
|
|
|
error = suser(p);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = suser_xxx(0, p, PRISON_ROOT);
|
|
|
|
break;
|
|
|
|
}
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vp != NULL) {
|
|
|
|
vrele(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EEXIST;
|
2000-07-11 22:07:57 +00:00
|
|
|
} else {
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
|
|
|
|
vattr.va_rdev = SCARG(uap, dev);
|
|
|
|
whiteout = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
switch (SCARG(uap, mode) & S_IFMT) {
|
1994-05-24 10:09:53 +00:00
|
|
|
case S_IFMT: /* used by badsect to flag bad sectors */
|
|
|
|
vattr.va_type = VBAD;
|
|
|
|
break;
|
|
|
|
case S_IFCHR:
|
|
|
|
vattr.va_type = VCHR;
|
|
|
|
break;
|
|
|
|
case S_IFBLK:
|
|
|
|
vattr.va_type = VBLK;
|
|
|
|
break;
|
1997-02-10 02:22:35 +00:00
|
|
|
case S_IFWHT:
|
|
|
|
whiteout = 1;
|
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
if (!error) {
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
|
1999-12-15 23:02:35 +00:00
|
|
|
if (whiteout)
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
|
1999-12-15 23:02:35 +00:00
|
|
|
else {
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
|
|
|
|
&nd.ni_cnd, &vattr);
|
1999-11-13 14:35:50 +00:00
|
|
|
if (error == 0)
|
|
|
|
vput(nd.ni_vp);
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mknod");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "mknod");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1997-02-10 02:22:35 +00:00
|
|
|
* Create a named pipe.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mkfifo_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
mkfifo(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct mkfifo_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if (nd.ni_vp != NULL) {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EEXIST);
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_type = VFIFO;
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
|
|
|
|
VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
|
1998-05-07 04:58:58 +00:00
|
|
|
error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
|
1999-11-13 14:35:50 +00:00
|
|
|
if (error == 0)
|
|
|
|
vput(nd.ni_vp);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-07 04:58:58 +00:00
|
|
|
vput(nd.ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-07 04:58:58 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Make a hard file link.
 */
#ifndef _SYS_SYSPROTO_H_
struct link_args {
	char	*path;
	char	*link;
};
#endif
/* ARGSUSED */
int
link(p, uap)
	struct proc *p;
	register struct link_args /* {
		syscallarg(char *) path;
		syscallarg(char *) link;
	} */ *uap;
{
	struct vnode *vp;
	struct mount *mp;
	struct nameidata nd;
	int error;

	bwillwrite();
	/* Look up the existing file; FOLLOW so a symlink names its target. */
	NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	/* Hard links to directories are forbidden. */
	if (vp->v_type == VDIR) {
		vrele(vp);
		return (EPERM);		/* POSIX */
	}
	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		vrele(vp);
		return (error);
	}
	/*
	 * Look up the new name; CREATE leaves the parent (ni_dvp) locked.
	 * The target must not already exist.
	 */
	NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), p);
	if ((error = namei(&nd)) == 0) {
		if (nd.ni_vp != NULL) {
			vrele(nd.ni_vp);
			error = EEXIST;
		} else {
			VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
			VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
			error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vput(nd.ni_dvp);
	}
	vrele(vp);
	vn_finished_write(mp);
	/* NOTE(review): nd fields may be stale here if namei() failed above. */
	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "link");
	ASSERT_VOP_UNLOCKED(nd.ni_vp, "link");
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a symbolic link.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct symlink_args {
|
|
|
|
char *path;
|
|
|
|
char *link;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
symlink(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct symlink_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(char *) link;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
char *path;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-09-21 04:24:27 +00:00
|
|
|
path = zalloc(namei_zone);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
|
|
|
if (nd.ni_vp) {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EEXIST;
|
|
|
|
goto out;
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1999-11-13 20:58:17 +00:00
|
|
|
if (error == 0)
|
|
|
|
vput(nd.ni_vp);
|
1998-05-07 04:58:58 +00:00
|
|
|
vput(nd.ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
|
1999-12-12 03:28:14 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
|
1994-05-24 10:09:53 +00:00
|
|
|
out:
|
1997-09-21 04:24:27 +00:00
|
|
|
zfree(namei_zone, path);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
 * Delete a whiteout from the filesystem.
 */
/* ARGSUSED */
int
undelete(p, uap)
	struct proc *p;
	register struct undelete_args /* {
		syscallarg(char *) path;
	} */ *uap;
{
	int error;
	struct mount *mp;
	struct nameidata nd;

restart:
	bwillwrite();
	/* DOWHITEOUT makes the lookup report whiteout entries. */
	NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE,
	    SCARG(uap, path), p);
	error = namei(&nd);
	if (error)
		return (error);

	/* Only a pure whiteout entry (no real vnode) can be undeleted. */
	if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (nd.ni_vp)
			vrele(nd.ni_vp);
		vput(nd.ni_dvp);
		return (EEXIST);
	}
	/* Can't get a write ref without sleeping: unwind, wait, retry. */
	if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vput(nd.ni_dvp);
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}
	VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
	error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vput(nd.ni_dvp);
	vn_finished_write(mp);
	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
	ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
	return (error);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Delete a name from the filesystem.
 */
#ifndef _SYS_SYSPROTO_H_
struct unlink_args {
	char	*path;
};
#endif
/* ARGSUSED */
int
unlink(p, uap)
	struct proc *p;
	struct unlink_args /* {
		syscallarg(char *) path;
	} */ *uap;
{
	struct mount *mp;
	struct vnode *vp;
	int error;
	struct nameidata nd;

restart:
	bwillwrite();
	NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type == VDIR)
		error = EPERM;		/* POSIX */
	else {
		/*
		 * The root of a mounted filesystem cannot be deleted.
		 *
		 * XXX: can this only be a VDIR case?
		 */
		if (vp->v_flag & VROOT)
			error = EBUSY;
	}
	/* Can't get a write ref without sleeping: unwind, wait, retry. */
	if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vrele(vp);
		vput(nd.ni_dvp);
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	/* Only attempt the removal if the checks above did not fail. */
	if (!error) {
		VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
		error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vput(nd.ni_dvp);
	vput(vp);
	vn_finished_write(mp);
	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "unlink");
	ASSERT_VOP_UNLOCKED(nd.ni_vp, "unlink");
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Reposition read/write file offset.
 */
#ifndef _SYS_SYSPROTO_H_
struct lseek_args {
	int	fd;
	int	pad;
	off_t	offset;
	int	whence;
};
#endif
int
lseek(p, uap)
	struct proc *p;
	register struct lseek_args /* {
		syscallarg(int) fd;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
		syscallarg(int) whence;
	} */ *uap;
{
	struct ucred *cred = p->p_ucred;
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vattr vattr;
	int error;

	/* Validate the descriptor; the unsigned cast also rejects fd < 0. */
	if ((u_int)SCARG(uap, fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL)
		return (EBADF);
	/* Seeking is only meaningful on vnodes (not pipes/sockets). */
	if (fp->f_type != DTYPE_VNODE)
		return (ESPIPE);
	switch (SCARG(uap, whence)) {
	case L_INCR:
		fp->f_offset += SCARG(uap, offset);
		break;
	case L_XTND:
		/* Need the current file size for end-relative seeks. */
		error=VOP_GETATTR((struct vnode *)fp->f_data, &vattr, cred, p);
		if (error)
			return (error);
		fp->f_offset = SCARG(uap, offset) + vattr.va_size;
		break;
	case L_SET:
		fp->f_offset = SCARG(uap, offset);
		break;
	default:
		return (EINVAL);
	}
	/* Return the (64-bit) resulting offset via the retval slot. */
	*(off_t *)(p->p_retval) = fp->f_offset;
	return (0);
}
|
|
|
|
|
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
/*
 * Reposition read/write file offset (old 4.3BSD entry point).
 */
#ifndef _SYS_SYSPROTO_H_
struct olseek_args {
	int	fd;
	long	offset;
	int	whence;
};
#endif
int
olseek(p, uap)
	struct proc *p;
	register struct olseek_args /* {
		syscallarg(int) fd;
		syscallarg(long) offset;
		syscallarg(int) whence;
	} */ *uap;
{
	struct lseek_args /* {
		syscallarg(int) fd;
		syscallarg(int) pad;
		syscallarg(off_t) offset;
		syscallarg(int) whence;
	} */ nuap;

	/*
	 * Widen the old long offset into the modern argument layout
	 * and hand the real work to lseek().
	 */
	SCARG(&nuap, fd) = SCARG(uap, fd);
	SCARG(&nuap, whence) = SCARG(uap, whence);
	SCARG(&nuap, offset) = SCARG(uap, offset);
	return (lseek(p, &nuap));
}
#endif /* COMPAT_43 */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check access permissions.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct access_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
access(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct access_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-09-02 12:31:55 +00:00
|
|
|
struct ucred *cred, *tmpcred;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct vnode *vp;
|
2000-09-02 12:31:55 +00:00
|
|
|
int error, flags;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-09-02 12:31:55 +00:00
|
|
|
cred = p->p_ucred;
|
|
|
|
/*
|
|
|
|
* Create and modify a temporary credential instead of one that
|
|
|
|
* is potentially shared. This could also mess up socket
|
|
|
|
* buffer accounting which can run in an interrupt context.
|
|
|
|
*
|
|
|
|
* XXX - Depending on how "threads" are finally implemented, it
|
|
|
|
* may be better to explicitly pass the credential to namei()
|
|
|
|
* rather than to modify the potentially shared process structure.
|
|
|
|
*/
|
|
|
|
tmpcred = crdup(cred);
|
|
|
|
tmpcred->cr_uid = p->p_cred->p_ruid;
|
|
|
|
tmpcred->cr_groups[0] = p->p_cred->p_rgid;
|
|
|
|
p->p_ucred = tmpcred;
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
1997-02-10 02:22:35 +00:00
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out1;
|
|
|
|
vp = nd.ni_vp;
|
|
|
|
|
|
|
|
/* Flags == 0 means only check for existence. */
|
1997-02-10 02:22:35 +00:00
|
|
|
if (SCARG(uap, flags)) {
|
1994-05-24 10:09:53 +00:00
|
|
|
flags = 0;
|
1997-02-10 02:22:35 +00:00
|
|
|
if (SCARG(uap, flags) & R_OK)
|
1994-05-24 10:09:53 +00:00
|
|
|
flags |= VREAD;
|
1997-02-10 02:22:35 +00:00
|
|
|
if (SCARG(uap, flags) & W_OK)
|
1994-05-24 10:09:53 +00:00
|
|
|
flags |= VWRITE;
|
1997-02-10 02:22:35 +00:00
|
|
|
if (SCARG(uap, flags) & X_OK)
|
1994-05-24 10:09:53 +00:00
|
|
|
flags |= VEXEC;
|
|
|
|
if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
|
|
|
|
error = VOP_ACCESS(vp, flags, cred, p);
|
|
|
|
}
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(vp);
|
|
|
|
out1:
|
2000-09-02 12:31:55 +00:00
|
|
|
p->p_ucred = cred;
|
|
|
|
crfree(tmpcred);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
|
|
|
|
/*
 * Get file status; this version follows links.
 */
#ifndef _SYS_SYSPROTO_H_
struct ostat_args {
	char	*path;
	struct ostat *ub;
};
#endif
/* ARGSUSED */
int
ostat(p, uap)
	struct proc *p;
	register struct ostat_args /* {
		syscallarg(char *) path;
		syscallarg(struct ostat *) ub;
	} */ *uap;
{
	struct stat sb;
	struct ostat osb;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
	    SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	/* Translate to the historical (4.3BSD) stat layout for copyout. */
	cvtstat(&sb, &osb);
	error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Get file status; this version does not follow links.
 */
#ifndef _SYS_SYSPROTO_H_
struct olstat_args {
	char	*path;
	struct ostat *ub;
};
#endif
/* ARGSUSED */
int
olstat(p, uap)
	struct proc *p;
	register struct olstat_args /* {
		syscallarg(char *) path;
		syscallarg(struct ostat *) ub;
	} */ *uap;
{
	struct vnode *vp;
	struct stat sb;
	struct ostat osb;
	int error;
	struct nameidata nd;

	/* NOFOLLOW: stat the link itself, not its target. */
	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
	    SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	error = vn_stat(vp, &sb, p);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vput(vp);
	if (error)
		return (error);
	/* Translate to the historical (4.3BSD) stat layout for copyout. */
	cvtstat(&sb, &osb);
	error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert from an old to a new stat structure.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
|
1994-05-24 10:09:53 +00:00
|
|
|
cvtstat(st, ost)
|
|
|
|
struct stat *st;
|
|
|
|
struct ostat *ost;
|
|
|
|
{
|
|
|
|
|
|
|
|
ost->st_dev = st->st_dev;
|
|
|
|
ost->st_ino = st->st_ino;
|
|
|
|
ost->st_mode = st->st_mode;
|
|
|
|
ost->st_nlink = st->st_nlink;
|
|
|
|
ost->st_uid = st->st_uid;
|
|
|
|
ost->st_gid = st->st_gid;
|
|
|
|
ost->st_rdev = st->st_rdev;
|
|
|
|
if (st->st_size < (quad_t)1 << 32)
|
|
|
|
ost->st_size = st->st_size;
|
|
|
|
else
|
|
|
|
ost->st_size = -2;
|
|
|
|
ost->st_atime = st->st_atime;
|
|
|
|
ost->st_mtime = st->st_mtime;
|
|
|
|
ost->st_ctime = st->st_ctime;
|
|
|
|
ost->st_blksize = st->st_blksize;
|
|
|
|
ost->st_blocks = st->st_blocks;
|
|
|
|
ost->st_flags = st->st_flags;
|
|
|
|
ost->st_gen = st->st_gen;
|
|
|
|
}
|
|
|
|
#endif /* COMPAT_43 || COMPAT_SUNOS */
|
|
|
|
|
|
|
|
/*
 * Get file status; this version follows links.
 */
#ifndef _SYS_SYSPROTO_H_
struct stat_args {
	char	*path;
	struct stat *ub;
};
#endif
/* ARGSUSED */
int
stat(p, uap)
	struct proc *p;
	register struct stat_args /* {
		syscallarg(char *) path;
		syscallarg(struct stat *) ub;
	} */ *uap;
{
	struct stat sb;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
	    SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vput(nd.ni_vp);
	if (error)
		return (error);
	error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Get file status; this version does not follow links.
 */
#ifndef _SYS_SYSPROTO_H_
struct lstat_args {
	char	*path;
	struct stat *ub;
};
#endif
/* ARGSUSED */
int
lstat(p, uap)
	struct proc *p;
	register struct lstat_args /* {
		syscallarg(char *) path;
		syscallarg(struct stat *) ub;
	} */ *uap;
{
	int error;
	struct vnode *vp;
	struct stat sb;
	struct nameidata nd;

	/* NOFOLLOW: stat the link itself, not its target. */
	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
	    SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	error = vn_stat(vp, &sb, p);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vput(vp);
	if (error)
		return (error);
	error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
	return (error);
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Implementation of the NetBSD stat() function.
|
|
|
|
* XXX This should probably be collapsed with the FreeBSD version,
|
|
|
|
* as the differences are only due to vn_stat() clearing spares at
|
|
|
|
* the end of the structures. vn_stat could be split to avoid this,
|
|
|
|
* and thus collapse the following to close to zero code.
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
void
|
|
|
|
cvtnstat(sb, nsb)
|
|
|
|
struct stat *sb;
|
|
|
|
struct nstat *nsb;
|
|
|
|
{
|
|
|
|
nsb->st_dev = sb->st_dev;
|
|
|
|
nsb->st_ino = sb->st_ino;
|
|
|
|
nsb->st_mode = sb->st_mode;
|
|
|
|
nsb->st_nlink = sb->st_nlink;
|
|
|
|
nsb->st_uid = sb->st_uid;
|
|
|
|
nsb->st_gid = sb->st_gid;
|
|
|
|
nsb->st_rdev = sb->st_rdev;
|
|
|
|
nsb->st_atimespec = sb->st_atimespec;
|
|
|
|
nsb->st_mtimespec = sb->st_mtimespec;
|
|
|
|
nsb->st_ctimespec = sb->st_ctimespec;
|
|
|
|
nsb->st_size = sb->st_size;
|
|
|
|
nsb->st_blocks = sb->st_blocks;
|
|
|
|
nsb->st_blksize = sb->st_blksize;
|
|
|
|
nsb->st_flags = sb->st_flags;
|
|
|
|
nsb->st_gen = sb->st_gen;
|
1999-11-18 08:14:20 +00:00
|
|
|
nsb->st_qspare[0] = sb->st_qspare[0];
|
|
|
|
nsb->st_qspare[1] = sb->st_qspare[1];
|
1998-05-11 03:55:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct nstat_args {
|
|
|
|
char *path;
|
|
|
|
struct nstat *ub;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
|
|
|
nstat(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct nstat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct nstat *) ub;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct stat sb;
|
|
|
|
struct nstat nsb;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-11 03:55:28 +00:00
|
|
|
error = vn_stat(nd.ni_vp, &sb, p);
|
|
|
|
vput(nd.ni_vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
cvtnstat(&sb, &nsb);
|
|
|
|
error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-09-14 19:13:59 +00:00
|
|
|
* NetBSD lstat. Get file status; this version does not follow links.
|
1998-05-11 03:55:28 +00:00
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lstat_args {
|
|
|
|
char *path;
|
|
|
|
struct stat *ub;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
|
|
|
nlstat(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct nlstat_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct nstat *) ub;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct stat sb;
|
|
|
|
struct nstat nsb;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-11 03:55:28 +00:00
|
|
|
error = vn_stat(vp, &sb, p);
|
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
cvtnstat(&sb, &nsb);
|
|
|
|
error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Get configurable pathname variables.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct pathconf_args {
|
|
|
|
char *path;
|
|
|
|
int name;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
pathconf(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct pathconf_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) name;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
1997-02-10 02:22:35 +00:00
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1997-11-06 19:29:57 +00:00
|
|
|
error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), p->p_retval);
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(nd.ni_vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return target name of a symbolic link.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct readlink_args {
|
|
|
|
char *path;
|
|
|
|
char *buf;
|
|
|
|
int count;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
readlink(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct readlink_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(int) count;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct vnode *vp;
|
|
|
|
struct iovec aiov;
|
|
|
|
struct uio auio;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
|
1997-02-10 02:22:35 +00:00
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp = nd.ni_vp;
|
|
|
|
if (vp->v_type != VLNK)
|
|
|
|
error = EINVAL;
|
|
|
|
else {
|
1997-02-10 02:22:35 +00:00
|
|
|
aiov.iov_base = SCARG(uap, buf);
|
|
|
|
aiov.iov_len = SCARG(uap, count);
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_offset = 0;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
1997-02-10 02:22:35 +00:00
|
|
|
auio.uio_resid = SCARG(uap, count);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_READLINK(vp, &auio, p->p_ucred);
|
|
|
|
}
|
|
|
|
vput(vp);
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for chflags() and fchflags().
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
|
|
|
setfflags(p, vp, flags)
|
|
|
|
struct proc *p;
|
|
|
|
struct vnode *vp;
|
|
|
|
int flags;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
1999-08-02 21:34:46 +00:00
|
|
|
/*
|
1999-08-04 04:52:18 +00:00
|
|
|
* Prevent non-root users from setting flags on devices. When
|
|
|
|
* a device is reused, users can retain ownership of the device
|
|
|
|
* if they are allowed to set flags and programs assume that
|
|
|
|
* chown can't fail when done as root.
|
1999-08-02 21:34:46 +00:00
|
|
|
*/
|
1999-08-04 04:52:18 +00:00
|
|
|
if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
|
|
|
|
((error = suser_xxx(p->p_ucred, p, PRISON_ROOT)) != 0))
|
|
|
|
return (error);
|
1999-08-03 17:07:04 +00:00
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
1998-05-11 03:55:28 +00:00
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
1998-09-24 15:02:46 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1998-05-11 03:55:28 +00:00
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_flags = flags;
|
|
|
|
error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
|
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1999-08-04 04:52:18 +00:00
|
|
|
return (error);
|
1998-05-11 03:55:28 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Change flags of a file given a path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chflags_args {
|
|
|
|
char *path;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
chflags(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct chflags_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-11 03:55:28 +00:00
|
|
|
error = setfflags(p, nd.ni_vp, SCARG(uap, flags));
|
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change flags of a file given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchflags_args {
|
|
|
|
int fd;
|
|
|
|
int flags;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
fchflags(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fchflags_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1998-05-11 03:55:28 +00:00
|
|
|
return setfflags(p, (struct vnode *) fp->f_data, SCARG(uap, flags));
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for chmod(), lchmod() and fchmod().
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
|
|
|
setfmode(p, vp, mode)
|
|
|
|
struct proc *p;
|
|
|
|
struct vnode *vp;
|
|
|
|
int mode;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
1998-09-24 15:02:46 +00:00
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1995-10-22 09:32:48 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1998-05-11 03:55:28 +00:00
|
|
|
vattr.va_mode = mode & ALLPERMS;
|
1995-10-22 09:32:48 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-11 03:55:28 +00:00
|
|
|
return error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change mode of a file given path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chmod_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
chmod(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct chmod_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-11 03:55:28 +00:00
|
|
|
error = setfmode(p, nd.ni_vp, SCARG(uap, mode));
|
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change mode of a file given path name (don't follow links.)
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lchmod_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
|
|
|
lchmod(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct lchmod_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-11 03:55:28 +00:00
|
|
|
error = setfmode(p, nd.ni_vp, SCARG(uap, mode));
|
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change mode of a file given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchmod_args {
|
|
|
|
int fd;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
fchmod(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fchmod_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1998-05-11 03:55:28 +00:00
|
|
|
return setfmode(p, (struct vnode *)fp->f_data, SCARG(uap, mode));
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation for chown(), lchown(), and fchown()
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
|
|
|
setfown(p, vp, uid, gid)
|
|
|
|
struct proc *p;
|
|
|
|
struct vnode *vp;
|
|
|
|
uid_t uid;
|
|
|
|
gid_t gid;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
1998-09-24 15:02:46 +00:00
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1995-10-22 09:32:48 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1998-05-11 03:55:28 +00:00
|
|
|
vattr.va_uid = uid;
|
|
|
|
vattr.va_gid = gid;
|
1995-10-22 09:32:48 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-11 03:55:28 +00:00
|
|
|
return error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set ownership given a path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct chown_args {
|
|
|
|
char *path;
|
|
|
|
int uid;
|
|
|
|
int gid;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
chown(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct chown_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(int) gid;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-11 03:55:28 +00:00
|
|
|
error = setfown(p, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
|
|
|
|
vrele(nd.ni_vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-03-31 12:21:37 +00:00
|
|
|
/*
|
|
|
|
* Set ownership given a path name, do not cross symlinks.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lchown_args {
|
|
|
|
char *path;
|
|
|
|
int uid;
|
|
|
|
int gid;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
lchown(p, uap)
|
1997-03-31 12:21:37 +00:00
|
|
|
struct proc *p;
|
|
|
|
register struct lchown_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(int) gid;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1997-03-31 12:21:37 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-11 03:55:28 +00:00
|
|
|
error = setfown(p, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
|
|
|
|
vrele(nd.ni_vp);
|
1997-03-31 12:21:37 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Set ownership given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fchown_args {
|
|
|
|
int fd;
|
|
|
|
int uid;
|
|
|
|
int gid;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
fchown(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fchown_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) uid;
|
|
|
|
syscallarg(int) gid;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1998-05-11 03:55:28 +00:00
|
|
|
return setfown(p, (struct vnode *)fp->f_data,
|
|
|
|
SCARG(uap, uid), SCARG(uap, gid));
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for utimes(), lutimes(), and futimes().
|
|
|
|
*/
|
1998-05-11 03:55:28 +00:00
|
|
|
static int
|
1999-08-22 01:46:57 +00:00
|
|
|
getutimes(usrtvp, tsp)
|
|
|
|
const struct timeval *usrtvp;
|
|
|
|
struct timespec *tsp;
|
|
|
|
{
|
|
|
|
struct timeval tv[2];
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (usrtvp == NULL) {
|
1999-08-22 16:50:30 +00:00
|
|
|
microtime(&tv[0]);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
|
1999-08-22 01:46:57 +00:00
|
|
|
tsp[1] = tsp[0];
|
|
|
|
} else {
|
|
|
|
if ((error = copyin(usrtvp, tv, sizeof (tv))) != 0)
|
|
|
|
return (error);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Common implementation code for utimes(), lutimes(), and futimes().
|
|
|
|
*/
|
1999-08-22 01:46:57 +00:00
|
|
|
static int
|
|
|
|
setutimes(p, vp, ts, nullflag)
|
1998-05-11 03:55:28 +00:00
|
|
|
struct proc *p;
|
|
|
|
struct vnode *vp;
|
1999-08-22 01:46:57 +00:00
|
|
|
const struct timespec *ts;
|
1998-05-11 03:55:28 +00:00
|
|
|
int nullflag;
|
|
|
|
{
|
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1998-05-11 03:55:28 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
1998-09-24 15:02:46 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1995-10-22 09:32:48 +00:00
|
|
|
VATTR_NULL(&vattr);
|
1999-08-22 01:46:57 +00:00
|
|
|
vattr.va_atime = ts[0];
|
|
|
|
vattr.va_mtime = ts[1];
|
1998-05-11 03:55:28 +00:00
|
|
|
if (nullflag)
|
|
|
|
vattr.va_vaflags |= VA_UTIMES_NULL;
|
1995-10-22 09:32:48 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1998-05-11 03:55:28 +00:00
|
|
|
return error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the access and modification times of a file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct utimes_args {
|
|
|
|
char *path;
|
|
|
|
struct timeval *tptr;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
utimes(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct utimes_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct timeval *) tptr;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timespec ts[2];
|
|
|
|
struct timeval *usrtvp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1999-08-22 01:46:57 +00:00
|
|
|
usrtvp = SCARG(uap, tptr);
|
|
|
|
if ((error = getutimes(usrtvp, ts)) != 0)
|
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1999-08-22 01:46:57 +00:00
|
|
|
error = setutimes(p, nd.ni_vp, ts, usrtvp == NULL);
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1998-05-11 03:55:28 +00:00
|
|
|
/*
|
|
|
|
* Set the access and modification times of a file.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct lutimes_args {
|
|
|
|
char *path;
|
|
|
|
struct timeval *tptr;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
|
|
|
lutimes(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct lutimes_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(struct timeval *) tptr;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timespec ts[2];
|
|
|
|
struct timeval *usrtvp;
|
1998-05-11 03:55:28 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1999-08-22 01:46:57 +00:00
|
|
|
usrtvp = SCARG(uap, tptr);
|
|
|
|
if ((error = getutimes(usrtvp, ts)) != 0)
|
|
|
|
return (error);
|
1999-07-29 17:02:56 +00:00
|
|
|
NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1999-08-22 01:46:57 +00:00
|
|
|
error = setutimes(p, nd.ni_vp, ts, usrtvp == NULL);
|
1998-05-11 03:55:28 +00:00
|
|
|
vrele(nd.ni_vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the access and modification times of a file.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct futimes_args {
|
|
|
|
int fd;
|
|
|
|
struct timeval *tptr;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
|
|
|
futimes(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct futimes_args /* {
|
|
|
|
syscallarg(int ) fd;
|
|
|
|
syscallarg(struct timeval *) tptr;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timespec ts[2];
|
1998-05-11 03:55:28 +00:00
|
|
|
struct file *fp;
|
1999-08-22 01:46:57 +00:00
|
|
|
struct timeval *usrtvp;
|
1998-05-11 03:55:28 +00:00
|
|
|
int error;
|
|
|
|
|
1999-08-22 01:46:57 +00:00
|
|
|
usrtvp = SCARG(uap, tptr);
|
|
|
|
if ((error = getutimes(usrtvp, ts)) != 0)
|
|
|
|
return (error);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1998-05-11 03:55:28 +00:00
|
|
|
return (error);
|
1999-08-22 01:46:57 +00:00
|
|
|
return setutimes(p, (struct vnode *)fp->f_data, ts, usrtvp == NULL);
|
1998-05-11 03:55:28 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Truncate a file given its path name.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct truncate_args {
|
|
|
|
char *path;
|
|
|
|
int pad;
|
|
|
|
off_t length;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
truncate(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct truncate_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) length;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1994-09-02 10:23:43 +00:00
|
|
|
if (uap->length < 0)
|
|
|
|
return(EINVAL);
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
|
|
|
|
vrele(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type == VDIR)
|
|
|
|
error = EISDIR;
|
|
|
|
else if ((error = vn_writechk(vp)) == 0 &&
|
|
|
|
(error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) == 0) {
|
|
|
|
VATTR_NULL(&vattr);
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_size = SCARG(uap, length);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
|
|
|
|
}
|
|
|
|
vput(vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Truncate a file given a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ftruncate_args {
|
|
|
|
int fd;
|
|
|
|
int pad;
|
|
|
|
off_t length;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
ftruncate(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct ftruncate_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) pad;
|
|
|
|
syscallarg(off_t) length;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
1994-09-02 10:23:43 +00:00
|
|
|
if (uap->length < 0)
|
|
|
|
return(EINVAL);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if ((fp->f_flag & FWRITE) == 0)
|
|
|
|
return (EINVAL);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type == VDIR)
|
|
|
|
error = EISDIR;
|
|
|
|
else if ((error = vn_writechk(vp)) == 0) {
|
|
|
|
VATTR_NULL(&vattr);
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_size = SCARG(uap, length);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, fp->f_cred, p);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
|
|
|
|
/*
 * Truncate a file given its path name.
 *
 * COMPAT_43/COMPAT_SUNOS shim: old binaries pass the length as a plain
 * long; forward to the modern truncate(), which takes an off_t.
 */
#ifndef _SYS_SYSPROTO_H_
struct otruncate_args {
	char	*path;
	long	length;
};
#endif
/* ARGSUSED */
int
otruncate(p, uap)
	struct proc *p;
	register struct otruncate_args /* {
		syscallarg(char *) path;
		syscallarg(long) length;
	} */ *uap;
{
	struct truncate_args /* {
		syscallarg(char *) path;
		syscallarg(int) pad;
		syscallarg(off_t) length;
	} */ nuap;

	/* Widen the old long length into the new off_t argument frame. */
	SCARG(&nuap, path) = SCARG(uap, path);
	SCARG(&nuap, length) = SCARG(uap, length);
	return (truncate(p, &nuap));
}
|
|
|
|
|
|
|
|
/*
 * Truncate a file given a file descriptor.
 *
 * COMPAT_43/COMPAT_SUNOS shim: old binaries pass the length as a plain
 * long; forward to the modern ftruncate(), which takes an off_t.
 */
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
/* ARGSUSED */
int
oftruncate(p, uap)
	struct proc *p;
	register struct oftruncate_args /* {
		syscallarg(int) fd;
		syscallarg(long) length;
	} */ *uap;
{
	struct ftruncate_args /* {
		syscallarg(int) fd;
		syscallarg(int) pad;
		syscallarg(off_t) length;
	} */ nuap;

	/* Widen the old long length into the new off_t argument frame. */
	SCARG(&nuap, fd) = SCARG(uap, fd);
	SCARG(&nuap, length) = SCARG(uap, length);
	return (ftruncate(p, &nuap));
}
|
|
|
|
#endif /* COMPAT_43 || COMPAT_SUNOS */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sync an open file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fsync_args {
|
|
|
|
int fd;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
fsync(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct fsync_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
2000-09-12 09:49:08 +00:00
|
|
|
vm_object_t obj;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
return (error);
|
1998-09-24 15:02:46 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
2000-09-12 09:49:08 +00:00
|
|
|
if (VOP_GETVOBJECT(vp, &obj) == 0)
|
|
|
|
vm_object_page_clean(obj, 0, 0, 0);
|
2000-06-16 08:48:51 +00:00
|
|
|
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
|
2000-07-03 13:26:54 +00:00
|
|
|
#ifdef SOFTUPDATES
|
2000-06-16 14:32:13 +00:00
|
|
|
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
|
|
|
|
error = softdep_fsync(vp);
|
2000-07-03 13:26:54 +00:00
|
|
|
#endif
|
2000-06-16 14:32:13 +00:00
|
|
|
|
1998-09-24 15:02:46 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rename files. Source and destination must either both be directories,
|
|
|
|
* or both not be directories. If target is a directory, it must be empty.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct rename_args {
|
|
|
|
char *from;
|
|
|
|
char *to;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
rename(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct rename_args /* {
|
|
|
|
syscallarg(char *) from;
|
|
|
|
syscallarg(char *) to;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *tvp, *fvp, *tdvp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct nameidata fromnd, tond;
|
|
|
|
int error;
|
|
|
|
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1994-05-24 10:09:53 +00:00
|
|
|
NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE,
|
1997-02-10 02:22:35 +00:00
|
|
|
SCARG(uap, from), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&fromnd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
fvp = fromnd.ni_vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(fvp, &mp, V_WAIT | PCATCH)) != 0) {
|
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
|
|
|
vrele(fromnd.ni_dvp);
|
|
|
|
vrele(fvp);
|
|
|
|
goto out1;
|
|
|
|
}
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | NOOBJ,
|
1997-02-10 02:22:35 +00:00
|
|
|
UIO_USERSPACE, SCARG(uap, to), p);
|
1995-07-31 00:35:58 +00:00
|
|
|
if (fromnd.ni_vp->v_type == VDIR)
|
|
|
|
tond.ni_cnd.cn_flags |= WILLBEDIR;
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&tond)) != 0) {
|
1995-11-18 11:35:05 +00:00
|
|
|
/* Translate error code for rename("dir1", "dir2/."). */
|
|
|
|
if (error == EISDIR && fvp->v_type == VDIR)
|
|
|
|
error = EINVAL;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(fromnd.ni_dvp);
|
|
|
|
vrele(fvp);
|
|
|
|
goto out1;
|
|
|
|
}
|
|
|
|
tdvp = tond.ni_dvp;
|
|
|
|
tvp = tond.ni_vp;
|
|
|
|
if (tvp != NULL) {
|
|
|
|
if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
|
|
|
|
error = ENOTDIR;
|
|
|
|
goto out;
|
|
|
|
} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
|
|
|
|
error = EISDIR;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (fvp == tdvp)
|
|
|
|
error = EINVAL;
|
|
|
|
/*
|
|
|
|
* If source is the same as the destination (that is the
|
|
|
|
* same inode number with the same name in the same directory),
|
|
|
|
* then there is nothing to do.
|
|
|
|
*/
|
|
|
|
if (fvp == tvp && fromnd.ni_dvp == tdvp &&
|
|
|
|
fromnd.ni_cnd.cn_namelen == tond.ni_cnd.cn_namelen &&
|
|
|
|
!bcmp(fromnd.ni_cnd.cn_nameptr, tond.ni_cnd.cn_nameptr,
|
|
|
|
fromnd.ni_cnd.cn_namelen))
|
|
|
|
error = -1;
|
|
|
|
out:
|
|
|
|
if (!error) {
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(tdvp, p, p->p_ucred, LEASE_WRITE);
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
if (fromnd.ni_dvp != tdvp) {
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
}
|
1995-03-19 11:16:58 +00:00
|
|
|
if (tvp) {
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE);
|
1995-03-19 11:16:58 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
|
|
|
|
tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
|
|
|
NDFREE(&tond, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
} else {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&fromnd, NDF_ONLY_PNBUF);
|
|
|
|
NDFREE(&tond, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (tdvp == tvp)
|
|
|
|
vrele(tdvp);
|
|
|
|
else
|
|
|
|
vput(tdvp);
|
|
|
|
if (tvp)
|
|
|
|
vput(tvp);
|
|
|
|
vrele(fromnd.ni_dvp);
|
|
|
|
vrele(fvp);
|
|
|
|
}
|
|
|
|
vrele(tond.ni_startdir);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(fromnd.ni_dvp, "rename");
|
|
|
|
ASSERT_VOP_UNLOCKED(fromnd.ni_vp, "rename");
|
|
|
|
ASSERT_VOP_UNLOCKED(tond.ni_dvp, "rename");
|
|
|
|
ASSERT_VOP_UNLOCKED(tond.ni_vp, "rename");
|
1994-05-24 10:09:53 +00:00
|
|
|
out1:
|
|
|
|
if (fromnd.ni_startdir)
|
|
|
|
vrele(fromnd.ni_startdir);
|
|
|
|
if (error == -1)
|
|
|
|
return (0);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a directory file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mkdir_args {
|
|
|
|
char *path;
|
|
|
|
int mode;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
mkdir(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct mkdir_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
syscallarg(int) mode;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
|
1995-07-31 00:35:58 +00:00
|
|
|
nd.ni_cnd.cn_flags |= WILLBEDIR;
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
|
|
|
if (vp != NULL) {
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
vrele(vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EEXIST);
|
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_type = VDIR;
|
1997-02-10 02:22:35 +00:00
|
|
|
vattr.va_mode = (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_fd->fd_cmask;
|
|
|
|
VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
|
1994-05-24 10:09:53 +00:00
|
|
|
error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-07 04:58:58 +00:00
|
|
|
vput(nd.ni_dvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (!error)
|
|
|
|
vput(nd.ni_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mkdir");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "mkdir");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove a directory file.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct rmdir_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
rmdir(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct rmdir_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
|
|
|
if (vp->v_type != VDIR) {
|
|
|
|
error = ENOTDIR;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* No rmdir "." please.
|
|
|
|
*/
|
|
|
|
if (nd.ni_dvp == vp) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* The root of a mounted filesystem cannot be deleted.
|
|
|
|
*/
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vp->v_flag & VROOT) {
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EBUSY;
|
2000-07-11 22:07:57 +00:00
|
|
|
goto out;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
if (nd.ni_dvp == vp)
|
|
|
|
vrele(nd.ni_dvp);
|
|
|
|
else
|
|
|
|
vput(nd.ni_dvp);
|
|
|
|
vput(vp);
|
|
|
|
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
|
|
|
|
VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
|
|
|
|
error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
|
|
|
|
vn_finished_write(mp);
|
1999-12-15 23:02:35 +00:00
|
|
|
out:
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1998-05-07 04:58:58 +00:00
|
|
|
if (nd.ni_dvp == vp)
|
|
|
|
vrele(nd.ni_dvp);
|
|
|
|
else
|
|
|
|
vput(nd.ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vput(vp);
|
1997-04-04 17:46:21 +00:00
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_dvp, "rmdir");
|
|
|
|
ASSERT_VOP_UNLOCKED(nd.ni_vp, "rmdir");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef COMPAT_43
|
|
|
|
/*
|
|
|
|
* Read a block of directory entries in a file system independent format.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ogetdirentries_args {
|
|
|
|
int fd;
|
|
|
|
char *buf;
|
|
|
|
u_int count;
|
|
|
|
long *basep;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
ogetdirentries(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct ogetdirentries_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(u_int) count;
|
|
|
|
syscallarg(long *) basep;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1998-11-03 08:01:48 +00:00
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct uio auio, kuio;
|
|
|
|
struct iovec aiov, kiov;
|
|
|
|
struct dirent *dp, *edp;
|
|
|
|
caddr_t dirbuf;
|
1997-02-10 02:22:35 +00:00
|
|
|
int error, eofflag, readcnt;
|
1994-05-24 10:09:53 +00:00
|
|
|
long loff;
|
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if ((fp->f_flag & FREAD) == 0)
|
|
|
|
return (EBADF);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
1997-02-10 02:22:35 +00:00
|
|
|
unionread:
|
1994-05-24 10:09:53 +00:00
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
return (EINVAL);
|
1997-02-10 02:22:35 +00:00
|
|
|
aiov.iov_base = SCARG(uap, buf);
|
|
|
|
aiov.iov_len = SCARG(uap, count);
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
1997-02-10 02:22:35 +00:00
|
|
|
auio.uio_resid = SCARG(uap, count);
|
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
loff = auio.uio_offset = fp->f_offset;
|
|
|
|
# if (BYTE_ORDER != LITTLE_ENDIAN)
|
|
|
|
if (vp->v_mount->mnt_maxsymlinklen <= 0) {
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag,
|
|
|
|
NULL, NULL);
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_offset = auio.uio_offset;
|
|
|
|
} else
|
|
|
|
# endif
|
|
|
|
{
|
|
|
|
kuio = auio;
|
|
|
|
kuio.uio_iov = &kiov;
|
|
|
|
kuio.uio_segflg = UIO_SYSSPACE;
|
1997-02-10 02:22:35 +00:00
|
|
|
kiov.iov_len = SCARG(uap, count);
|
|
|
|
MALLOC(dirbuf, caddr_t, SCARG(uap, count), M_TEMP, M_WAITOK);
|
1994-05-24 10:09:53 +00:00
|
|
|
kiov.iov_base = dirbuf;
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag,
|
|
|
|
NULL, NULL);
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_offset = kuio.uio_offset;
|
|
|
|
if (error == 0) {
|
1997-02-10 02:22:35 +00:00
|
|
|
readcnt = SCARG(uap, count) - kuio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
edp = (struct dirent *)&dirbuf[readcnt];
|
|
|
|
for (dp = (struct dirent *)dirbuf; dp < edp; ) {
|
|
|
|
# if (BYTE_ORDER == LITTLE_ENDIAN)
|
|
|
|
/*
|
|
|
|
* The expected low byte of
|
|
|
|
* dp->d_namlen is our dp->d_type.
|
|
|
|
* The high MBZ byte of dp->d_namlen
|
|
|
|
* is our dp->d_namlen.
|
|
|
|
*/
|
|
|
|
dp->d_type = dp->d_namlen;
|
|
|
|
dp->d_namlen = 0;
|
|
|
|
# else
|
|
|
|
/*
|
|
|
|
* The dp->d_type is the high byte
|
|
|
|
* of the expected dp->d_namlen,
|
|
|
|
* so must be zero'ed.
|
|
|
|
*/
|
|
|
|
dp->d_type = 0;
|
|
|
|
# endif
|
|
|
|
if (dp->d_reclen > 0) {
|
|
|
|
dp = (struct dirent *)
|
|
|
|
((char *)dp + dp->d_reclen);
|
|
|
|
} else {
|
|
|
|
error = EIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (dp >= edp)
|
|
|
|
error = uiomove(dirbuf, readcnt, &auio);
|
|
|
|
}
|
|
|
|
FREE(dirbuf, M_TEMP);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
1999-03-03 02:35:51 +00:00
|
|
|
if (SCARG(uap, count) == auio.uio_resid) {
|
|
|
|
if (union_dircheckp) {
|
|
|
|
error = union_dircheckp(p, &vp, fp);
|
|
|
|
if (error == -1)
|
|
|
|
goto unionread;
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
if ((vp->v_flag & VROOT) &&
|
|
|
|
(vp->v_mount->mnt_flag & MNT_UNION)) {
|
|
|
|
struct vnode *tvp = vp;
|
|
|
|
vp = vp->v_mount->mnt_vnodecovered;
|
|
|
|
VREF(vp);
|
|
|
|
fp->f_data = (caddr_t) vp;
|
|
|
|
fp->f_offset = 0;
|
|
|
|
vrele(tvp);
|
1997-02-10 02:22:35 +00:00
|
|
|
goto unionread;
|
1999-03-03 02:35:51 +00:00
|
|
|
}
|
1999-02-27 07:06:05 +00:00
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
|
|
|
|
sizeof(long));
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a block of directory entries in a file system independent format.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct getdirentries_args {
|
|
|
|
int fd;
|
|
|
|
char *buf;
|
|
|
|
u_int count;
|
|
|
|
long *basep;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
getdirentries(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct getdirentries_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(u_int) count;
|
|
|
|
syscallarg(long *) basep;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1998-11-03 08:01:48 +00:00
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
long loff;
|
1997-02-10 02:22:35 +00:00
|
|
|
int error, eofflag;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
if ((fp->f_flag & FREAD) == 0)
|
|
|
|
return (EBADF);
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
|
|
|
unionread:
|
|
|
|
if (vp->v_type != VDIR)
|
|
|
|
return (EINVAL);
|
1997-02-10 02:22:35 +00:00
|
|
|
aiov.iov_base = SCARG(uap, buf);
|
|
|
|
aiov.iov_len = SCARG(uap, count);
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
1997-02-10 02:22:35 +00:00
|
|
|
auio.uio_resid = SCARG(uap, count);
|
1997-09-21 04:24:27 +00:00
|
|
|
/* vn_lock(vp, LK_SHARED | LK_RETRY, p); */
|
1997-02-10 02:22:35 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
loff = auio.uio_offset = fp->f_offset;
|
1997-02-10 02:22:35 +00:00
|
|
|
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_offset = auio.uio_offset;
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_UNLOCK(vp, 0, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
1999-03-03 02:35:51 +00:00
|
|
|
if (SCARG(uap, count) == auio.uio_resid) {
|
|
|
|
if (union_dircheckp) {
|
|
|
|
error = union_dircheckp(p, &vp, fp);
|
|
|
|
if (error == -1)
|
|
|
|
goto unionread;
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
if ((vp->v_flag & VROOT) &&
|
|
|
|
(vp->v_mount->mnt_flag & MNT_UNION)) {
|
|
|
|
struct vnode *tvp = vp;
|
|
|
|
vp = vp->v_mount->mnt_vnodecovered;
|
|
|
|
VREF(vp);
|
|
|
|
fp->f_data = (caddr_t) vp;
|
|
|
|
fp->f_offset = 0;
|
|
|
|
vrele(tvp);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto unionread;
|
1999-03-03 02:35:51 +00:00
|
|
|
}
|
1999-02-27 07:06:05 +00:00
|
|
|
}
|
1998-05-11 03:55:28 +00:00
|
|
|
if (SCARG(uap, basep) != NULL) {
|
|
|
|
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
|
|
|
|
sizeof(long));
|
|
|
|
}
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
1998-05-11 03:55:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct getdents_args {
|
|
|
|
int fd;
|
|
|
|
char *buf;
|
|
|
|
size_t count;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
|
|
|
getdents(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct getdents_args /* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(char *) buf;
|
|
|
|
syscallarg(u_int) count;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct getdirentries_args ap;
|
|
|
|
ap.fd = uap->fd;
|
|
|
|
ap.buf = uap->buf;
|
|
|
|
ap.count = uap->count;
|
|
|
|
ap.basep = NULL;
|
|
|
|
return getdirentries(p, &ap);
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the mode mask for creation of filesystem nodes.
|
2000-03-28 07:16:37 +00:00
|
|
|
*
|
|
|
|
* MP SAFE
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct umask_args {
|
|
|
|
int newmask;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1997-02-10 02:22:35 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
umask(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct umask_args /* {
|
|
|
|
syscallarg(int) newmask;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct filedesc *fdp;
|
|
|
|
|
|
|
|
fdp = p->p_fd;
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = fdp->fd_cmask;
|
1997-02-10 02:22:35 +00:00
|
|
|
fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Void all references to file by ripping underlying filesystem
|
|
|
|
* away from vnode.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct revoke_args {
|
|
|
|
char *path;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
revoke(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct revoke_args /* {
|
|
|
|
syscallarg(char *) path;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
|
|
|
int error;
|
|
|
|
struct nameidata nd;
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
vp = nd.ni_vp;
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2000-11-02 21:14:13 +00:00
|
|
|
if (vp->v_type != VCHR) {
|
1999-01-24 06:28:37 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
|
|
|
if (p->p_ucred->cr_uid != vattr.va_uid &&
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no privisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
(error = suser_xxx(0, p, PRISON_ROOT)))
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
goto out;
|
1999-08-26 14:53:31 +00:00
|
|
|
if (vcount(vp) > 1)
|
1997-02-10 02:22:35 +00:00
|
|
|
VOP_REVOKE(vp, REVOKEALL);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
out:
|
|
|
|
vrele(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert a user file descriptor to a kernel file entry.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1994-05-24 10:09:53 +00:00
|
|
|
getvnode(fdp, fd, fpp)
|
|
|
|
struct filedesc *fdp;
|
|
|
|
int fd;
|
1995-11-13 08:22:21 +00:00
|
|
|
struct file **fpp;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
|
|
|
|
if ((u_int)fd >= fdp->fd_nfiles ||
|
|
|
|
(fp = fdp->fd_ofiles[fd]) == NULL)
|
|
|
|
return (EBADF);
|
1996-12-19 19:42:37 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EINVAL);
|
|
|
|
*fpp = fp;
|
|
|
|
return (0);
|
|
|
|
}
|
1999-09-11 00:46:08 +00:00
|
|
|
/*
|
|
|
|
* Get (NFS) file handle
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct getfh_args {
|
|
|
|
char *fname;
|
|
|
|
fhandle_t *fhp;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
|
|
|
getfh(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct getfh_args *uap;
|
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
fhandle_t fh;
|
|
|
|
register struct vnode *vp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must be super user
|
|
|
|
*/
|
|
|
|
error = suser(p);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, p);
|
|
|
|
error = namei(&nd);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
1999-09-11 00:46:08 +00:00
|
|
|
vp = nd.ni_vp;
|
|
|
|
bzero(&fh, sizeof(fh));
|
|
|
|
fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
|
|
|
|
error = VFS_VPTOFH(vp, &fh.fh_fid);
|
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = copyout(&fh, uap->fhp, sizeof (fh));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * syscall for the rpc.lockd to use to translate a NFS file handle into
 * an open descriptor.
 *
 * warning: do not remove the suser() call or this becomes one giant
 * security hole.
 */
#ifndef _SYS_SYSPROTO_H_
struct fhopen_args {
	const struct fhandle *u_fhp;	/* user pointer to the file handle */
	int flags;			/* open(2)-style flags */
};
#endif
int
fhopen(p, uap)
	struct proc *p;
	struct fhopen_args /* {
		syscallarg(const struct fhandle *) u_fhp;
		syscallarg(int) flags;
	} */ *uap;
{
	struct mount *mp;
	struct vnode *vp;
	struct fhandle fhp;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct flock lf;
	struct file *fp;
	register struct filedesc *fdp = p->p_fd;
	int fmode, mode, error, type;
	struct file *nfp;
	int indx;

	/*
	 * Must be super user
	 */
	error = suser(p);
	if (error)
		return (error);

	fmode = FFLAGS(SCARG(uap, flags));
	/* why not allow a non-read/write open for our lockd? */
	if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
		return (EINVAL);
	error = copyin(SCARG(uap,u_fhp), &fhp, sizeof(fhp));
	if (error)
		return(error);
	/* find the mount point */
	mp = vfs_getvfs(&fhp.fh_fsid);
	if (mp == NULL)
		return (ESTALE);
	/* now give me my vnode, it gets returned to me locked */
	error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
	if (error)
		return (error);
	/*
	 * from now on we have to make sure not
	 * to forget about the vnode
	 * any error that causes an abort must vput(vp)
	 * just set error = err and 'goto bad;'.
	 */

	/*
	 * from vn_open
	 */
	/* Symlinks and sockets cannot be opened via a handle. */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	/* Translate open flags into an access-check mode. */
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		error = vn_writechk(vp);
		if (error)
			goto bad;
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (mode) {
		error = VOP_ACCESS(vp, mode, p->p_ucred, p);
		if (error)
			goto bad;
	}
	if (fmode & O_TRUNC) {
		/* Drop the lock while possibly sleeping for write access. */
		VOP_UNLOCK(vp, 0, p);				/* XXX */
		if ((error = vn_start_write(NULL, &mp, V_WAIT | PCATCH)) != 0) {
			/* vnode is unlocked here, so vrele, not vput */
			vrele(vp);
			return (error);
		}
		VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);	/* XXX */
		/* Truncate by setting the size attribute to zero. */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, p->p_ucred, p);
		vn_finished_write(mp);
		if (error)
			goto bad;
	}
	error = VOP_OPEN(vp, fmode, p->p_ucred, p);
	if (error)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
		if ((error = vfs_object_create(vp, p, p->p_ucred)) != 0)
			goto bad;
	}
	if (fmode & FWRITE)
		vp->v_writecount++;

	/*
	 * end of vn_open code
	 */

	/* Allocate the descriptor; on failure the vnode is still locked. */
	if ((error = falloc(p, &nfp, &indx)) != 0)
		goto bad;
	fp = nfp;

	/*
	 * Hold an extra reference to avoid having fp ripped out
	 * from under us while we block in the lock op
	 */
	fhold(fp);
	nfp->f_data = (caddr_t)vp;
	nfp->f_flag = fmode & FMASK;
	nfp->f_ops = &vnops;
	nfp->f_type = DTYPE_VNODE;
	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		type = F_FLOCK;
		if ((fmode & FNONBLOCK) == 0)
			type |= F_WAIT;
		/* Must not hold the vnode lock while the lock op may sleep. */
		VOP_UNLOCK(vp, 0, p);
		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * The lock request failed.  Normally close the
			 * descriptor but handle the case where someone might
			 * have dup()d or close()d it when we weren't looking.
			 */
			if (fdp->fd_ofiles[indx] == fp) {
				fdp->fd_ofiles[indx] = NULL;
				/* drop the descriptor table's reference */
				fdrop(fp, p);
			}
			/*
			 * release our private reference
			 */
			fdrop(fp, p);
			return(error);
		}
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		fp->f_flag |= FHASLOCK;
	}
	/* Ensure a VM object exists for regular files (VMIO). */
	if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
		vfs_object_create(vp, p, p->p_ucred);

	VOP_UNLOCK(vp, 0, p);
	/* release the hold taken above; the descriptor keeps its own ref */
	fdrop(fp, p);
	p->p_retval[0] = indx;
	return (0);

bad:
	/* common abort path: vp is locked and referenced here */
	vput(vp);
	return (error);
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
|
|
|
|
* Stat an (NFS) file handle.
|
|
|
|
*/
|
1999-09-11 00:46:08 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct fhstat_args {
|
|
|
|
struct fhandle *u_fhp;
|
|
|
|
struct stat *sb;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
|
|
|
fhstat(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct fhstat_args /* {
|
|
|
|
syscallarg(struct fhandle *) u_fhp;
|
|
|
|
syscallarg(struct stat *) sb;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
struct stat sb;
|
|
|
|
fhandle_t fh;
|
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must be super user
|
|
|
|
*/
|
|
|
|
error = suser(p);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
|
|
|
|
return (ESTALE);
|
|
|
|
if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
|
|
|
|
return (error);
|
|
|
|
error = vn_stat(vp, &sb, p);
|
|
|
|
vput(vp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2000-09-14 19:13:59 +00:00
|
|
|
/*
 * Implement fstatfs() for (NFS) file handles.
 *
 * Maps the handle to its mount point, refreshes the mount's statfs
 * data, and copies it out.  Super-user only to enter; non-privileged
 * credentials additionally get the fsid scrubbed from the copy.
 */
#ifndef _SYS_SYSPROTO_H_
struct fhstatfs_args {
	struct fhandle *u_fhp;	/* user pointer to the file handle */
	struct statfs *buf;	/* user buffer for the statfs result */
};
#endif
int
fhstatfs(p, uap)
	struct proc *p;
	struct fhstatfs_args /* {
		syscallarg(struct fhandle) *u_fhp;
		syscallarg(struct statfs) *buf;
	} */ *uap;
{
	struct statfs *sp;
	struct mount *mp;
	struct vnode *vp;
	struct statfs sb;
	fhandle_t fh;
	int error;

	/*
	 * Must be super user
	 */
	if ((error = suser(p)))
		return (error);

	if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
		return (error);

	/* fsid -> mount point, fid -> vnode within it. */
	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
		return (ESTALE);
	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
		return (error);
	/* Use the vnode's own mount; sp aliases the mount's statfs cache. */
	mp = vp->v_mount;
	sp = &mp->mnt_stat;
	/* The vnode itself is no longer needed, only its mount. */
	vput(vp);
	if ((error = VFS_STATFS(mp, sp, p)) != 0)
		return (error);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	/* Hide the fsid from unprivileged credentials via a scrubbed copy. */
	if (suser_xxx(p->p_ucred, 0, 0)) {
		bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
		sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
		sp = &sb;
	}
	return (copyout(sp, SCARG(uap, buf), sizeof(*sp)));
}
|
1999-12-19 06:08:07 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Syscall to push extended attribute configuration information into the
|
|
|
|
* VFS. Accepts a path, which it converts to a mountpoint, as well as
|
|
|
|
* a command (int cmd), and attribute name and misc data. For now, the
|
|
|
|
* attribute name is left in userspace for consumption by the VFS_op.
|
|
|
|
* It will probably be changed to be copied into sysspace by the
|
|
|
|
* syscall in the future, once issues with various consumers of the
|
|
|
|
* attribute code have raised their hands.
|
|
|
|
*
|
|
|
|
* Currently this is used only by UFS Extended Attributes.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
extattrctl(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
struct extattrctl_args *uap;
|
|
|
|
{
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
a namespace field, as well as modified to advoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
struct vnode *filename_vp;
|
1999-12-19 06:08:07 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
struct mount *mp;
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
a namespace field, as well as modified to advoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
1999-12-19 06:08:07 +00:00
|
|
|
int error;
|
|
|
|
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
a namespace field, as well as modified to advoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
/*
|
|
|
|
* SCARG(uap, attrname) not always defined. We check again later
|
|
|
|
* when we invoke the VFS call so as to pass in NULL there if needed.
|
|
|
|
*/
|
|
|
|
if (SCARG(uap, attrname) != NULL) {
|
|
|
|
error = copyinstr(SCARG(uap, attrname), attrname,
|
|
|
|
EXTATTR_MAXNAMELEN, NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SCARG(uap, filename) not always defined. If it is, grab
|
|
|
|
* a vnode lock, which VFS_EXTATTRCTL() will later release.
|
|
|
|
*/
|
|
|
|
filename_vp = NULL;
|
|
|
|
if (SCARG(uap, filename) != NULL) {
|
|
|
|
NDINIT(&nd, LOOKUP | LOCKLEAF, FOLLOW, UIO_USERSPACE,
|
|
|
|
SCARG(uap, filename), p);
|
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
|
|
|
filename_vp = nd.ni_vp;
|
|
|
|
NDFREE(&nd, NDF_NO_VP_RELE | NDF_NO_VP_UNLOCK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* SCARG(uap, path) always defined. */
|
1999-12-19 06:08:07 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
|
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
2000-07-11 22:07:57 +00:00
|
|
|
error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
|
1999-12-19 06:08:07 +00:00
|
|
|
NDFREE(&nd, 0);
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
a namespace field, as well as modified to advoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
if (error) {
|
|
|
|
if (filename_vp)
|
|
|
|
vrele(filename_vp);
|
2000-07-11 22:07:57 +00:00
|
|
|
return (error);
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
a namespace field, as well as modified to advoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (SCARG(uap, attrname) != NULL) {
|
|
|
|
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
|
|
|
|
SCARG(uap, namespace), attrname, p);
|
|
|
|
} else {
|
|
|
|
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
|
|
|
|
SCARG(uap, namespace), NULL, p);
|
|
|
|
}
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
a namespace field, as well as modified to advoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
/*
|
|
|
|
* VFS_EXTATTRCTL will have unlocked, but not de-ref'd,
|
|
|
|
* filename_vp, so vrele it if it is defined.
|
|
|
|
*/
|
|
|
|
if (filename_vp != NULL)
|
|
|
|
vrele(filename_vp);
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
return (error);
|
1999-12-19 06:08:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Syscall to set a named extended attribute on a file or directory.
 * Accepts attribute name, and a uio structure pointing to the data to set.
 * The uio is consumed in the style of writev().  The real work happens
 * in VOP_SETEXTATTR().
 */
int
extattr_set_file(p, uap)
	struct proc *p;
	struct extattr_set_file_args *uap;
{
	struct nameidata nd;
	struct mount *mp;
	struct uio auio;
	struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
	char attrname[EXTATTR_MAXNAMELEN];
	u_int iovlen, cnt;
	int error, i;

	/* Attribute name comes from user space; fixed-size buffer. */
	error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);
	/* Look up the target; LOCKLEAF returns nd.ni_vp locked. */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    SCARG(uap, path), p);
	if ((error = namei(&nd)) != 0)
		return(error);
	/* Suspend-safe write access; must precede any 'goto done'. */
	if ((error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH)) != 0) {
		NDFREE(&nd, 0);
		return (error);
	}
	/*
	 * NOTE(review): iovlen is computed before iovcnt is range-checked;
	 * a negative iovcnt would wrap the u_int multiply — presumably
	 * caught by the later copyin failing, but worth confirming.
	 */
	iovlen = uap->iovcnt * sizeof(struct iovec);
	if (uap->iovcnt > UIO_SMALLIOV) {
		if (uap->iovcnt > UIO_MAXIOV) {
			error = EINVAL;
			goto done;
		}
		/* Too many iovecs for the stack array; allocate. */
		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
		needfree = iov;
	} else
		iov = aiov;
	/* Assemble a writev-style uio over the user iovec array. */
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovcnt;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;
	auio.uio_offset = 0;
	if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
		goto done;
	/* Total the lengths, rejecting a sum that would overflow int. */
	auio.uio_resid = 0;
	for (i = 0; i < uap->iovcnt; i++) {
		if (iov->iov_len > INT_MAX - auio.uio_resid) {
			error = EINVAL;
			goto done;
		}
		auio.uio_resid += iov->iov_len;
		iov++;
	}
	cnt = auio.uio_resid;
	error = VOP_SETEXTATTR(nd.ni_vp, SCARG(uap, namespace), attrname,
	    &auio, p->p_cred->pc_ucred, p);
	/* Report the number of bytes actually consumed, writev-style. */
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
done:
	if (needfree)
		FREE(needfree, M_IOV);
	/* Releases the vnode lock/ref taken by namei above. */
	NDFREE(&nd, 0);
	vn_finished_write(mp);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Syscall to get a named extended attribute on a file or directory.
|
|
|
|
* Accepts attribute name, and a uio structure pointing to a buffer for the
|
|
|
|
* data. The uio is consumed in the style of readv(). The real work
|
|
|
|
* happens in VOP_GETEXTATTR();
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
extattr_get_file(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
struct extattr_get_file_args *uap;
|
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
|
|
|
u_int iovlen, cnt;
|
|
|
|
int error, i;
|
|
|
|
|
|
|
|
error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
2000-07-26 20:29:20 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
|
|
|
SCARG(uap, path), p);
|
1999-12-19 06:08:07 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return (error);
|
|
|
|
iovlen = uap->iovcnt * sizeof (struct iovec);
|
|
|
|
if (uap->iovcnt > UIO_SMALLIOV) {
|
|
|
|
if (uap->iovcnt > UIO_MAXIOV) {
|
|
|
|
NDFREE(&nd, 0);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
|
|
|
|
needfree = iov;
|
|
|
|
} else {
|
|
|
|
iov = aiov;
|
|
|
|
needfree = NULL;
|
|
|
|
}
|
|
|
|
auio.uio_iov = iov;
|
|
|
|
auio.uio_iovcnt = uap->iovcnt;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
|
|
|
auio.uio_offset = 0;
|
|
|
|
if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
|
|
|
|
goto done;
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
for (i = 0; i < uap->iovcnt; i++) {
|
|
|
|
if (iov->iov_len > INT_MAX - auio.uio_resid) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
auio.uio_resid += iov->iov_len;
|
|
|
|
iov++;
|
|
|
|
}
|
|
|
|
cnt = auio.uio_resid;
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
  a namespace field, as well as modified to avoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
error = VOP_GETEXTATTR(nd.ni_vp, SCARG(uap, namespace), attrname,
|
|
|
|
&auio, p->p_cred->pc_ucred, p);
|
1999-12-19 06:08:07 +00:00
|
|
|
cnt -= auio.uio_resid;
|
|
|
|
p->p_retval[0] = cnt;
|
|
|
|
done:
|
|
|
|
if (needfree)
|
|
|
|
FREE(needfree, M_IOV);
|
|
|
|
NDFREE(&nd, 0);
|
|
|
|
return(error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Syscall to delete a named extended attribute from a file or directory.
|
|
|
|
* Accepts attribute name. The real work happens in VOP_SETEXTATTR().
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
extattr_delete_file(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
struct extattr_delete_file_args *uap;
|
|
|
|
{
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
1999-12-19 06:08:07 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
char attrname[EXTATTR_MAXNAMELEN];
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
|
|
|
|
if (error)
|
|
|
|
return(error);
|
2000-07-28 19:52:38 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
|
2000-07-26 20:29:20 +00:00
|
|
|
SCARG(uap, path), p);
|
1999-12-19 06:08:07 +00:00
|
|
|
if ((error = namei(&nd)) != 0)
|
|
|
|
return(error);
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH)) != 0) {
|
2000-07-26 20:29:20 +00:00
|
|
|
NDFREE(&nd, 0);
|
2000-07-11 22:07:57 +00:00
|
|
|
return (error);
|
|
|
|
}
|
o Change the API and ABI of the Extended Attribute kernel interfaces to
introduce a new argument, "namespace", rather than relying on a first-
character namespace indicator. This is in line with more recent
thinking on EA interfaces on various mailing lists, including the
posix1e, Linux acl-devel, and trustedbsd-discuss forums. Two namespaces
are defined by default, EXTATTR_NAMESPACE_SYSTEM and
EXTATTR_NAMESPACE_USER, where the primary distinction lies in the
access control model: user EAs are accessible based on the normal
MAC and DAC file/directory protections, and system attributes are
limited to kernel-originated or appropriately privileged userland
requests.
o These API changes occur at several levels: the namespace argument is
introduced in the extattr_{get,set}_file() system call interfaces,
at the vnode operation level in the vop_{get,set}extattr() interfaces,
and in the UFS extended attribute implementation. Changes are also
introduced in the VFS extattrctl() interface (system call, VFS,
and UFS implementation), where the arguments are modified to include
  a namespace field, as well as modified to avoid direct access to
userspace variables from below the VFS layer (in the style of recent
changes to mount by adrian@FreeBSD.org). This required some cleanup
and bug fixing regarding VFS locks and the VFS interface, as a vnode
pointer may now be optionally submitted to the VFS_EXTATTRCTL()
call. Updated documentation for the VFS interface will be committed
shortly.
o In the near future, the auto-starting feature will be updated to
search two sub-directories to the ".attribute" directory in appropriate
file systems: "user" and "system" to locate attributes intended for
those namespaces, as the single filename is no longer sufficient
to indicate what namespace the attribute is intended for. Until this
is committed, all attributes auto-started by UFS will be placed in
the EXTATTR_NAMESPACE_SYSTEM namespace.
o The default POSIX.1e attribute names for ACLs and Capabilities have
been updated to no longer include the '$' in their filename. As such,
if you're using these features, you'll need to rename the attribute
backing files to the same names without '$' symbols in front.
o Note that these changes will require changes in userland, which will
be committed shortly. These include modifications to the extended
attribute utilities, as well as to libutil for new namespace
string conversion routines. Once the matching userland changes are
committed, a buildworld is recommended to update all the necessary
include files and verify that the kernel and userland environments
are in sync. Note: If you do not use extended attributes (most people
won't), upgrading is not imperative although since the system call
API has changed, the new userland extended attribute code will no longer
compile with old include files.
o Couple of minor cleanups while I'm there: make more code compilation
conditional on FFS_EXTATTR, which should recover a bit of space on
kernels running without EA's, as well as update copyright dates.
Obtained from: TrustedBSD Project
2001-03-15 02:54:29 +00:00
|
|
|
error = VOP_SETEXTATTR(nd.ni_vp, SCARG(uap, namespace), attrname,
|
|
|
|
NULL, p->p_cred->pc_ucred, p);
|
1999-12-19 06:08:07 +00:00
|
|
|
NDFREE(&nd, 0);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1999-12-19 06:08:07 +00:00
|
|
|
return(error);
|
|
|
|
}
|