Move the vnode-handling code out of several functions into a single
helper function, vm_mmap_vnode.

Discussed with:	jeffr,alc (a while ago)
This commit is contained in:
Alexander Kabaev 2004-02-27 22:02:15 +00:00
parent e578c2421c
commit c8daea132f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=126332

View File

@ -110,6 +110,9 @@ vmmapentry_rsrc_init(dummy)
max_proc_mmap /= 100;
}
static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
int *, struct vnode *, vm_ooffset_t, vm_object_t *);
/*
* MPSAFE
*/
@ -203,17 +206,15 @@ mmap(td, uap)
struct thread *td;
struct mmap_args *uap;
{
struct file *fp = NULL;
struct file *fp;
struct vnode *vp;
vm_offset_t addr;
vm_size_t size, pageoff;
vm_prot_t prot, maxprot;
void *handle;
int flags, error;
int disablexworkaround;
off_t pos;
struct vmspace *vms = td->td_proc->p_vmspace;
vm_object_t obj;
addr = (vm_offset_t) uap->addr;
size = uap->len;
@ -221,7 +222,6 @@ mmap(td, uap)
flags = uap->flags;
pos = uap->pos;
vp = NULL;
fp = NULL;
/* make sure mapping fits into numeric range etc */
if ((ssize_t) uap->len < 0 ||
@ -284,7 +284,6 @@ mmap(td, uap)
lim_max(td->td_proc, RLIMIT_DATA));
PROC_UNLOCK(td->td_proc);
}
mtx_lock(&Giant); /* syscall marked mp-safe but isn't */
do {
if (flags & MAP_ANON) {
/*
@ -306,7 +305,6 @@ mmap(td, uap)
error = EINVAL;
goto done;
}
/*
* POSIX shared-memory objects are defined to have
* kernel persistence, and are not defined to support
@ -318,60 +316,6 @@ mmap(td, uap)
if (fp->f_flag & FPOSIXSHM)
flags |= MAP_NOSYNC;
vp = fp->f_vnode;
error = vget(vp, LK_EXCLUSIVE, td);
if (error)
goto done;
if (vp->v_type != VREG && vp->v_type != VCHR) {
error = EINVAL;
goto done;
}
if (vp->v_type == VREG) {
/*
* Get the proper underlying object
*/
if (VOP_GETVOBJECT(vp, &obj) != 0) {
error = EINVAL;
goto done;
}
if (obj->handle != vp) {
vput(vp);
vp = (struct vnode*)obj->handle;
vget(vp, LK_EXCLUSIVE, td);
}
}
/*
* XXX hack to handle use of /dev/zero to map anon memory (ala
* SunOS).
*/
if ((vp->v_type == VCHR) &&
(vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
handle = NULL;
maxprot = VM_PROT_ALL;
flags |= MAP_ANON;
pos = 0;
break;
}
/*
* cdevs does not provide private mappings of any kind.
*/
/*
* However, for XIG X server to continue to work,
* we should allow the superuser to do it anyway.
* We only allow it at securelevel < 1.
* (Because the XIG X server writes directly to video
* memory via /dev/mem, it should never work at any
* other securelevel.
* XXX this will have to go
*/
if (securelevel_ge(td->td_ucred, 1))
disablexworkaround = 1;
else
disablexworkaround = suser(td);
if (vp->v_type == VCHR && disablexworkaround &&
(flags & (MAP_PRIVATE|MAP_COPY))) {
error = EINVAL;
goto done;
}
/*
* Ensure that file and memory protections are
* compatible. Note that we only worry about
@ -393,26 +337,11 @@ mmap(td, uap)
* MAP_SHARED or via the implicit sharing of character
* device mappings), and we are trying to get write
* permission although we opened it without asking
* for it, bail out. Check for superuser, only if
* we're at securelevel < 1, to allow the XIG X server
* to continue to work.
* for it, bail out.
*/
if ((flags & MAP_SHARED) != 0 ||
(vp->v_type == VCHR && disablexworkaround)) {
if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) {
if ((fp->f_flag & FWRITE) != 0) {
struct vattr va;
if ((error =
VOP_GETATTR(vp, &va,
td->td_ucred, td))) {
goto done;
}
if ((va.va_flags &
(SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
maxprot |= VM_PROT_WRITE;
} else if (prot & PROT_WRITE) {
error = EPERM;
goto done;
}
maxprot |= VM_PROT_WRITE;
} else if ((prot & PROT_WRITE) != 0) {
error = EACCES;
goto done;
@ -420,7 +349,6 @@ mmap(td, uap)
} else {
maxprot |= VM_PROT_WRITE;
}
handle = (void *)vp;
} while (0);
@ -435,28 +363,11 @@ mmap(td, uap)
goto done;
}
error = 0;
#ifdef MAC
if (handle != NULL && (flags & MAP_SHARED) != 0) {
error = mac_check_vnode_mmap(td->td_ucred,
(struct vnode *)handle, prot);
}
#endif
if (vp != NULL) {
vput(vp);
vp = NULL;
}
mtx_unlock(&Giant);
if (error == 0)
error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
flags, handle, pos);
mtx_lock(&Giant);
error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
flags, handle, pos);
if (error == 0)
td->td_retval[0] = (register_t) (addr + pageoff);
done:
if (vp)
vput(vp);
mtx_unlock(&Giant);
if (fp)
fdrop(fp, td);
@ -1175,6 +1086,124 @@ kern_munlock(td, addr, size)
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
/*
* vm_mmap_vnode()
*
* MPSAFE
*
* Helper function for vm_mmap. Perform sanity check specific for mmap
* operations on vnodes.
*/
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
struct vnode *vp, vm_ooffset_t foff, vm_object_t *objp)
{
struct vattr va;
void *handle;
vm_object_t obj;
int disablexworkaround, error, flags, type;
mtx_lock(&Giant);
if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
mtx_unlock(&Giant);
return (error);
}
flags = *flagsp;
if (vp->v_type == VREG) {
/*
* Get the proper underlying object
*/
if (VOP_GETVOBJECT(vp, &obj) != 0) {
error = EINVAL;
goto done;
}
if (obj->handle != vp) {
vput(vp);
vp = (struct vnode*)obj->handle;
vget(vp, LK_EXCLUSIVE, td);
}
type = OBJT_VNODE;
handle = vp;
} else if (vp->v_type == VCHR) {
type = OBJT_DEVICE;
handle = vp->v_rdev;
if(vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON) {
*maxprotp = VM_PROT_ALL;
*flagsp |= MAP_ANON;
error = 0;
goto done;
}
/*
* cdevs does not provide private mappings of any kind.
*/
/*
* However, for XIG X server to continue to work,
* we should allow the superuser to do it anyway.
* We only allow it at securelevel < 1.
* (Because the XIG X server writes directly to video
* memory via /dev/mem, it should never work at any
* other securelevel.
* XXX this will have to go
*/
if (securelevel_ge(td->td_ucred, 1))
disablexworkaround = 1;
else
disablexworkaround = suser(td);
if (disablexworkaround && (flags & (MAP_PRIVATE|MAP_COPY))) {
error = EINVAL;
goto done;
}
/*
* Force device mappings to be shared.
*/
flags &= ~(MAP_PRIVATE|MAP_COPY);
flags |= MAP_SHARED;
} else {
error = EINVAL;
goto done;
}
if ((error = VOP_GETATTR(vp, &va, td->td_ucred, td))) {
goto done;
}
if ((flags & MAP_SHARED) != 0) {
if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
if (prot & PROT_WRITE) {
error = EPERM;
goto done;
}
*maxprotp &= ~VM_PROT_WRITE;
}
#ifdef MAC
error = mac_check_vnode_mmap(td->td_ucred, vp, prot);
if (error != 0)
goto done;
#endif
}
/*
* If it is a regular file without any references
* we do not need to sync it.
* Adjust object size to be the size of actual file.
*/
if (vp->v_type == VREG) {
objsize = round_page(va.va_size);
if (va.va_nlink == 0)
flags |= MAP_NOSYNC;
}
obj = vm_pager_allocate(type, handle, objsize, prot, foff);
if (obj == NULL) {
error = (type == OBJT_DEVICE ? EINVAL : ENOMEM);
goto done;
}
*objp = obj;
*flagsp = flags;
done:
vput(vp);
mtx_unlock(&Giant);
return (error);
}
/*
* vm_mmap()
*
@ -1191,8 +1220,6 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
{
boolean_t fitit;
vm_object_t object;
struct vnode *vp = NULL;
objtype_t type;
int rv = KERN_SUCCESS;
vm_ooffset_t objsize;
int docow, error;
@ -1231,75 +1258,28 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
fitit = FALSE;
(void) vm_map_remove(map, *addr, *addr + size);
}
/*
* Lookup/allocate object.
*/
if (handle != NULL) {
error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
handle, foff, &object);
if (error) {
return (error);
}
}
if (flags & MAP_ANON) {
type = OBJT_DEFAULT;
object = NULL;
docow = 0;
/*
* Unnamed anonymous regions always start at 0.
*/
if (handle == 0)
foff = 0;
} else {
vp = (struct vnode *) handle;
mtx_lock(&Giant);
error = vget(vp, LK_EXCLUSIVE, td);
if (error) {
mtx_unlock(&Giant);
return (error);
}
if (vp->v_type == VCHR) {
type = OBJT_DEVICE;
handle = vp->v_rdev;
vput(vp);
mtx_unlock(&Giant);
} else {
struct vattr vat;
error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
if (error) {
vput(vp);
mtx_unlock(&Giant);
return (error);
}
objsize = round_page(vat.va_size);
type = OBJT_VNODE;
/*
* if it is a regular file without any references
* we do not need to sync it.
*/
if (vp->v_type == VREG && vat.va_nlink == 0) {
flags |= MAP_NOSYNC;
}
}
}
if (handle == NULL) {
object = NULL;
docow = 0;
} else {
object = vm_pager_allocate(type,
handle, objsize, prot, foff);
if (type == OBJT_VNODE) {
vput(vp);
mtx_unlock(&Giant);
}
if (object == NULL) {
return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
}
docow = MAP_PREFAULT_PARTIAL;
}
/*
* Force device mappings to be shared.
*/
if (type == OBJT_DEVICE) {
flags &= ~(MAP_PRIVATE|MAP_COPY);
flags |= MAP_SHARED;
}
if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
docow |= MAP_COPY_ON_WRITE;
if (flags & MAP_NOSYNC)