- Change the vm_mmap() function to accept an objtype_t parameter specifying

the type of object represented by the handle argument.
- Allow vm_mmap() to map device memory via cdev objects in addition to
  vnodes and anonymous memory.  Note that mmapping a cdev directly does not
  currently perform any MAC checks like mapping a vnode does.
- Unbreak the DRM getbufs ioctl by having it call vm_mmap() directly on the
  cdev the ioctl is acting on rather than trying to find a suitable vnode
  to map from.

Reviewed by:	alc, arch@
This commit is contained in:
John Baldwin 2005-04-01 20:00:11 +00:00
parent 5ec8c336e7
commit 98df9218da
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=144501
12 changed files with 104 additions and 33 deletions

View File

@ -189,14 +189,14 @@ exec_osf1_imgact(struct image_params *imgp)
/* set up text segment */
if ((error = vm_mmap(&vmspace->vm_map, &taddr, tsize,
VM_PROT_READ|VM_PROT_EXECUTE, VM_PROT_ALL, MAP_FIXED|MAP_COPY,
(caddr_t)imgp->vp, ECOFF_TXTOFF(execp)))) {
OBJT_VNODE, imgp->vp, ECOFF_TXTOFF(execp)))) {
DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
goto bail;
}
/* .. data .. */
if ((error = vm_mmap(&vmspace->vm_map, &daddr, dsize,
VM_PROT_READ|VM_PROT_EXECUTE|VM_PROT_WRITE, VM_PROT_ALL,
MAP_FIXED|MAP_COPY, (caddr_t)imgp->vp, ECOFF_DATOFF(execp)))) {
MAP_FIXED|MAP_COPY, OBJT_VNODE, imgp->vp, ECOFF_DATOFF(execp)))) {
DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
goto bail;
}

View File

@ -1458,7 +1458,8 @@ osf1_uswitch(td, uap)
if (uap->mask & OSF1_USW_NULLP) {
rv = vm_mmap(&(p->p_vmspace->vm_map), &zero, PAGE_SIZE,
VM_PROT_READ, VM_PROT_ALL,
MAP_PRIVATE | MAP_FIXED | MAP_ANON, NULL, 0);
MAP_PRIVATE | MAP_FIXED | MAP_ANON, OBJT_DEFAULT,
NULL, 0);
if (!rv)
return(KERN_SUCCESS);
else {

View File

@ -333,7 +333,7 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
/* Pull in executable header into kernel_map */
error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0);
VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
/*
* Lock no longer needed
*/
@ -422,7 +422,7 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
/* map file into kernel_map */
error = vm_mmap(kernel_map, &buffer,
round_page(a_out->a_text + a_out->a_data + file_offset),
VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp,
VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp,
trunc_page(file_offset));
if (error)
goto cleanup;
@ -453,7 +453,7 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
*/
error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset);
MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
if (error)
goto cleanup;
}

View File

@ -145,7 +145,7 @@ exec_svr4_imgact(imgp)
error = vm_mmap(kernel_map, &buffer,
round_page(a_out->a_text + a_out->a_data + file_offset),
VM_PROT_READ, VM_PROT_READ, 0,
(caddr_t) imgp->vp, trunc_page(file_offset));
OBJT_VNODE, imgp->vp, trunc_page(file_offset));
if (error)
goto fail;
@ -182,7 +182,7 @@ exec_svr4_imgact(imgp)
VM_PROT_READ | VM_PROT_EXECUTE,
VM_PROT_ALL,
MAP_PRIVATE | MAP_FIXED,
(caddr_t)imgp->vp, file_offset);
OBJT_VNODE, imgp->vp, file_offset);
if (error)
goto fail;

View File

@ -923,12 +923,8 @@ int DRM(mapbufs)( DRM_IOCTL_ARGS )
#ifdef __FreeBSD__
vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#ifdef this_is_just_plain_bogus
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
#else
retcode = EOPNOTSUPP;
#endif
VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff );
#elif defined(__NetBSD__)
vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
retcode = uvm_mmap(&vms->vm_map, &vaddr, size,

View File

@ -91,7 +91,7 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
}
DPRINTF(("%s(%d): vm_mmap(&vmspace->vm_map, &0x%08lx, 0x%x, 0x%x, "
"VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, vp, 0x%x)\n",
"VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
__FILE__, __LINE__, map_addr, map_len, prot, map_offset));
if ((error = vm_mmap(&vmspace->vm_map,
@ -100,7 +100,8 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
prot,
VM_PROT_ALL,
MAP_PRIVATE | MAP_FIXED,
(caddr_t) vp,
OBJT_VNODE,
vp,
map_offset)) != 0)
return error;
@ -136,7 +137,8 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
VM_PROT_READ,
VM_PROT_READ,
0,
(caddr_t) vp,
OBJT_VNODE,
vp,
trunc_page(offset + filsz))) != 0)
return error;
@ -215,7 +217,8 @@ coff_load_file(struct thread *td, char *name)
VM_PROT_READ,
VM_PROT_READ,
0,
(caddr_t) vp,
OBJT_VNODE,
vp,
0)) != 0)
goto unlocked_fail;
@ -372,7 +375,8 @@ exec_coff_imgact(imgp)
VM_PROT_READ,
VM_PROT_READ,
0,
(caddr_t) imgp->vp,
OBJT_VNODE,
imgp->vp,
foff)) != 0) {
error = ENOEXEC;
goto fail;

View File

@ -143,8 +143,8 @@ exec_linux_imgact(struct image_params *imgp)
error = vm_mmap(kernel_map, &buffer,
round_page(a_out->a_text + a_out->a_data + file_offset),
VM_PROT_READ, VM_PROT_READ, 0,
(caddr_t) imgp->vp, trunc_page(file_offset));
VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE,
imgp->vp, trunc_page(file_offset));
if (error)
goto fail;
@ -181,7 +181,8 @@ exec_linux_imgact(struct image_params *imgp)
VM_PROT_READ | VM_PROT_EXECUTE,
VM_PROT_ALL,
MAP_PRIVATE | MAP_FIXED,
(caddr_t)imgp->vp, file_offset);
OBJT_VNODE,
imgp->vp, file_offset);
if (error)
goto fail;

View File

@ -238,7 +238,8 @@ do_aout_hdr(struct imgact_gzip * gz)
&vmaddr,
gz->a_out.a_text + gz->a_out.a_data,
VM_PROT_ALL, VM_PROT_ALL, MAP_ANON | MAP_FIXED,
0,
OBJT_DEFAULT,
NULL,
0);
if (error) {
@ -311,7 +312,8 @@ NextByte(void *vp)
VM_PROT_READ, /* protection */
VM_PROT_READ, /* max protection */
0, /* flags */
(caddr_t) igz->ip->vp, /* vnode */
OBJT_VNODE, /* handle type */
igz->ip->vp, /* vnode */
igz->offset); /* offset */
if (error) {
igz->where = __LINE__;

View File

@ -80,6 +80,10 @@ typedef u_char vm_prot_t; /* protection codes */
#define VM_PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
#define VM_PROT_DEFAULT VM_PROT_ALL
enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_PHYS,
OBJT_DEAD };
typedef u_char objtype_t;
union vm_map_object;
typedef union vm_map_object vm_map_object_t;

View File

@ -72,7 +72,7 @@ void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
void vm_forkproc(struct thread *, struct proc *, struct thread *, int);
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
struct vmspace *vmspace_fork(struct vmspace *);

View File

@ -109,6 +109,8 @@ vmmapentry_rsrc_init(dummy)
static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
int *, struct vnode *, vm_ooffset_t, vm_object_t *);
static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
int *, struct cdev *, vm_ooffset_t, vm_object_t *);
/*
* MPSAFE
@ -205,6 +207,7 @@ mmap(td, uap)
vm_size_t size, pageoff;
vm_prot_t prot, maxprot;
void *handle;
objtype_t handle_type;
int flags, error;
off_t pos;
struct vmspace *vms = td->td_proc->p_vmspace;
@ -282,6 +285,7 @@ mmap(td, uap)
* Mapping blank space is trivial.
*/
handle = NULL;
handle_type = OBJT_DEFAULT;
maxprot = VM_PROT_ALL;
pos = 0;
} else {
@ -344,6 +348,7 @@ mmap(td, uap)
maxprot |= VM_PROT_WRITE;
}
handle = (void *)vp;
handle_type = OBJT_VNODE;
}
/*
@ -358,7 +363,7 @@ mmap(td, uap)
}
error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
flags, handle, pos);
flags, handle_type, handle, pos);
if (error == 0)
td->td_retval[0] = (register_t) (addr + pageoff);
done:
@ -1165,6 +1170,55 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
return (error);
}
/*
 * vm_mmap_cdev()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap().  Performs the sanity checks specific to
 * mmap() operations on character devices (cdevs) and, on success, allocates
 * the backing OBJT_DEVICE VM object.
 *
 * td       - calling thread; only consulted (for credentials) in the
 *            disabled MAC check below
 * objsize  - size of the requested mapping, in bytes
 * prot     - requested protection bits
 * maxprotp - in/out maximum protection; raised to VM_PROT_ALL for
 *            D_MMAP_ANON devices
 * flagsp   - in/out mmap() flags; MAP_ANON or MAP_SHARED may be added
 * cdev     - the character device being mapped
 * foff     - byte offset into the device
 * objp     - out parameter receiving the allocated VM object on success
 *
 * Returns 0 on success or an errno value (EACCES, EINVAL) on failure.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize,
vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
struct cdev *cdev, vm_ooffset_t foff, vm_object_t *objp)
{
vm_object_t obj;
int flags;
flags = *flagsp;
/* XXX: lacks a thread reference on the device */
if (cdev->si_devsw->d_flags & D_MMAP_ANON) {
/*
 * Device asks to be mapped as anonymous memory: grant full
 * protection and let the caller take the MAP_ANON path.
 */
*maxprotp = VM_PROT_ALL;
*flagsp |= MAP_ANON;
return (0);
}
/*
 * cdevs do not provide private mappings of any kind.
 *
 * NOTE(review): this test mixes PROT_WRITE (mmap(2) constant) with
 * VM_PROT_WRITE (vm_prot_t constant) — presumably they share the same
 * bit value; confirm against sys/mman.h and vm/vm.h.
 */
if ((*maxprotp & VM_PROT_WRITE) == 0 &&
(prot & PROT_WRITE) != 0)
return (EACCES);
if (flags & (MAP_PRIVATE|MAP_COPY))
return (EINVAL);
/*
 * Force device mappings to be shared.
 */
flags |= MAP_SHARED;
#ifdef MAC_XXX
/*
 * NOTE(review): 'error' is not declared anywhere in this function, so
 * this block would fail to compile if MAC_XXX were ever defined.  The
 * commit message acknowledges that cdev mappings currently get no MAC
 * checks.
 */
error = mac_check_cdev_mmap(td->td_ucred, cdev, prot);
if (error != 0)
return (error);
#endif
obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, foff);
if (obj == NULL)
return (EINVAL);
/* Only publish the (possibly modified) flags on the success path. */
*objp = obj;
*flagsp = flags;
return (0);
}
/*
* vm_mmap()
*
@ -1176,7 +1230,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
vm_prot_t maxprot, int flags,
void *handle,
objtype_t handle_type, void *handle,
vm_ooffset_t foff)
{
boolean_t fitit;
@ -1222,13 +1276,26 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
/*
* Lookup/allocate object.
*/
if (handle != NULL) {
switch (handle_type) {
case OBJT_DEVICE:
error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
handle, foff, &object);
break;
case OBJT_VNODE:
error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
handle, foff, &object);
if (error) {
return (error);
break;
case OBJT_DEFAULT:
if (handle == NULL) {
error = 0;
break;
}
/* FALLTHROUGH */
default:
error = EINVAL;
}
if (error)
return (error);
if (flags & MAP_ANON) {
object = NULL;
docow = 0;

View File

@ -71,10 +71,6 @@
#include <sys/_lock.h>
#include <sys/_mutex.h>
enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_PHYS,
OBJT_DEAD };
typedef u_char objtype_t;
/*
* Types defined:
*