Add an extension to the character device interface that allows character
device drivers to use arbitrary VM objects to satisfy individual mmap()
requests.
- A new d_mmap_single(cdev, &foff, objsize, &object, prot) callback has
  been added to cdevsw.  It is called for each mmap() request.  If it
  returns ENODEV, the mmap() request falls back to using the device's
  device pager object and d_mmap().  Otherwise, the method can return a
  VM object to satisfy the entire mmap() request via *object, and it can
  also adjust the starting offset into that object via *foff.  This
  allows device drivers to use the file offset as a cookie identifying
  specific VM objects; a driver-side sketch follows this list.
- vm_mmap_vnode() has been changed to call vm_mmap_cdev() directly when
  mapping VCHR vnodes.  This avoids duplicating all of the cdev mmap
  handling code and simplifies vm_mmap_vnode() somewhat.
- D_VERSION has been bumped to D_VERSION_02.  Older device drivers
  using D_VERSION_01 are still supported.
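
For illustration only (not part of this commit), a driver-side
implementation might look like the sketch below.  The foo_* names, the
softc layout, and the lookup helper are hypothetical; only the callback
signature, the ENODEV fallback, and the requirement to hand back a
referenced VM object come from this change.

#include <sys/param.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

static int
foo_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
        struct foo_softc *sc = cdev->si_drv1;   /* hypothetical softc */
        struct foo_buf *buf;

        /* Treat the file offset as a cookie naming a specific buffer. */
        buf = foo_lookup_buf(sc, *offset);      /* hypothetical helper */
        if (buf == NULL)
                return (ENODEV);        /* fall back to d_mmap() */
        if (size > buf->size)
                return (EINVAL);

        /* The caller expects a referenced object. */
        vm_object_reference(buf->obj);
        *object = buf->obj;

        /* Map from the start of this object, not from the cookie. */
        *offset = 0;
        return (0);
}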

MFC after:	1 month
Author:	John Baldwin
Date:	2009-06-01 21:32:52 +00:00
Commit:	64345f0b57 (parent d825c7936c)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=193275

3 changed files with 80 additions and 62 deletions

sys/kern/kern_conf.c

@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/stdarg.h>
 #include <fs/devfs/devfs_int.h>
+#include <vm/vm.h>
 static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");
@@ -275,6 +276,7 @@ dead_strategy(struct bio *bp)
 #define dead_dump (dumper_t *)enxio
 #define dead_kqfilter (d_kqfilter_t *)enxio
+#define dead_mmap_single (d_mmap_single_t *)enodev
 static struct cdevsw dead_cdevsw = {
        .d_version = D_VERSION,
@@ -289,7 +291,8 @@ static struct cdevsw dead_cdevsw = {
        .d_strategy = dead_strategy,
        .d_name = "dead",
        .d_dump = dead_dump,
-       .d_kqfilter = dead_kqfilter
+       .d_kqfilter = dead_kqfilter,
+       .d_mmap_single = dead_mmap_single
 };
 /* Default methods if driver does not specify method */
@@ -301,6 +304,7 @@ static struct cdevsw dead_cdevsw = {
 #define no_ioctl (d_ioctl_t *)enodev
 #define no_mmap (d_mmap_t *)enodev
 #define no_kqfilter (d_kqfilter_t *)enodev
+#define no_mmap_single (d_mmap_single_t *)enodev
 static void
 no_strategy(struct bio *bp)
@@ -480,6 +484,23 @@ giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
        return (retval);
 }
+static int
+giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
+    vm_object_t *object, int nprot)
+{
+       struct cdevsw *dsw;
+       int retval;
+       dsw = dev_refthread(dev);
+       if (dsw == NULL)
+               return (ENXIO);
+       mtx_lock(&Giant);
+       retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object,
+           nprot);
+       mtx_unlock(&Giant);
+       dev_relthread(dev);
+       return (retval);
+}
 static void
 notify(struct cdev *dev, const char *ev)
@@ -569,7 +590,8 @@ prep_cdevsw(struct cdevsw *devsw)
                return;
        }
-       if (devsw->d_version != D_VERSION_01) {
+       if (devsw->d_version != D_VERSION_01 &&
+           devsw->d_version != D_VERSION_02) {
                printf(
                    "WARNING: Device driver \"%s\" has wrong version %s\n",
                    devsw->d_name == NULL ? "???" : devsw->d_name,
@@ -585,6 +607,8 @@ prep_cdevsw(struct cdevsw *devsw)
                devsw->d_dump = dead_dump;
                devsw->d_kqfilter = dead_kqfilter;
        }
+       if (devsw->d_version == D_VERSION_01)
+               devsw->d_mmap_single = NULL;
        if (devsw->d_flags & D_NEEDGIANT) {
                if (devsw->d_gianttrick == NULL) {
@@ -613,6 +637,7 @@ prep_cdevsw(struct cdevsw *devsw)
        FIXUP(d_mmap, no_mmap, giant_mmap);
        FIXUP(d_strategy, no_strategy, giant_strategy);
        FIXUP(d_kqfilter, no_kqfilter, giant_kqfilter);
+       FIXUP(d_mmap_single, no_mmap_single, giant_mmap_single);
        if (devsw->d_dump == NULL) devsw->d_dump = no_dump;
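
For clarity, the combined effect of the version check and the new FIXUP
line on d_mmap_single can be paraphrased as the sketch below; this is a
simplification for illustration, not the literal FIXUP expansion in
kern_conf.c:

/* Paraphrase of prep_cdevsw()'s handling of d_mmap_single. */
if (devsw->d_version == D_VERSION_01)
        devsw->d_mmap_single = NULL;            /* field did not exist yet */
if (devsw->d_mmap_single == NULL)
        devsw->d_mmap_single = no_mmap_single;  /* enodev: fall back */
else if (devsw->d_flags & D_NEEDGIANT)
        devsw->d_mmap_single = giant_mmap_single; /* wrap with Giant */

Either way, every prepared cdevsw ends up with a callable d_mmap_single,
so vm_mmap_cdev() below can invoke it unconditionally and rely on the
ENODEV return to select the device-pager fallback.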

sys/sys/conf.h

@@ -103,6 +103,7 @@ struct thread;
 struct uio;
 struct knote;
 struct clonedevs;
+struct vm_object;
 struct vnode;
 /*
@@ -136,6 +137,8 @@ typedef int d_poll_t(struct cdev *dev, int events, struct thread *td);
 typedef int d_kqfilter_t(struct cdev *dev, struct knote *kn);
 typedef int d_mmap_t(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
     int nprot);
+typedef int d_mmap_single_t(struct cdev *cdev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **object, int nprot);
 typedef void d_purge_t(struct cdev *dev);
 typedef int d_spare2_t(struct cdev *dev);
@@ -175,7 +178,8 @@ typedef int dumper_t(
  */
 #define D_VERSION_00 0x20011966
 #define D_VERSION_01 0x17032005 /* Add d_uid,gid,mode & kind */
-#define D_VERSION D_VERSION_01
+#define D_VERSION_02 0x28042009 /* Add d_mmap_single */
+#define D_VERSION D_VERSION_02
 /*
  * Flags used for internal housekeeping
@@ -201,7 +205,7 @@ struct cdevsw {
        dumper_t *d_dump;
        d_kqfilter_t *d_kqfilter;
        d_purge_t *d_purge;
-       d_spare2_t *d_spare2;
+       d_mmap_single_t *d_mmap_single;
        uid_t d_uid;
        gid_t d_gid;
        mode_t d_mode;
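
A driver opting into the new interface sets .d_version to D_VERSION (now
D_VERSION_02) and fills the slot recycled from d_spare2.  A hypothetical
registration, with foo_* placeholders; note that the version constants
encode the date of the interface change (0x28042009 for 28 April 2009,
as 0x17032005 was 17 March 2005):

static struct cdevsw foo_cdevsw = {
        .d_version =    D_VERSION,              /* D_VERSION_02 */
        .d_open =       foo_open,
        .d_mmap =       foo_mmap,               /* fallback path */
        .d_mmap_single = foo_mmap_single,       /* sketched above */
        .d_name =       "foo",
};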

sys/vm/vm_mmap.c

@@ -117,9 +117,9 @@ vmmapentry_rsrc_init(dummy)
 }
 static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
-    int *, struct vnode *, vm_ooffset_t, vm_object_t *);
+    int *, struct vnode *, vm_ooffset_t *, vm_object_t *);
 static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
-    int *, struct cdev *, vm_ooffset_t, vm_object_t *);
+    int *, struct cdev *, vm_ooffset_t *, vm_object_t *);
 static int vm_mmap_shm(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
     int *, struct shmfd *, vm_ooffset_t, vm_object_t *);
@@ -1142,15 +1142,14 @@ munlock(td, uap)
 int
 vm_mmap_vnode(struct thread *td, vm_size_t objsize,
     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
-    struct vnode *vp, vm_ooffset_t foff, vm_object_t *objp)
+    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp)
 {
        struct vattr va;
-       void *handle;
        vm_object_t obj;
+       vm_offset_t foff;
        struct mount *mp;
-       struct cdevsw *dsw;
        struct ucred *cred;
-       int error, flags, type;
+       int error, flags;
        int vfslocked;
        mp = vp->v_mount;
@@ -1160,6 +1159,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
                VFS_UNLOCK_GIANT(vfslocked);
                return (error);
        }
+       foff = *foffp;
        flags = *flagsp;
        obj = vp->v_object;
        if (vp->v_type == VREG) {
@@ -1175,41 +1175,12 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
                        vp = (struct vnode*)obj->handle;
                        vget(vp, LK_SHARED, td);
                }
-               type = OBJT_VNODE;
-               handle = vp;
        } else if (vp->v_type == VCHR) {
-               type = OBJT_DEVICE;
-               handle = vp->v_rdev;
-               dsw = dev_refthread(handle);
-               if (dsw == NULL) {
-                       error = ENXIO;
-                       goto done;
-               }
-               if (dsw->d_flags & D_MMAP_ANON) {
-                       dev_relthread(handle);
-                       *maxprotp = VM_PROT_ALL;
-                       *flagsp |= MAP_ANON;
-                       error = 0;
-                       goto done;
-               }
-               dev_relthread(handle);
-               /*
-                * cdevs does not provide private mappings of any kind.
-                */
-               if ((*maxprotp & VM_PROT_WRITE) == 0 &&
-                   (prot & PROT_WRITE) != 0) {
-                       error = EACCES;
-                       goto done;
-               }
-               if (flags & (MAP_PRIVATE|MAP_COPY)) {
-                       error = EINVAL;
-                       goto done;
-               }
-               /*
-                * Force device mappings to be shared.
-                */
-               flags |= MAP_SHARED;
+               error = vm_mmap_cdev(td, objsize, prot, maxprotp, flagsp,
+                   vp->v_rdev, foffp, objp);
+               if (error == 0)
+                       goto mark_atime;
+               goto done;
        } else {
                error = EINVAL;
                goto done;
@@ -1235,18 +1206,18 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
         * we do not need to sync it.
         * Adjust object size to be the size of actual file.
         */
-       if (vp->v_type == VREG) {
-               objsize = round_page(va.va_size);
-               if (va.va_nlink == 0)
-                       flags |= MAP_NOSYNC;
-       }
-       obj = vm_pager_allocate(type, handle, objsize, prot, foff);
+       objsize = round_page(va.va_size);
+       if (va.va_nlink == 0)
+               flags |= MAP_NOSYNC;
+       obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff);
        if (obj == NULL) {
-               error = (type == OBJT_DEVICE ? EINVAL : ENOMEM);
+               error = ENOMEM;
                goto done;
        }
        *objp = obj;
        *flagsp = flags;
+mark_atime:
        vfs_mark_atime(vp, cred);
 done:
@@ -1266,11 +1237,11 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 int
 vm_mmap_cdev(struct thread *td, vm_size_t objsize,
     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
-    struct cdev *cdev, vm_ooffset_t foff, vm_object_t *objp)
+    struct cdev *cdev, vm_ooffset_t *foff, vm_object_t *objp)
 {
        vm_object_t obj;
        struct cdevsw *dsw;
-       int flags;
+       int error, flags;
        flags = *flagsp;
@@ -1283,25 +1254,43 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
                *flagsp |= MAP_ANON;
                return (0);
        }
-       dev_relthread(cdev);
        /*
-        * cdevs does not provide private mappings of any kind.
+        * cdevs do not provide private mappings of any kind.
        */
        if ((*maxprotp & VM_PROT_WRITE) == 0 &&
-           (prot & PROT_WRITE) != 0)
+           (prot & PROT_WRITE) != 0) {
+               dev_relthread(cdev);
                return (EACCES);
-       if (flags & (MAP_PRIVATE|MAP_COPY))
+       }
+       if (flags & (MAP_PRIVATE|MAP_COPY)) {
+               dev_relthread(cdev);
                return (EINVAL);
+       }
        /*
         * Force device mappings to be shared.
         */
        flags |= MAP_SHARED;
 #ifdef MAC_XXX
-       error = mac_check_cdev_mmap(td->td_ucred, cdev, prot);
-       if (error != 0)
+       error = mac_cdev_check_mmap(td->td_ucred, cdev, prot);
+       if (error != 0) {
+               dev_relthread(cdev);
                return (error);
+       }
 #endif
-       obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, foff);
+       /*
+        * First, try d_mmap_single().  If that is not implemented
+        * (returns ENODEV), fall back to using the device pager.
+        * Note that d_mmap_single() must return a reference to the
+        * object (it needs to bump the reference count of the object
+        * it returns somehow).
+        *
+        * XXX assumes VM_PROT_* == PROT_*
+        */
+       error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
+       dev_relthread(cdev);
+       if (error != ENODEV)
+               return (error);
+       obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff);
        if (obj == NULL)
                return (EINVAL);
        *objp = obj;
@@ -1396,11 +1385,11 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
        switch (handle_type) {
        case OBJT_DEVICE:
                error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
-                   handle, foff, &object);
+                   handle, &foff, &object);
                break;
        case OBJT_VNODE:
                error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
-                   handle, foff, &object);
+                   handle, &foff, &object);
                break;
        case OBJT_SWAP:
                error = vm_mmap_shm(td, size, prot, &maxprot, &flags,
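
From userland nothing changes syntactically: the offset argument to
mmap() simply takes on a driver-defined meaning when the driver
implements d_mmap_single().  A hypothetical program against the foo
sketch above; /dev/foo and the 0x1000 cookie are illustrative:

#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>

int
main(void)
{
        int fd;
        void *p;

        fd = open("/dev/foo", O_RDWR);
        if (fd == -1)
                err(1, "open");
        /* 0x1000 is a driver-defined cookie, not a byte offset. */
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
            0x1000);
        if (p == MAP_FAILED)
                err(1, "mmap");
        return (0);
}

MAP_SHARED is effectively mandatory here: as the diff shows,
vm_mmap_cdev() rejects MAP_PRIVATE/MAP_COPY with EINVAL and forces
device mappings to be shared.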