Add new make_dev_p(9) flag MAKEDEV_ETERNAL to inform devfs that created

cdev will never be destroyed. Propagate the flag to devfs vnodes as
VV_ETERNALDEV. Use the flags to avoid acquiring devmtx and taking a
thread reference on such nodes.

In collaboration with:	pho
MFC after:	1 month
This commit is contained in:
Konstantin Belousov 2010-08-06 09:42:15 +00:00
parent b1f19c11b6
commit 3979450b4c
9 changed files with 146 additions and 98 deletions

View File

@ -528,6 +528,7 @@ devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de)
struct devfs_rule *dr = &dk->dk_rule;
struct cdev *dev;
struct cdevsw *dsw;
int ref;
dev = devfs_rule_getdev(de);
/*
@ -545,14 +546,14 @@ devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de)
if (dr->dr_icond & DRC_DSWFLAGS) {
if (dev == NULL)
return (0);
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (0);
if ((dsw->d_flags & dr->dr_dswflags) == 0) {
dev_relthread(dev);
dev_relthread(dev, ref);
return (0);
}
dev_relthread(dev);
dev_relthread(dev, ref);
}
if (dr->dr_icond & DRC_PATHPTRN)
if (!devfs_rule_matchpath(dk, de))

View File

@ -82,13 +82,14 @@ struct mtx cdevpriv_mtx;
MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
static int
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp)
devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
int *ref)
{
*dswp = devvn_refthread(fp->f_vnode, devp);
*dswp = devvn_refthread(fp->f_vnode, devp, ref);
if (*devp != fp->f_data) {
if (*dswp != NULL)
dev_relthread(*devp);
dev_relthread(*devp, *ref);
return (ENXIO);
}
KASSERT((*devp)->si_refcount > 0,
@ -401,6 +402,8 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
vp->v_vflag |= VV_ISTTY;
dev_unlock();
VI_UNLOCK(vp);
if ((dev->si_flags & SI_ETERNAL) != 0)
vp->v_vflag |= VV_ETERNALDEV;
vp->v_op = &devfs_specops;
} else if (de->de_dirent->d_type == DT_DIR) {
vp->v_type = VDIR;
@ -465,7 +468,7 @@ devfs_close(struct vop_close_args *ap)
struct thread *td = ap->a_td;
struct cdev *dev = vp->v_rdev;
struct cdevsw *dsw;
int vp_locked, error;
int vp_locked, error, ref;
/*
* XXX: Don't call d_close() if we were called because of
@ -508,7 +511,7 @@ devfs_close(struct vop_close_args *ap)
* sum of the reference counts on all the aliased
* vnodes descends to one, we are on last close.
*/
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
VI_LOCK(vp);
@ -518,7 +521,7 @@ devfs_close(struct vop_close_args *ap)
/* Keep device updated on status. */
} else if (count_dev(dev) > 1) {
VI_UNLOCK(vp);
dev_relthread(dev);
dev_relthread(dev, ref);
return (0);
}
vholdl(vp);
@ -528,7 +531,7 @@ devfs_close(struct vop_close_args *ap)
KASSERT(dev->si_refcount > 0,
("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
dev_relthread(dev);
dev_relthread(dev, ref);
vn_lock(vp, vp_locked | LK_RETRY);
vdrop(vp);
return (error);
@ -646,20 +649,20 @@ devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struc
struct cdevsw *dsw;
struct vnode *vp;
struct vnode *vpold;
int error, i;
int error, i, ref;
const char *p;
struct fiodgname_arg *fgn;
struct file *fpop;
fpop = td->td_fpop;
error = devfs_fp_check(fp, &dev, &dsw);
error = devfs_fp_check(fp, &dev, &dsw, &ref);
if (error)
return (error);
if (com == FIODTYPE) {
*(int *)data = dsw->d_flags & D_TYPEMASK;
td->td_fpop = fpop;
dev_relthread(dev);
dev_relthread(dev, ref);
return (0);
} else if (com == FIODGNAME) {
fgn = data;
@ -670,12 +673,12 @@ devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struc
else
error = copyout(p, fgn->buf, i);
td->td_fpop = fpop;
dev_relthread(dev);
dev_relthread(dev, ref);
return (error);
}
error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
td->td_fpop = NULL;
dev_relthread(dev);
dev_relthread(dev, ref);
if (error == ENOIOCTL)
error = ENOTTY;
if (error == 0 && com == TIOCSCTTY) {
@ -710,18 +713,18 @@ devfs_kqfilter_f(struct file *fp, struct knote *kn)
{
struct cdev *dev;
struct cdevsw *dsw;
int error;
int error, ref;
struct file *fpop;
struct thread *td;
td = curthread;
fpop = td->td_fpop;
error = devfs_fp_check(fp, &dev, &dsw);
error = devfs_fp_check(fp, &dev, &dsw, &ref);
if (error)
return (error);
error = dsw->d_kqfilter(dev, kn);
td->td_fpop = fpop;
dev_relthread(dev);
dev_relthread(dev, ref);
return (error);
}
@ -964,7 +967,7 @@ devfs_open(struct vop_open_args *ap)
struct vnode *vp = ap->a_vp;
struct cdev *dev = vp->v_rdev;
struct file *fp = ap->a_fp;
int error, vlocked;
int error, ref, vlocked;
struct cdevsw *dsw;
struct file *fpop;
@ -978,7 +981,7 @@ devfs_open(struct vop_open_args *ap)
if (dev->si_iosize_max == 0)
dev->si_iosize_max = DFLTPHYS;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
@ -998,7 +1001,7 @@ devfs_open(struct vop_open_args *ap)
td->td_fpop = fpop;
vn_lock(vp, vlocked | LK_RETRY);
dev_relthread(dev);
dev_relthread(dev, ref);
if (error)
return (error);
@ -1041,16 +1044,16 @@ devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
struct cdev *dev;
struct cdevsw *dsw;
int error;
int error, ref;
struct file *fpop;
fpop = td->td_fpop;
error = devfs_fp_check(fp, &dev, &dsw);
error = devfs_fp_check(fp, &dev, &dsw, &ref);
if (error)
return (poll_no_poll(events));
error = dsw->d_poll(dev, events, td);
td->td_fpop = fpop;
dev_relthread(dev);
dev_relthread(dev, ref);
return(error);
}
@ -1070,12 +1073,12 @@ static int
devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
struct cdev *dev;
int ioflag, error, resid;
int ioflag, error, ref, resid;
struct cdevsw *dsw;
struct file *fpop;
fpop = td->td_fpop;
error = devfs_fp_check(fp, &dev, &dsw);
error = devfs_fp_check(fp, &dev, &dsw, &ref);
if (error)
return (error);
resid = uio->uio_resid;
@ -1090,7 +1093,7 @@ devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, st
if (uio->uio_resid != resid || (error == 0 && resid != 0))
vfs_timestamp(&dev->si_atime);
td->td_fpop = fpop;
dev_relthread(dev);
dev_relthread(dev, ref);
if ((flags & FOF_OFFSET) == 0)
fp->f_offset = uio->uio_offset;
@ -1513,12 +1516,12 @@ static int
devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
struct cdev *dev;
int error, ioflag, resid;
int error, ioflag, ref, resid;
struct cdevsw *dsw;
struct file *fpop;
fpop = td->td_fpop;
error = devfs_fp_check(fp, &dev, &dsw);
error = devfs_fp_check(fp, &dev, &dsw, &ref);
if (error)
return (error);
KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
@ -1536,7 +1539,7 @@ devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, s
dev->si_mtime = dev->si_ctime;
}
td->td_fpop = fpop;
dev_relthread(dev);
dev_relthread(dev, ref);
if ((flags & FOF_OFFSET) == 0)
fp->f_offset = uio->uio_offset;

View File

@ -177,12 +177,16 @@ dev_rel(struct cdev *dev)
}
struct cdevsw *
dev_refthread(struct cdev *dev)
dev_refthread(struct cdev *dev, int *ref)
{
struct cdevsw *csw;
struct cdev_priv *cdp;
mtx_assert(&devmtx, MA_NOTOWNED);
if ((dev->si_flags & SI_ETERNAL) != 0) {
*ref = 0;
return (dev->si_devsw);
}
dev_lock();
csw = dev->si_devsw;
if (csw != NULL) {
@ -193,36 +197,59 @@ dev_refthread(struct cdev *dev)
csw = NULL;
}
dev_unlock();
*ref = 1;
return (csw);
}
struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref)
{
struct cdevsw *csw;
struct cdev_priv *cdp;
struct cdev *dev;
mtx_assert(&devmtx, MA_NOTOWNED);
if ((vp->v_vflag & VV_ETERNALDEV) != 0) {
dev = vp->v_rdev;
if (dev == NULL)
return (NULL);
KASSERT((dev->si_flags & SI_ETERNAL) != 0,
("Not eternal cdev"));
*ref = 0;
csw = dev->si_devsw;
KASSERT(csw != NULL, ("Eternal cdev is destroyed"));
*devp = dev;
return (csw);
}
csw = NULL;
dev_lock();
*devp = vp->v_rdev;
if (*devp != NULL) {
cdp = cdev2priv(*devp);
if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
csw = (*devp)->si_devsw;
if (csw != NULL)
(*devp)->si_threadcount++;
}
dev = vp->v_rdev;
if (dev == NULL) {
dev_unlock();
return (NULL);
}
cdp = cdev2priv(dev);
if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
csw = dev->si_devsw;
if (csw != NULL)
dev->si_threadcount++;
}
dev_unlock();
if (csw != NULL) {
*devp = dev;
*ref = 1;
}
return (csw);
}
void
dev_relthread(struct cdev *dev)
dev_relthread(struct cdev *dev, int ref)
{
mtx_assert(&devmtx, MA_NOTOWNED);
if (!ref)
return;
dev_lock();
KASSERT(dev->si_threadcount > 0,
("%s threadcount is wrong", dev->si_name));
@ -325,15 +352,15 @@ static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -341,15 +368,15 @@ static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -357,15 +384,15 @@ static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -374,9 +401,10 @@ giant_strategy(struct bio *bp)
{
struct cdevsw *dsw;
struct cdev *dev;
int ref;
dev = bp->bio_dev;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL) {
biofinish(bp, NULL, ENXIO);
return;
@ -384,22 +412,22 @@ giant_strategy(struct bio *bp)
mtx_lock(&Giant);
dsw->d_gianttrick->d_strategy(bp);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
}
static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -407,15 +435,15 @@ static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -423,15 +451,15 @@ static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -439,15 +467,15 @@ static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_poll(dev, events, td);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -455,15 +483,15 @@ static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -472,16 +500,16 @@ giant_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
vm_memattr_t *memattr)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot,
memattr);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -490,16 +518,16 @@ giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
vm_object_t *object, int nprot)
{
struct cdevsw *dsw;
int retval;
int ref, retval;
dsw = dev_refthread(dev);
dsw = dev_refthread(dev, &ref);
if (dsw == NULL)
return (ENXIO);
mtx_lock(&Giant);
retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object,
nprot);
mtx_unlock(&Giant);
dev_relthread(dev);
dev_relthread(dev, ref);
return (retval);
}
@ -676,6 +704,8 @@ make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit,
dev = newdev(devsw, unit, dev);
if (flags & MAKEDEV_REF)
dev_refl(dev);
if (flags & MAKEDEV_ETERNAL)
dev->si_flags |= SI_ETERNAL;
if (dev->si_flags & SI_CHEAPCLONE &&
dev->si_flags & SI_NAMED) {
/*
@ -840,6 +870,9 @@ destroy_devl(struct cdev *dev)
mtx_assert(&devmtx, MA_OWNED);
KASSERT(dev->si_flags & SI_NAMED,
("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev)));
KASSERT((dev->si_flags & SI_ETERNAL) == 0,
("WARNING: Driver mistake: destroy_dev on eternal %d\n",
dev2unit(dev)));
devfs_destroy(dev);

View File

@ -1797,7 +1797,7 @@ ttyhook_register(struct tty **rtp, struct proc *p, int fd,
struct cdev *dev;
struct cdevsw *cdp;
struct filedesc *fdp;
int error;
int error, ref;
/* Validate the file descriptor. */
if ((fdp = p->p_fd) == NULL)
@ -1823,7 +1823,7 @@ ttyhook_register(struct tty **rtp, struct proc *p, int fd,
}
/* Make sure it is a TTY. */
cdp = devvn_refthread(fp->f_vnode, &dev);
cdp = devvn_refthread(fp->f_vnode, &dev, &ref);
if (cdp == NULL) {
error = ENXIO;
goto done1;
@ -1859,7 +1859,7 @@ ttyhook_register(struct tty **rtp, struct proc *p, int fd,
th->th_rint = ttyhook_defrint;
done3: tty_unlock(tp);
done2: dev_relthread(dev);
done2: dev_relthread(dev, ref);
done1: fdrop(fp, curthread);
return (error);
}

View File

@ -3203,6 +3203,7 @@ dev_strategy(struct cdev *dev, struct buf *bp)
{
struct cdevsw *csw;
struct bio *bip;
int ref;
if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
panic("b_iocmd botch");
@ -3224,7 +3225,7 @@ dev_strategy(struct cdev *dev, struct buf *bp)
KASSERT(dev->si_refcount > 0,
("dev_strategy on un-referenced struct cdev *(%s)",
devtoname(dev)));
csw = dev_refthread(dev);
csw = dev_refthread(dev, &ref);
if (csw == NULL) {
g_destroy_bio(bip);
bp->b_error = ENXIO;
@ -3233,7 +3234,7 @@ dev_strategy(struct cdev *dev, struct buf *bp)
return;
}
(*csw->d_strategy)(bip);
dev_relthread(dev);
dev_relthread(dev, ref);
}
/*

View File

@ -54,6 +54,7 @@ struct file;
struct cdev {
void *__si_reserved;
u_int si_flags;
#define SI_ETERNAL 0x0001 /* never destroyed */
#define SI_ALIAS 0x0002 /* carrier of alias name */
#define SI_NAMED 0x0004 /* make_dev{_alias} has been called */
#define SI_CHEAPCLONE 0x0008 /* can be removed_dev'ed when vnode reclaims */
@ -249,9 +250,9 @@ int destroy_dev_sched(struct cdev *dev);
int destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg);
void destroy_dev_drain(struct cdevsw *csw);
void drain_dev_clone_events(void);
struct cdevsw *dev_refthread(struct cdev *_dev);
struct cdevsw *devvn_refthread(struct vnode *vp, struct cdev **devp);
void dev_relthread(struct cdev *_dev);
struct cdevsw *dev_refthread(struct cdev *_dev, int *_ref);
struct cdevsw *devvn_refthread(struct vnode *vp, struct cdev **devp, int *_ref);
void dev_relthread(struct cdev *_dev, int _ref);
void dev_depends(struct cdev *_pdev, struct cdev *_cdev);
void dev_ref(struct cdev *dev);
void dev_refl(struct cdev *dev);
@ -262,10 +263,11 @@ struct cdev *make_dev(struct cdevsw *_devsw, int _unit, uid_t _uid, gid_t _gid,
struct cdev *make_dev_cred(struct cdevsw *_devsw, int _unit,
struct ucred *_cr, uid_t _uid, gid_t _gid, int _perms,
const char *_fmt, ...) __printflike(7, 8);
#define MAKEDEV_REF 0x1
#define MAKEDEV_WHTOUT 0x2
#define MAKEDEV_NOWAIT 0x4
#define MAKEDEV_WAITOK 0x8
#define MAKEDEV_REF 0x01
#define MAKEDEV_WHTOUT 0x02
#define MAKEDEV_NOWAIT 0x04
#define MAKEDEV_WAITOK 0x08
#define MAKEDEV_ETERNAL 0x10
struct cdev *make_dev_credf(int _flags,
struct cdevsw *_devsw, int _unit,
struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode,
@ -279,6 +281,12 @@ void dev_lock(void);
void dev_unlock(void);
void setconf(void);
#ifdef KLD_MODULE
#define MAKEDEV_ETERNAL_KLD 0
#else
#define MAKEDEV_ETERNAL_KLD MAKEDEV_ETERNAL
#endif
#define dev2unit(d) ((d)->si_drv0)
typedef void (*cdevpriv_dtr_t)(void *data);

View File

@ -243,6 +243,7 @@ struct xvnode {
#define VV_ROOT 0x0001 /* root of its filesystem */
#define VV_ISTTY 0x0002 /* vnode represents a tty */
#define VV_NOSYNC 0x0004 /* unlinked, stop syncing */
#define VV_ETERNALDEV 0x0008 /* device that is never destroyed */
#define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */
#define VV_TEXT 0x0020 /* vnode is a pure text prototype */
#define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */

View File

@ -108,6 +108,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t off;
vm_memattr_t dummy;
struct cdevsw *csw;
int ref;
/*
* Offset should be page aligned.
@ -122,7 +123,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
* Make sure this device can be mapped.
*/
dev = handle;
csw = dev_refthread(dev);
csw = dev_refthread(dev, &ref);
if (csw == NULL)
return (NULL);
@ -135,7 +136,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
npages = OFF_TO_IDX(size);
for (off = foff; npages--; off += PAGE_SIZE)
if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) {
dev_relthread(dev);
dev_relthread(dev, ref);
return (NULL);
}
@ -177,7 +178,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object->size = pindex;
}
mtx_unlock(&dev_pager_mtx);
dev_relthread(dev);
dev_relthread(dev, ref);
vm_object_deallocate(object1);
return (object);
}
@ -214,7 +215,7 @@ dev_pager_getpages(object, m, count, reqpage)
vm_page_t m_paddr, page;
vm_memattr_t memattr;
struct cdev *dev;
int i, ret;
int i, ref, ret;
struct cdevsw *csw;
struct thread *td;
struct file *fpop;
@ -225,7 +226,7 @@ dev_pager_getpages(object, m, count, reqpage)
offset = page->pindex;
memattr = object->memattr;
VM_OBJECT_UNLOCK(object);
csw = dev_refthread(dev);
csw = dev_refthread(dev, &ref);
if (csw == NULL)
panic("dev_pager_getpage: no cdevsw");
td = curthread;
@ -235,7 +236,7 @@ dev_pager_getpages(object, m, count, reqpage)
PROT_READ, &memattr);
KASSERT(ret == 0, ("dev_pager_getpage: map function returns error"));
td->td_fpop = fpop;
dev_relthread(dev);
dev_relthread(dev, ref);
/* If "paddr" is a real page, perform a sanity check on "memattr". */
if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
pmap_page_get_memattr(m_paddr) != memattr) {

View File

@ -1292,15 +1292,15 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
{
vm_object_t obj;
struct cdevsw *dsw;
int error, flags;
int error, flags, ref;
flags = *flagsp;
dsw = dev_refthread(cdev);
dsw = dev_refthread(cdev, &ref);
if (dsw == NULL)
return (ENXIO);
if (dsw->d_flags & D_MMAP_ANON) {
dev_relthread(cdev);
dev_relthread(cdev, ref);
*maxprotp = VM_PROT_ALL;
*flagsp |= MAP_ANON;
return (0);
@ -1310,11 +1310,11 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
*/
if ((*maxprotp & VM_PROT_WRITE) == 0 &&
(prot & PROT_WRITE) != 0) {
dev_relthread(cdev);
dev_relthread(cdev, ref);
return (EACCES);
}
if (flags & (MAP_PRIVATE|MAP_COPY)) {
dev_relthread(cdev);
dev_relthread(cdev, ref);
return (EINVAL);
}
/*
@ -1324,7 +1324,7 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
#ifdef MAC_XXX
error = mac_cdev_check_mmap(td->td_ucred, cdev, prot);
if (error != 0) {
dev_relthread(cdev);
dev_relthread(cdev, ref);
return (error);
}
#endif
@ -1338,7 +1338,7 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
* XXX assumes VM_PROT_* == PROT_*
*/
error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
dev_relthread(cdev);
dev_relthread(cdev, ref);
if (error != ENODEV)
return (error);
obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,