vmapbuf: don't smuggle address or length in buf
Instead, add arguments to vmapbuf. Since the address argument is always a
pointer, use a type of void * and cast to vm_offset_t in vmapbuf. (In
CheriBSD we've altered vm_fault_quick_hold_pages to take a pointer and
check its bounds.)

In no other situation does b_data contain a user pointer, and vmapbuf
replaces b_data with the actual mapping.

Suggested by:		jhb
Reviewed by:		imp, jhb
Obtained from:		CheriBSD
MFC after:		1 week
Sponsored by:		DARPA
Differential Revision:	https://reviews.freebsd.org/D26784
commit 44ca4575ea
parent 7ec2f6bce5
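For context, a minimal sketch of the calling convention before and after this change. It is illustrative only: map_user_io, its udata/ulen/is_write parameters, and the include list are not from this commit, and error handling is reduced to the EFAULT case.

/* Illustrative sketch only; not part of the commit. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/bio.h>
#include <sys/buf.h>

/*
 * Hypothetical caller of the new interface.  Previously the same code
 * would have smuggled the user pointer and length through the buf:
 *
 *	bp->b_data = udata;
 *	bp->b_bufsize = ulen;
 *	if (vmapbuf(bp, 1) < 0)
 *		return (EFAULT);
 *
 * Now both are explicit arguments; vmapbuf() wires the user pages, sets
 * b_bufsize and b_offset itself, and (when the final argument is
 * non-zero) replaces b_data with the kernel mapping of those pages.
 */
static int
map_user_io(struct buf *bp, void *udata, size_t ulen, int is_write)
{

        bp->b_iocmd = is_write ? BIO_WRITE : BIO_READ;
        if (vmapbuf(bp, udata, ulen, 1) < 0)
                return (EFAULT);
        return (0);
}

Passing the address as void * rather than vm_offset_t keeps the user pointer intact (including, on CheriBSD, its bounds) until vm_fault_quick_hold_pages can check it.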
@@ -955,18 +955,12 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
                 */
                mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);
 
-               /* put our pointer in the data slot */
-               mapinfo->bp[i]->b_data = *data_ptrs[i];
-
-               /* set the transfer length, we know it's < MAXPHYS */
-               mapinfo->bp[i]->b_bufsize = lengths[i];
-
                /* set the direction */
                mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
                    BIO_WRITE : BIO_READ;
 
                /* Map the buffer into kernel memory. */
-               if (vmapbuf(mapinfo->bp[i], 1) < 0) {
+               if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
                        uma_zfree(pbuf_zone, mapinfo->bp[i]);
                        goto fail;
                }
@@ -1268,10 +1268,8 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
         */
        PHOLD(curproc);
        buf = uma_zalloc(pbuf_zone, M_WAITOK);
-       buf->b_data = pt->buf;
-       buf->b_bufsize = pt->len;
        buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
-       if (vmapbuf(buf, 1) < 0) {
+       if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
                ret = EFAULT;
                goto err;
        }
@@ -4907,22 +4907,21 @@ vm_hold_free_pages(struct buf *bp, int newbsize)
  * This function only works with pager buffers.
  */
 int
-vmapbuf(struct buf *bp, int mapbuf)
+vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
 {
        vm_prot_t prot;
        int pidx;
 
-       if (bp->b_bufsize < 0)
-               return (-1);
        prot = VM_PROT_READ;
        if (bp->b_iocmd == BIO_READ)
                prot |= VM_PROT_WRITE;  /* Less backwards than it looks */
        if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
-           (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
+           (vm_offset_t)uaddr, len, prot, bp->b_pages,
            btoc(MAXPHYS))) < 0)
                return (-1);
+       bp->b_bufsize = len;
        bp->b_npages = pidx;
-       bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
+       bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK;
        if (mapbuf || !unmapped_buf_allowed) {
                pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
                bp->b_data = bp->b_kvabase + bp->b_offset;
@@ -575,7 +575,7 @@ void vfs_bio_set_flags(struct buf *bp, int ioflags);
 void   vfs_bio_set_valid(struct buf *, int base, int size);
 void   vfs_busy_pages(struct buf *, int clear_modify);
 void   vfs_unbusy_pages(struct buf *);
-int    vmapbuf(struct buf *, int);
+int    vmapbuf(struct buf *, void *, size_t, int);
 void   vunmapbuf(struct buf *);
 void   brelvp(struct buf *);
 void   bgetvp(struct vnode *, struct buf *);
@@ -216,7 +216,6 @@ ffs_rawread_readahead(struct vnode *vp,
        bp->b_flags = 0;        /* XXX necessary ? */
        bp->b_iocmd = BIO_READ;
        bp->b_iodone = bdone;
-       bp->b_data = udata;
        blockno = offset / bsize;
        blockoff = (offset % bsize) / DEV_BSIZE;
        if ((daddr_t) blockno != blockno) {
@@ -233,9 +232,8 @@ ffs_rawread_readahead(struct vnode *vp,
 
        if (bp->b_bcount + blockoff * DEV_BSIZE > bsize)
                bp->b_bcount = bsize - blockoff * DEV_BSIZE;
-       bp->b_bufsize = bp->b_bcount;
 
-       if (vmapbuf(bp, 1) < 0)
+       if (vmapbuf(bp, udata, bp->b_bcount, 1) < 0)
                return EFAULT;
 
        maybe_yield();
@@ -252,9 +250,8 @@ ffs_rawread_readahead(struct vnode *vp,
 
        if (bp->b_bcount + blockoff * DEV_BSIZE > bsize * (1 + bforwards))
                bp->b_bcount = bsize * (1 + bforwards) - blockoff * DEV_BSIZE;
-       bp->b_bufsize = bp->b_bcount;
 
-       if (vmapbuf(bp, 1) < 0)
+       if (vmapbuf(bp, udata, bp->b_bcount, 1) < 0)
                return EFAULT;
 
        BO_STRATEGY(&dp->v_bufobj, bp);