Do not remap usermode pages into KVA for physio.

Sponsored by:	The FreeBSD Foundation
Tested by:	pho
This commit is contained in:
Konstantin Belousov 2013-03-19 14:43:57 +00:00
parent 2cc718a11c
commit e81ff91e62
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=248515
6 changed files with 23 additions and 13 deletions

View File

@@ -850,7 +850,7 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
* into a larger area of VM, or if userland races against
* vmapbuf() after the useracc() check.
*/
if (vmapbuf(mapinfo->bp[i]) < 0) {
if (vmapbuf(mapinfo->bp[i], 1) < 0) {
for (j = 0; j < i; ++j) {
*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
vunmapbuf(mapinfo->bp[j]);

View File

@@ -92,7 +92,7 @@ physio(struct cdev *dev, struct uio *uio, int ioflag)
bp->b_blkno = btodb(bp->b_offset);
if (uio->uio_segflg == UIO_USERSPACE)
if (vmapbuf(bp) < 0) {
if (vmapbuf(bp, 0) < 0) {
error = EFAULT;
goto doerror;
}

View File

@@ -1323,7 +1323,7 @@ aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
/*
* Bring buffer into kernel space.
*/
if (vmapbuf(bp) < 0) {
if (vmapbuf(bp, 1) < 0) {
error = EFAULT;
goto doerror;
}

View File

@@ -4279,7 +4279,7 @@ vm_hold_free_pages(struct buf *bp, int newbsize)
* check the return value.
*/
int
vmapbuf(struct buf *bp)
vmapbuf(struct buf *bp, int mapbuf)
{
caddr_t kva;
vm_prot_t prot;
@@ -4294,12 +4294,19 @@ vmapbuf(struct buf *bp)
(vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
btoc(MAXPHYS))) < 0)
return (-1);
pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
kva = bp->b_saveaddr;
bp->b_npages = pidx;
bp->b_saveaddr = bp->b_data;
bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
if (mapbuf || !unmapped_buf_allowed) {
pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
bp->b_flags &= ~B_UNMAPPED;
} else {
bp->b_flags |= B_UNMAPPED;
bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
bp->b_saveaddr = bp->b_data;
bp->b_data = unmapped_buf;
}
return(0);
}
@@ -4313,7 +4320,10 @@ vunmapbuf(struct buf *bp)
int npages;
npages = bp->b_npages;
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
if (bp->b_flags & B_UNMAPPED)
bp->b_flags &= ~B_UNMAPPED;
else
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
vm_page_unhold_pages(bp->b_pages, npages);
bp->b_data = bp->b_saveaddr;

View File

@@ -524,7 +524,7 @@ void vfs_bio_set_valid(struct buf *, int base, int size);
void vfs_bio_clrbuf(struct buf *);
void vfs_busy_pages(struct buf *, int clear_modify);
void vfs_unbusy_pages(struct buf *);
int vmapbuf(struct buf *);
int vmapbuf(struct buf *, int);
void vunmapbuf(struct buf *);
void relpbuf(struct buf *, int *);
void brelvp(struct buf *);

View File

@@ -241,7 +241,7 @@ ffs_rawread_readahead(struct vnode *vp,
bp->b_bcount = bsize - blockoff * DEV_BSIZE;
bp->b_bufsize = bp->b_bcount;
if (vmapbuf(bp) < 0)
if (vmapbuf(bp, 1) < 0)
return EFAULT;
maybe_yield();
@@ -260,7 +260,7 @@ ffs_rawread_readahead(struct vnode *vp,
bp->b_bcount = bsize * (1 + bforwards) - blockoff * DEV_BSIZE;
bp->b_bufsize = bp->b_bcount;
if (vmapbuf(bp) < 0)
if (vmapbuf(bp, 1) < 0)
return EFAULT;
BO_STRATEGY(&dp->v_bufobj, bp);