Give physio a makeover.

- Let physio take read/write-compatible args and have it use uio->uio_rw
  to determine the direction (see the first sketch after this list).

- physread/physwrite are now #defines for physio

- Remove the inversely named minphys(); dev->si_iosize_max takes over.

- Physio() always uses pbufs.

- Fix the check for non-page-aligned transfers; now only unaligned
  transfers larger than (MAXPHYS - PAGE_SIZE) get fragmented (only
  interesting for tapes using the maximum blocksize).  See the second
  sketch after this list.

- General wash-and-clean of code.
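
Purely for illustration (not part of the commit): a minimal sketch of what a
hypothetical "foo" driver's entry points look like against the new interface.
The names fooread, foowrite and foo_set_limit and the 64k value are invented
here; physio(), dev->si_iosize_max and the DFLTPHYS fallback are what the
commit itself provides.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/conf.h>
    #include <sys/uio.h>
    #include <sys/buf.h>

    /*
     * Both directions funnel into physio(), which looks at uio->uio_rw
     * itself; since physread/physwrite are now plain #defines for physio,
     * a driver can also point d_read/d_write at those directly.
     */
    static int
    fooread(dev_t dev, struct uio *uio, int ioflag)
    {
            return (physio(dev, uio, ioflag));
    }

    static int
    foowrite(dev_t dev, struct uio *uio, int ioflag)
    {
            return (physio(dev, uio, ioflag));
    }

    /*
     * The per-device limit replaces the old minphys() hook; physio()
     * warns and falls back to DFLTPHYS if a driver leaves it below
     * PAGE_SIZE.  The 64k value here is purely illustrative.
     */
    static void
    foo_set_limit(dev_t dev)
    {
            dev->si_iosize_max = 64 * 1024;
    }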
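
The fragmentation fix is just arithmetic on the pbuf's KVA window; this second
sketch restates it as a self-contained userland program.  EX_PAGE_SIZE and
EX_MAXPHYS are local stand-ins for the kernel's PAGE_SIZE and the MAXPHYS-sized
pbuf mapping; the actual MAXPHYS value is not taken from this commit.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins so the sketch compiles on its own. */
    #define EX_PAGE_SIZE    4096u
    #define EX_PAGE_MASK    (EX_PAGE_SIZE - 1)
    #define EX_MAXPHYS      (128u * 1024)   /* pbuf KVA window */

    /*
     * Mirror of the new clamp in physio(): the pbuf maps EX_MAXPHYS bytes
     * of KVA, and a transfer that does not start on a page boundary needs
     * one extra page of mapping, so only unaligned requests larger than
     * EX_MAXPHYS - EX_PAGE_SIZE are cut short (fragmented).
     */
    static size_t
    clamp_transfer(uintptr_t uaddr, size_t len)
    {
            size_t off = uaddr & EX_PAGE_MASK;

            if (len + off > EX_MAXPHYS) {
                    len = EX_MAXPHYS;
                    if (off != 0)
                            len -= EX_PAGE_SIZE;
            }
            return (len);
    }

    int
    main(void)
    {
            /* Unaligned 120k request: fits in the window, left intact. */
            printf("%zu\n", clamp_transfer(0x1200, 120u * 1024));
            /* Unaligned full-window request: trimmed by one page. */
            printf("%zu\n", clamp_transfer(0x1200, 128u * 1024));
            return (0);
    }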

Constructive input from: bde
Poul-Henning Kamp 1999-10-09 19:44:32 +00:00
parent f63b242474
commit 7179e74f18
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=52066
3 changed files with 60 additions and 131 deletions

View File

@@ -29,175 +29,108 @@
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
-static void physwakeup __P((struct buf *bp));
-int
-physread(dev_t dev, struct uio *uio, int ioflag)
+static void
+physwakeup(struct buf *bp)
 {
-        return(physio(NULL, dev, 1, minphys, uio));
+        wakeup((caddr_t) bp);
+        bp->b_flags &= ~B_CALL;
 }
-int
-physwrite(dev_t dev, struct uio *uio, int ioflag)
-{
-        return(physio(NULL, dev, 0, minphys, uio));
-}
 int
-physio(bp, dev, rw, minp, uio)
-        struct buf *bp;
-        dev_t dev;
-        int rw;
-        u_int (*minp) __P((struct buf *bp));
-        struct uio *uio;
+physio(dev_t dev, struct uio *uio, int ioflag)
 {
         int i;
-        int bufflags = rw?B_READ:0;
         int error;
         int spl;
         caddr_t sa;
         off_t blockno;
-        int bp_alloc = (bp == 0);
-        struct buf *bpa;
+        u_int iolen;
+        struct buf *bp;
-        /*
-         * Keep the process UPAGES from being swapped. (XXX for performance?)
-         */
+        /* Keep the process UPAGES from being swapped. XXX: why ? */
         PHOLD(curproc);
-        /* create and build a buffer header for a transfer */
-        bpa = getpbuf(NULL);
-        if (!bp_alloc)
-                BUF_LOCK(bp, LK_EXCLUSIVE);
-        else
-                bp = bpa;
-        /*
-         * get a copy of the kva from the physical buffer
-         */
-        sa = bpa->b_data;
+        bp = getpbuf(NULL);
+        sa = bp->b_data;
         error = bp->b_error = 0;
+        /* XXX: sanity check */
+        if(dev->si_iosize_max < PAGE_SIZE) {
+                printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
+                    devtoname(dev), dev->si_iosize_max);
+                dev->si_iosize_max = DFLTPHYS;
+        }
         for (i = 0; i < uio->uio_iovcnt; i++) {
                 while (uio->uio_iov[i].iov_len) {
+                        if (uio->uio_rw == UIO_READ)
+                                bp->b_flags = B_PHYS | B_CALL | B_READ;
+                        else
+                                bp->b_flags = B_PHYS | B_CALL | B_WRITE;
                         bp->b_dev = dev;
-                        bp->b_bcount = uio->uio_iov[i].iov_len;
-                        bp->b_flags = B_PHYS | B_CALL | bufflags;
                         bp->b_iodone = physwakeup;
                         bp->b_data = uio->uio_iov[i].iov_base;
-                        bp->b_bcount = minp( bp);
-                        if( minp != minphys)
-                                bp->b_bcount = minphys( bp);
-                        bp->b_bufsize = bp->b_bcount;
-                        /*
-                         * pass in the kva from the physical buffer
-                         * for the temporary kernel mapping.
-                         */
+                        bp->b_bcount = uio->uio_iov[i].iov_len;
+                        bp->b_offset = uio->uio_offset;
                         bp->b_saveaddr = sa;
-                        blockno = uio->uio_offset >> DEV_BSHIFT;
+                        /* Don't exceed drivers iosize limit */
+                        if (bp->b_bcount > dev->si_iosize_max)
+                                bp->b_bcount = dev->si_iosize_max;
+                        /*
+                         * Make sure the pbuf can map the request
+                         * XXX: The pbuf has kvasize = MAXPHYS so a request
+                         * XXX: larger than MAXPHYS - PAGE_SIZE must be
+                         * XXX: page aligned or it will be fragmented.
+                         */
+                        iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
+                        if ((bp->b_bcount + iolen) > bp->b_kvasize) {
+                                bp->b_bcount = bp->b_kvasize;
+                                if (iolen != 0)
+                                        bp->b_bcount -= PAGE_SIZE;
+                        }
+                        bp->b_bufsize = bp->b_bcount;
+                        blockno = bp->b_offset >> DEV_BSHIFT;
                         if ((daddr_t)blockno != blockno) {
-                                error = EINVAL;
+                                error = EINVAL; /* blockno overflow */
                                 goto doerror;
                         }
                         bp->b_blkno = blockno;
-                        bp->b_offset = uio->uio_offset;
                         if (uio->uio_segflg == UIO_USERSPACE) {
-                                if (rw && !useracc(bp->b_data, bp->b_bufsize, B_WRITE)) {
+                                if (!useracc(bp->b_data, bp->b_bufsize,
+                                    bp->b_flags & B_READ)) {
                                         error = EFAULT;
                                         goto doerror;
                                 }
-                                if (!rw && !useracc(bp->b_data, bp->b_bufsize, B_READ)) {
-                                        error = EFAULT;
-                                        goto doerror;
-                                }
-                                /* bring buffer into kernel space */
                                 vmapbuf(bp);
                         }
-                        /* perform transfer */
                        BUF_STRATEGY(bp, 0);
                        spl = splbio();
                        while ((bp->b_flags & B_DONE) == 0)
                                tsleep((caddr_t)bp, PRIBIO, "physstr", 0);
                        splx(spl);
-                        /* release mapping into kernel space */
                        if (uio->uio_segflg == UIO_USERSPACE)
                                vunmapbuf(bp);
-                        /*
-                         * update the uio data
-                         */
-                        {
-                                int iolen = bp->b_bcount - bp->b_resid;
-                                if (iolen == 0 && !(bp->b_flags & B_ERROR))
-                                        goto doerror; /* EOF */
-                                uio->uio_iov[i].iov_len -= iolen;
-                                uio->uio_iov[i].iov_base += iolen;
-                                uio->uio_resid -= iolen;
-                                uio->uio_offset += iolen;
-                        }
-                        /*
-                         * check for an error
-                         */
+                        iolen = bp->b_bcount - bp->b_resid;
+                        if (iolen == 0 && !(bp->b_flags & B_ERROR))
+                                goto doerror; /* EOF */
+                        uio->uio_iov[i].iov_len -= iolen;
+                        uio->uio_iov[i].iov_base += iolen;
+                        uio->uio_resid -= iolen;
+                        uio->uio_offset += iolen;
                        if( bp->b_flags & B_ERROR) {
                                error = bp->b_error;
                                goto doerror;
                        }
                }
        }
 doerror:
-        relpbuf(bpa, NULL);
-        if (!bp_alloc) {
-                bp->b_flags &= ~B_PHYS;
-                BUF_UNLOCK(bp);
-        }
-        /*
-         * Allow the process UPAGES to be swapped again.
-         */
+        relpbuf(bp, NULL);
        PRELE(curproc);
        return (error);
 }
-u_int
-minphys(bp)
-        struct buf *bp;
-{
-        u_int maxphys;
-        maxphys = bp->b_dev->si_iosize_max;
-        if(!maxphys) {
-                printf("WARNING: %s maxphys = 0 ??\n", devtoname(bp->b_dev));
-                maxphys = DFLTPHYS;
-                bp->b_dev->si_iosize_max = DFLTPHYS;
-        }
-        if (bp->b_kvasize && (bp->b_kvasize < maxphys))
-                maxphys = bp->b_kvasize;
-        if(((vm_offset_t) bp->b_data) & PAGE_MASK) {
-                maxphys -= PAGE_SIZE;
-        }
-        if( bp->b_bcount > maxphys) {
-                bp->b_bcount = maxphys;
-        }
-        return bp->b_bcount;
-}
-static void
-physwakeup(bp)
-        struct buf *bp;
-{
-        wakeup((caddr_t) bp);
-        bp->b_flags &= ~B_CALL;
-}

View File

@@ -480,11 +480,9 @@ int cluster_read __P((struct vnode *, u_quad_t, daddr_t, long,
         struct ucred *, long, int, struct buf **));
 int cluster_wbuild __P((struct vnode *, long, daddr_t, int));
 void cluster_write __P((struct buf *, u_quad_t));
-int physio __P((struct buf *, dev_t, int, u_int (*)(struct buf *),
-        struct uio *));
-int physread __P((dev_t dev, struct uio *uio, int ioflag));
-int physwrite __P((dev_t dev, struct uio *uio, int ioflag));
-u_int minphys __P((struct buf *));
+int physio __P((dev_t dev, struct uio *uio, int ioflag));
+#define physread physio
+#define physwrite physio
 void vfs_bio_set_validclean __P((struct buf *, int base, int size));
 void vfs_bio_clrbuf __P((struct buf *));
 void vfs_busy_pages __P((struct buf *, int clear_modify));

View File

@@ -480,11 +480,9 @@ int cluster_read __P((struct vnode *, u_quad_t, daddr_t, long,
         struct ucred *, long, int, struct buf **));
 int cluster_wbuild __P((struct vnode *, long, daddr_t, int));
 void cluster_write __P((struct buf *, u_quad_t));
-int physio __P((struct buf *, dev_t, int, u_int (*)(struct buf *),
-        struct uio *));
-int physread __P((dev_t dev, struct uio *uio, int ioflag));
-int physwrite __P((dev_t dev, struct uio *uio, int ioflag));
-u_int minphys __P((struct buf *));
+int physio __P((dev_t dev, struct uio *uio, int ioflag));
+#define physread physio
+#define physwrite physio
 void vfs_bio_set_validclean __P((struct buf *, int base, int size));
 void vfs_bio_clrbuf __P((struct buf *));
 void vfs_busy_pages __P((struct buf *, int clear_modify));