Refactor unmapped buffer address handling.
- Use pointer assignment rather than a combination of pointers and flags to switch buffers between unmapped and mapped. This eliminates multiple flags and generally simplifies the logic.
- Eliminate b_saveaddr since it is only used with pager bufs, which have their b_data re-initialized on each allocation.
- Gather up some convenience routines in the buffer cache for manipulating buf space and buf malloc space.
- Add an inline, buf_mapped(), to standardize checks around unmapped buffers.

In collaboration with: mlaier
Reviewed by: kib
Tested by: pho (many small revisions ago)
Sponsored by: EMC / Isilon Storage Division
parent ab2a653990
commit fade8dd714
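The core idea described in the commit message — replacing the B_UNMAPPED/B_KVAALLOC flag pair with a single sentinel data address and a buf_mapped() check — can be illustrated outside the kernel. The following is a minimal userland sketch, not the kernel implementation: the struct is reduced to the fields the idiom needs, and the unmapped_buf sentinel is modeled with a static array rather than reserved KVA.

#include <stdio.h>

/* Simplified stand-in for struct buf: only the fields the idiom needs. */
struct buf {
	char	*b_data;	/* current data address */
	char	*b_kvabase;	/* base KVA for the buffer, if any */
	int	 b_kvasize;
};

/* Sentinel address; in the kernel this is a reserved, never-mapped KVA range. */
static char unmapped_sentinel[1];
static char *unmapped_buf = unmapped_sentinel;

/* Equivalent of the new buf_mapped() inline: mapped iff b_data is a real address. */
static inline int
buf_mapped(struct buf *bp)
{
	return (bp->b_data != unmapped_buf);
}

/* Switching states is plain pointer assignment; no flags are involved. */
static void
buf_make_unmapped(struct buf *bp)
{
	bp->b_data = unmapped_buf;
}

static void
buf_make_mapped(struct buf *bp)
{
	bp->b_data = bp->b_kvabase;
}

int
main(void)
{
	static char kva[4096];
	struct buf bp = { .b_data = kva, .b_kvabase = kva, .b_kvasize = sizeof(kva) };

	printf("mapped: %d\n", buf_mapped(&bp));	/* 1 */
	buf_make_unmapped(&bp);
	printf("mapped: %d\n", buf_mapped(&bp));	/* 0 */
	buf_make_mapped(&bp);
	printf("mapped: %d\n", buf_mapped(&bp));	/* 1 */
	return (0);
}

Because the unmapped state is encoded in the pointer itself, every place that used to test B_UNMAPPED can simply compare against the sentinel, which is what the diff below does throughout.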
@@ -855,12 +855,12 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
 		 */
 		mapinfo->bp[i] = getpbuf(NULL);
 
-		/* save the buffer's data address */
-		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
-
 		/* put our pointer in the data slot */
 		mapinfo->bp[i]->b_data = *data_ptrs[i];
 
+		/* save the user's data address */
+		mapinfo->bp[i]->b_caller1 = *data_ptrs[i];
+
 		/* set the transfer length, we know it's < MAXPHYS */
 		mapinfo->bp[i]->b_bufsize = lengths[i];
 
@@ -877,7 +877,7 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
 		 */
 		if (vmapbuf(mapinfo->bp[i], 1) < 0) {
 			for (j = 0; j < i; ++j) {
-				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
+				*data_ptrs[j] = mapinfo->bp[j]->b_caller1;
 				vunmapbuf(mapinfo->bp[j]);
 				relpbuf(mapinfo->bp[j], NULL);
 			}
@@ -958,7 +958,7 @@ cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
 
 	for (i = 0; i < numbufs; i++) {
 		/* Set the user's pointer back to the original value */
-		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
+		*data_ptrs[i] = mapinfo->bp[i]->b_caller1;
 
 		/* unmap the buffer */
 		vunmapbuf(mapinfo->bp[i]);
@@ -840,7 +840,6 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
 	 */
 	PHOLD(curproc);
 	buf = getpbuf(NULL);
-	buf->b_saveaddr = buf->b_data;
 	buf->b_data = pt->buf;
 	buf->b_bufsize = pt->len;
 	buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
@@ -137,12 +137,11 @@ SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
     &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
 #else
 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
-    "Virtual memory used for buffers");
+    "Physical memory used for buffers");
 #endif
-static long unmapped_bufspace;
-SYSCTL_LONG(_vfs, OID_AUTO, unmapped_bufspace, CTLFLAG_RD,
-    &unmapped_bufspace, 0,
-    "Amount of unmapped buffers, inclusive in the bufspace");
+static long bufkvaspace;
+SYSCTL_LONG(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, 0,
+    "Kernel virtual memory used for buffers");
 static long maxbufspace;
 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
     "Maximum allowed value of bufspace (including buf_daemon)");
@@ -454,15 +453,14 @@ bdirtyadd(void)
  *	sufficient buffer space.  Buffer space becomes recoverable when
  *	bp's get placed back in the queues.
  */
-
 static __inline void
 bufspacewakeup(void)
 {
 	int need_wakeup, on;
 
 	/*
-	 * If someone is waiting for BUF space, wake them up.  Even
-	 * though we haven't freed the kva space yet, the waiting
+	 * If someone is waiting for bufspace, wake them up.  Even
+	 * though we may not have freed the kva space yet, the waiting
 	 * process will be able to now.
 	 */
 	rw_rlock(&nblock);
@@ -481,6 +479,50 @@ bufspacewakeup(void)
 	rw_runlock(&nblock);
 }
 
+/*
+ *	bufspaceadjust:
+ *
+ *	Adjust the reported bufspace for a KVA managed buffer, possibly
+ *	waking any waiters.
+ */
+static void
+bufspaceadjust(struct buf *bp, int bufsize)
+{
+	int diff;
+
+	KASSERT((bp->b_flags & B_MALLOC) == 0,
+	    ("bufspaceadjust: malloc buf %p", bp));
+	diff = bufsize - bp->b_bufsize;
+	if (diff < 0) {
+		atomic_subtract_long(&bufspace, -diff);
+		bufspacewakeup();
+	} else
+		atomic_add_long(&bufspace, diff);
+	bp->b_bufsize = bufsize;
+}
+
+/*
+ *	bufmallocadjust:
+ *
+ *	Adjust the reported bufspace for a malloc managed buffer, possibly
+ *	waking any waiters.
+ */
+static void
+bufmallocadjust(struct buf *bp, int bufsize)
+{
+	int diff;
+
+	KASSERT((bp->b_flags & B_MALLOC) != 0,
+	    ("bufmallocadjust: non-malloc buf %p", bp));
+	diff = bufsize - bp->b_bufsize;
+	if (diff < 0) {
+		atomic_subtract_long(&bufmallocspace, -diff);
+		bufspacewakeup();
+	} else
+		atomic_add_long(&bufmallocspace, diff);
+	bp->b_bufsize = bufsize;
+}
+
 /*
  *	runningwakeup:
  *
@@ -817,6 +859,8 @@ bufinit(void)
 	for (i = 0; i < BUFFER_QUEUES; i++)
 		TAILQ_INIT(&bufqueues[i]);
 
+	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
+
 	/* finally, initialize each buffer header and stick on empty q */
 	for (i = 0; i < nbuf; i++) {
 		bp = &buf[i];
@@ -826,6 +870,7 @@ bufinit(void)
 		bp->b_wcred = NOCRED;
 		bp->b_qindex = QUEUE_EMPTY;
 		bp->b_xflags = 0;
+		bp->b_data = bp->b_kvabase = unmapped_buf;
 		LIST_INIT(&bp->b_dep);
 		BUF_LOCKINIT(bp);
 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
@@ -900,7 +945,6 @@ bufinit(void)
 
 	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
-	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
 }
 
 #ifdef INVARIANTS
@@ -908,8 +952,6 @@ static inline void
 vfs_buf_check_mapped(struct buf *bp)
 {
 
-	KASSERT((bp->b_flags & B_UNMAPPED) == 0,
-	    ("mapped buf %p %x", bp, bp->b_flags));
 	KASSERT(bp->b_kvabase != unmapped_buf,
 	    ("mapped buf: b_kvabase was not updated %p", bp));
 	KASSERT(bp->b_data != unmapped_buf,
@@ -920,10 +962,6 @@ static inline void
 vfs_buf_check_unmapped(struct buf *bp)
 {
 
-	KASSERT((bp->b_flags & B_UNMAPPED) == B_UNMAPPED,
-	    ("unmapped buf %p %x", bp, bp->b_flags));
-	KASSERT(bp->b_kvabase == unmapped_buf,
-	    ("unmapped buf: corrupted b_kvabase %p", bp));
 	KASSERT(bp->b_data == unmapped_buf,
 	    ("unmapped buf: corrupted b_data %p", bp));
 }
@ -951,37 +989,6 @@ bpmap_qenter(struct buf *bp)
|
|||||||
(vm_offset_t)(bp->b_offset & PAGE_MASK));
|
(vm_offset_t)(bp->b_offset & PAGE_MASK));
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* bfreekva() - free the kva allocation for a buffer.
|
|
||||||
*
|
|
||||||
* Since this call frees up buffer space, we call bufspacewakeup().
|
|
||||||
*/
|
|
||||||
static void
|
|
||||||
bfreekva(struct buf *bp)
|
|
||||||
{
|
|
||||||
|
|
||||||
if (bp->b_kvasize == 0)
|
|
||||||
return;
|
|
||||||
|
|
||||||
atomic_add_int(&buffreekvacnt, 1);
|
|
||||||
atomic_subtract_long(&bufspace, bp->b_kvasize);
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
|
||||||
BUF_CHECK_MAPPED(bp);
|
|
||||||
vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase,
|
|
||||||
bp->b_kvasize);
|
|
||||||
} else {
|
|
||||||
BUF_CHECK_UNMAPPED(bp);
|
|
||||||
if ((bp->b_flags & B_KVAALLOC) != 0) {
|
|
||||||
vmem_free(buffer_arena, (vm_offset_t)bp->b_kvaalloc,
|
|
||||||
bp->b_kvasize);
|
|
||||||
}
|
|
||||||
atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize);
|
|
||||||
bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
|
|
||||||
}
|
|
||||||
bp->b_kvasize = 0;
|
|
||||||
bufspacewakeup();
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* binsfree:
|
* binsfree:
|
||||||
*
|
*
|
||||||
@ -1103,6 +1110,75 @@ bremfreel(struct buf *bp)
|
|||||||
bufcountsub(bp);
|
bufcountsub(bp);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* bufkvafree:
|
||||||
|
*
|
||||||
|
* Free the kva allocation for a buffer.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static void
|
||||||
|
bufkvafree(struct buf *bp)
|
||||||
|
{
|
||||||
|
|
||||||
|
#ifdef INVARIANTS
|
||||||
|
if (bp->b_kvasize == 0) {
|
||||||
|
KASSERT(bp->b_kvabase == unmapped_buf &&
|
||||||
|
bp->b_data == unmapped_buf,
|
||||||
|
("Leaked KVA space on %p", bp));
|
||||||
|
} else if (buf_mapped(bp))
|
||||||
|
BUF_CHECK_MAPPED(bp);
|
||||||
|
else
|
||||||
|
BUF_CHECK_UNMAPPED(bp);
|
||||||
|
#endif
|
||||||
|
if (bp->b_kvasize == 0)
|
||||||
|
return;
|
||||||
|
|
||||||
|
vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
|
||||||
|
atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
|
||||||
|
atomic_add_int(&buffreekvacnt, 1);
|
||||||
|
bp->b_data = bp->b_kvabase = unmapped_buf;
|
||||||
|
bp->b_kvasize = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* bufkvaalloc:
|
||||||
|
*
|
||||||
|
* Allocate the buffer KVA and set b_kvasize and b_kvabase.
|
||||||
|
*/
|
||||||
|
static int
|
||||||
|
bufkvaalloc(struct buf *bp, int maxsize, int gbflags)
|
||||||
|
{
|
||||||
|
vm_offset_t addr;
|
||||||
|
int error;
|
||||||
|
|
||||||
|
KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
|
||||||
|
("Invalid gbflags 0x%x in %s", gbflags, __func__));
|
||||||
|
|
||||||
|
bufkvafree(bp);
|
||||||
|
|
||||||
|
addr = 0;
|
||||||
|
error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
|
||||||
|
if (error != 0) {
|
||||||
|
/*
|
||||||
|
* Buffer map is too fragmented. Request the caller
|
||||||
|
* to defragment the map.
|
||||||
|
*/
|
||||||
|
atomic_add_int(&bufdefragcnt, 1);
|
||||||
|
return (error);
|
||||||
|
}
|
||||||
|
bp->b_kvabase = (caddr_t)addr;
|
||||||
|
bp->b_kvasize = maxsize;
|
||||||
|
atomic_add_long(&bufkvaspace, bp->b_kvasize);
|
||||||
|
if ((gbflags & GB_UNMAPPED) != 0) {
|
||||||
|
bp->b_data = unmapped_buf;
|
||||||
|
BUF_CHECK_UNMAPPED(bp);
|
||||||
|
} else {
|
||||||
|
bp->b_data = bp->b_kvabase;
|
||||||
|
BUF_CHECK_MAPPED(bp);
|
||||||
|
}
|
||||||
|
return (0);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Attempt to initiate asynchronous I/O on read-ahead blocks. We must
|
* Attempt to initiate asynchronous I/O on read-ahead blocks. We must
|
||||||
* clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set,
|
* clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set,
|
||||||
@ -1715,7 +1791,8 @@ brelse(struct buf *bp)
|
|||||||
}
|
}
|
||||||
VM_OBJECT_RUNLOCK(obj);
|
VM_OBJECT_RUNLOCK(obj);
|
||||||
|
|
||||||
if ((bp->b_flags & (B_INVAL | B_UNMAPPED)) == 0) {
|
if ((bp->b_flags & B_INVAL) == 0 &&
|
||||||
|
buf_mapped(bp)) {
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
pmap_qenter(
|
pmap_qenter(
|
||||||
trunc_page((vm_offset_t)bp->b_data),
|
trunc_page((vm_offset_t)bp->b_data),
|
||||||
@ -1872,7 +1949,7 @@ vfs_vmio_release(struct buf *bp)
|
|||||||
vm_page_t m;
|
vm_page_t m;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
|
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
|
||||||
} else
|
} else
|
||||||
@ -1905,10 +1982,8 @@ vfs_vmio_release(struct buf *bp)
|
|||||||
if (obj != NULL)
|
if (obj != NULL)
|
||||||
VM_OBJECT_WUNLOCK(obj);
|
VM_OBJECT_WUNLOCK(obj);
|
||||||
|
|
||||||
if (bp->b_bufsize) {
|
if (bp->b_bufsize)
|
||||||
bufspacewakeup();
|
bufspaceadjust(bp, 0);
|
||||||
bp->b_bufsize = 0;
|
|
||||||
}
|
|
||||||
bp->b_npages = 0;
|
bp->b_npages = 0;
|
||||||
bp->b_flags &= ~B_VMIO;
|
bp->b_flags &= ~B_VMIO;
|
||||||
if (bp->b_vp)
|
if (bp->b_vp)
|
||||||
@ -1977,7 +2052,7 @@ vfs_bio_awrite(struct buf *bp)
|
|||||||
int gbflags;
|
int gbflags;
|
||||||
|
|
||||||
bo = &vp->v_bufobj;
|
bo = &vp->v_bufobj;
|
||||||
gbflags = (bp->b_flags & B_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
|
gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
|
||||||
/*
|
/*
|
||||||
* right now we support clustered writing only to regular files. If
|
* right now we support clustered writing only to regular files. If
|
||||||
* we find a clusterable block we could be in the middle of a cluster
|
* we find a clusterable block we could be in the middle of a cluster
|
||||||
@ -2026,49 +2101,6 @@ vfs_bio_awrite(struct buf *bp)
|
|||||||
return (nwritten);
|
return (nwritten);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
|
||||||
setbufkva(struct buf *bp, vm_offset_t addr, int maxsize, int gbflags)
|
|
||||||
{
|
|
||||||
|
|
||||||
KASSERT((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
|
|
||||||
bp->b_kvasize == 0, ("call bfreekva(%p)", bp));
|
|
||||||
if ((gbflags & GB_UNMAPPED) == 0) {
|
|
||||||
bp->b_kvabase = (caddr_t)addr;
|
|
||||||
} else if ((gbflags & GB_KVAALLOC) != 0) {
|
|
||||||
KASSERT((gbflags & GB_UNMAPPED) != 0,
|
|
||||||
("GB_KVAALLOC without GB_UNMAPPED"));
|
|
||||||
bp->b_kvaalloc = (caddr_t)addr;
|
|
||||||
bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
|
|
||||||
atomic_add_long(&unmapped_bufspace, bp->b_kvasize);
|
|
||||||
}
|
|
||||||
bp->b_kvasize = maxsize;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate the buffer KVA and set b_kvasize. Also set b_kvabase if
|
|
||||||
* needed.
|
|
||||||
*/
|
|
||||||
static int
|
|
||||||
allocbufkva(struct buf *bp, int maxsize, int gbflags)
|
|
||||||
{
|
|
||||||
vm_offset_t addr;
|
|
||||||
|
|
||||||
bfreekva(bp);
|
|
||||||
addr = 0;
|
|
||||||
|
|
||||||
if (vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr)) {
|
|
||||||
/*
|
|
||||||
* Buffer map is too fragmented. Request the caller
|
|
||||||
* to defragment the map.
|
|
||||||
*/
|
|
||||||
atomic_add_int(&bufdefragcnt, 1);
|
|
||||||
return (1);
|
|
||||||
}
|
|
||||||
setbufkva(bp, addr, maxsize, gbflags);
|
|
||||||
atomic_add_long(&bufspace, bp->b_kvasize);
|
|
||||||
return (0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Ask the bufdaemon for help, or act as bufdaemon itself, when a
|
* Ask the bufdaemon for help, or act as bufdaemon itself, when a
|
||||||
* locked vnode is supplied.
|
* locked vnode is supplied.
|
||||||
@ -2192,7 +2224,7 @@ getnewbuf_reuse_bp(struct buf *bp, int qindex)
|
|||||||
if (bp->b_bufsize)
|
if (bp->b_bufsize)
|
||||||
allocbuf(bp, 0);
|
allocbuf(bp, 0);
|
||||||
|
|
||||||
bp->b_flags &= B_UNMAPPED | B_KVAALLOC;
|
bp->b_flags = 0;
|
||||||
bp->b_ioflags = 0;
|
bp->b_ioflags = 0;
|
||||||
bp->b_xflags = 0;
|
bp->b_xflags = 0;
|
||||||
KASSERT((bp->b_flags & B_INFREECNT) == 0,
|
KASSERT((bp->b_flags & B_INFREECNT) == 0,
|
||||||
@ -2328,14 +2360,11 @@ getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
|
|||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* If we are defragging then we need a buffer with
|
* If we are defragging then we need a buffer with
|
||||||
* b_kvasize != 0. XXX this situation should no longer
|
* b_kvasize != 0. This situation occurs when we
|
||||||
* occur, if defrag is non-zero the buffer's b_kvasize
|
* have many unmapped bufs.
|
||||||
* should also be non-zero at this point. XXX
|
|
||||||
*/
|
*/
|
||||||
if (defrag && bp->b_kvasize == 0) {
|
if (defrag && bp->b_kvasize == 0)
|
||||||
printf("Warning: defrag empty buffer %p\n", bp);
|
|
||||||
continue;
|
continue;
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Start freeing the bp. This is somewhat involved. nbp
|
* Start freeing the bp. This is somewhat involved. nbp
|
||||||
@ -2380,7 +2409,7 @@ getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
|
|||||||
*/
|
*/
|
||||||
if (defrag) {
|
if (defrag) {
|
||||||
bp->b_flags |= B_INVAL;
|
bp->b_flags |= B_INVAL;
|
||||||
bfreekva(bp);
|
bufkvafree(bp);
|
||||||
brelse(bp);
|
brelse(bp);
|
||||||
defrag = 0;
|
defrag = 0;
|
||||||
goto restart;
|
goto restart;
|
||||||
@ -2392,7 +2421,7 @@ getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
|
|||||||
*/
|
*/
|
||||||
if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
|
if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
|
||||||
bp->b_flags |= B_INVAL;
|
bp->b_flags |= B_INVAL;
|
||||||
bfreekva(bp);
|
bufkvafree(bp);
|
||||||
brelse(bp);
|
brelse(bp);
|
||||||
goto restart;
|
goto restart;
|
||||||
}
|
}
|
||||||
@ -2409,7 +2438,7 @@ getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
|
|||||||
flushingbufs = 1;
|
flushingbufs = 1;
|
||||||
if (flushingbufs && bp->b_kvasize != 0) {
|
if (flushingbufs && bp->b_kvasize != 0) {
|
||||||
bp->b_flags |= B_INVAL;
|
bp->b_flags |= B_INVAL;
|
||||||
bfreekva(bp);
|
bufkvafree(bp);
|
||||||
brelse(bp);
|
brelse(bp);
|
||||||
goto restart;
|
goto restart;
|
||||||
}
|
}
|
||||||
@ -2480,65 +2509,27 @@ getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
|
|||||||
} else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) {
|
} else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) {
|
||||||
mtx_assert(&bqclean, MA_NOTOWNED);
|
mtx_assert(&bqclean, MA_NOTOWNED);
|
||||||
|
|
||||||
bfreekva(bp);
|
bufkvafree(bp);
|
||||||
bp->b_flags |= B_UNMAPPED;
|
|
||||||
bp->b_kvabase = bp->b_data = unmapped_buf;
|
|
||||||
bp->b_kvasize = maxsize;
|
|
||||||
atomic_add_long(&bufspace, bp->b_kvasize);
|
|
||||||
atomic_add_long(&unmapped_bufspace, bp->b_kvasize);
|
|
||||||
atomic_add_int(&bufreusecnt, 1);
|
atomic_add_int(&bufreusecnt, 1);
|
||||||
} else {
|
} else {
|
||||||
mtx_assert(&bqclean, MA_NOTOWNED);
|
mtx_assert(&bqclean, MA_NOTOWNED);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We finally have a valid bp. We aren't quite out of the
|
* We finally have a valid bp. We aren't quite out of the
|
||||||
* woods, we still have to reserve kva space. In order
|
* woods, we still have to reserve kva space. In order to
|
||||||
* to keep fragmentation sane we only allocate kva in
|
* keep fragmentation sane we only allocate kva in BKVASIZE
|
||||||
* BKVASIZE chunks.
|
* chunks.
|
||||||
*/
|
*/
|
||||||
maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
|
maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
|
||||||
|
|
||||||
if (maxsize != bp->b_kvasize || (bp->b_flags & (B_UNMAPPED |
|
if (maxsize != bp->b_kvasize &&
|
||||||
B_KVAALLOC)) == B_UNMAPPED) {
|
bufkvaalloc(bp, maxsize, gbflags)) {
|
||||||
if (allocbufkva(bp, maxsize, gbflags)) {
|
defrag = 1;
|
||||||
defrag = 1;
|
bp->b_flags |= B_INVAL;
|
||||||
bp->b_flags |= B_INVAL;
|
brelse(bp);
|
||||||
brelse(bp);
|
goto restart;
|
||||||
goto restart;
|
|
||||||
}
|
|
||||||
atomic_add_int(&bufreusecnt, 1);
|
|
||||||
} else if ((bp->b_flags & B_KVAALLOC) != 0 &&
|
|
||||||
(gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == 0) {
|
|
||||||
/*
|
|
||||||
* If the reused buffer has KVA allocated,
|
|
||||||
* reassign b_kvaalloc to b_kvabase.
|
|
||||||
*/
|
|
||||||
bp->b_kvabase = bp->b_kvaalloc;
|
|
||||||
bp->b_flags &= ~B_KVAALLOC;
|
|
||||||
atomic_subtract_long(&unmapped_bufspace,
|
|
||||||
bp->b_kvasize);
|
|
||||||
atomic_add_int(&bufreusecnt, 1);
|
|
||||||
} else if ((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 &&
|
|
||||||
(gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == (GB_UNMAPPED |
|
|
||||||
GB_KVAALLOC)) {
|
|
||||||
/*
|
|
||||||
* The case of reused buffer already have KVA
|
|
||||||
* mapped, but the request is for unmapped
|
|
||||||
* buffer with KVA allocated.
|
|
||||||
*/
|
|
||||||
bp->b_kvaalloc = bp->b_kvabase;
|
|
||||||
bp->b_data = bp->b_kvabase = unmapped_buf;
|
|
||||||
bp->b_flags |= B_UNMAPPED | B_KVAALLOC;
|
|
||||||
atomic_add_long(&unmapped_bufspace,
|
|
||||||
bp->b_kvasize);
|
|
||||||
atomic_add_int(&bufreusecnt, 1);
|
|
||||||
}
|
|
||||||
if ((gbflags & GB_UNMAPPED) == 0) {
|
|
||||||
bp->b_saveaddr = bp->b_kvabase;
|
|
||||||
bp->b_data = bp->b_saveaddr;
|
|
||||||
bp->b_flags &= ~B_UNMAPPED;
|
|
||||||
BUF_CHECK_MAPPED(bp);
|
|
||||||
}
|
}
|
||||||
|
atomic_add_int(&bufreusecnt, 1);
|
||||||
}
|
}
|
||||||
return (bp);
|
return (bp);
|
||||||
}
|
}
|
||||||
@ -2968,9 +2959,9 @@ vfs_setdirty_locked_object(struct buf *bp)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Allocate the KVA mapping for an existing buffer. It handles the
|
* Allocate the KVA mapping for an existing buffer.
|
||||||
* cases of both B_UNMAPPED buffer, and buffer with the preallocated
|
* If an unmapped buffer is provided but a mapped buffer is requested, take
|
||||||
* KVA which is not mapped (B_KVAALLOC).
|
* also care to properly setup mappings between pages and KVA.
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
|
bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
|
||||||
@ -2979,25 +2970,22 @@ bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
|
|||||||
int bsize, maxsize, need_mapping, need_kva;
|
int bsize, maxsize, need_mapping, need_kva;
|
||||||
off_t offset;
|
off_t offset;
|
||||||
|
|
||||||
need_mapping = (bp->b_flags & B_UNMAPPED) != 0 &&
|
need_mapping = bp->b_data == unmapped_buf &&
|
||||||
(gbflags & GB_UNMAPPED) == 0;
|
(gbflags & GB_UNMAPPED) == 0;
|
||||||
need_kva = (bp->b_flags & (B_KVAALLOC | B_UNMAPPED)) == B_UNMAPPED &&
|
need_kva = bp->b_kvabase == unmapped_buf &&
|
||||||
|
bp->b_data == unmapped_buf &&
|
||||||
(gbflags & GB_KVAALLOC) != 0;
|
(gbflags & GB_KVAALLOC) != 0;
|
||||||
if (!need_mapping && !need_kva)
|
if (!need_mapping && !need_kva)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
BUF_CHECK_UNMAPPED(bp);
|
BUF_CHECK_UNMAPPED(bp);
|
||||||
|
|
||||||
if (need_mapping && (bp->b_flags & B_KVAALLOC) != 0) {
|
if (need_mapping && bp->b_kvabase != unmapped_buf) {
|
||||||
/*
|
/*
|
||||||
* Buffer is not mapped, but the KVA was already
|
* Buffer is not mapped, but the KVA was already
|
||||||
* reserved at the time of the instantiation. Use the
|
* reserved at the time of the instantiation. Use the
|
||||||
* allocated space.
|
* allocated space.
|
||||||
*/
|
*/
|
||||||
bp->b_flags &= ~B_KVAALLOC;
|
|
||||||
KASSERT(bp->b_kvaalloc != 0, ("kvaalloc == 0"));
|
|
||||||
bp->b_kvabase = bp->b_kvaalloc;
|
|
||||||
atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize);
|
|
||||||
goto has_addr;
|
goto has_addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3012,7 +3000,7 @@ bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
|
|||||||
maxsize = imax(maxsize, bsize);
|
maxsize = imax(maxsize, bsize);
|
||||||
|
|
||||||
mapping_loop:
|
mapping_loop:
|
||||||
if (allocbufkva(bp, maxsize, gbflags)) {
|
if (bufkvaalloc(bp, maxsize, gbflags)) {
|
||||||
/*
|
/*
|
||||||
* Request defragmentation. getnewbuf() returns us the
|
* Request defragmentation. getnewbuf() returns us the
|
||||||
* allocated space by the scratch buffer KVA.
|
* allocated space by the scratch buffer KVA.
|
||||||
@ -3025,31 +3013,31 @@ bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
|
|||||||
* XXXKIB: defragmentation cannot
|
* XXXKIB: defragmentation cannot
|
||||||
* succeed, not sure what else to do.
|
* succeed, not sure what else to do.
|
||||||
*/
|
*/
|
||||||
panic("GB_NOWAIT_BD and B_UNMAPPED %p", bp);
|
panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
|
||||||
}
|
}
|
||||||
atomic_add_int(&mappingrestarts, 1);
|
atomic_add_int(&mappingrestarts, 1);
|
||||||
goto mapping_loop;
|
goto mapping_loop;
|
||||||
}
|
}
|
||||||
KASSERT((scratch_bp->b_flags & B_KVAALLOC) != 0,
|
KASSERT(scratch_bp->b_kvabase != unmapped_buf,
|
||||||
("scratch bp !B_KVAALLOC %p", scratch_bp));
|
("scratch bp has no KVA %p", scratch_bp));
|
||||||
setbufkva(bp, (vm_offset_t)scratch_bp->b_kvaalloc,
|
/* Grab pointers. */
|
||||||
scratch_bp->b_kvasize, gbflags);
|
bp->b_kvabase = scratch_bp->b_kvabase;
|
||||||
|
bp->b_kvasize = scratch_bp->b_kvasize;
|
||||||
|
bp->b_data = scratch_bp->b_data;
|
||||||
|
|
||||||
/* Get rid of the scratch buffer. */
|
/* Get rid of the scratch buffer. */
|
||||||
scratch_bp->b_kvasize = 0;
|
scratch_bp->b_kvasize = 0;
|
||||||
scratch_bp->b_flags |= B_INVAL;
|
scratch_bp->b_flags |= B_INVAL;
|
||||||
scratch_bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
|
scratch_bp->b_data = scratch_bp->b_kvabase = unmapped_buf;
|
||||||
brelse(scratch_bp);
|
brelse(scratch_bp);
|
||||||
}
|
}
|
||||||
if (!need_mapping)
|
|
||||||
return;
|
|
||||||
|
|
||||||
has_addr:
|
has_addr:
|
||||||
bp->b_saveaddr = bp->b_kvabase;
|
if (need_mapping) {
|
||||||
bp->b_data = bp->b_saveaddr; /* b_offset is handled by bpmap_qenter */
|
/* b_offset is handled by bpmap_qenter. */
|
||||||
bp->b_flags &= ~B_UNMAPPED;
|
bp->b_data = bp->b_kvabase;
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
bpmap_qenter(bp);
|
bpmap_qenter(bp);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -3265,7 +3253,7 @@ getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
|
|||||||
} else {
|
} else {
|
||||||
maxsize = size;
|
maxsize = size;
|
||||||
/* Do not allow non-VMIO notmapped buffers. */
|
/* Do not allow non-VMIO notmapped buffers. */
|
||||||
flags &= ~GB_UNMAPPED;
|
flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
|
||||||
}
|
}
|
||||||
maxsize = imax(maxsize, bsize);
|
maxsize = imax(maxsize, bsize);
|
||||||
|
|
||||||
@ -3358,7 +3346,6 @@ geteblk(int size, int flags)
|
|||||||
return (bp);
|
return (bp);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This code constitutes the buffer memory from either anonymous system
|
* This code constitutes the buffer memory from either anonymous system
|
||||||
* memory (in the case of non-VMIO operations) or from an associated
|
* memory (in the case of non-VMIO operations) or from an associated
|
||||||
@ -3382,7 +3369,7 @@ allocbuf(struct buf *bp, int size)
|
|||||||
|
|
||||||
BUF_ASSERT_HELD(bp);
|
BUF_ASSERT_HELD(bp);
|
||||||
|
|
||||||
if (bp->b_kvasize < size)
|
if (bp->b_kvasize != 0 && bp->b_kvasize < size)
|
||||||
panic("allocbuf: buffer too small");
|
panic("allocbuf: buffer too small");
|
||||||
|
|
||||||
if ((bp->b_flags & B_VMIO) == 0) {
|
if ((bp->b_flags & B_VMIO) == 0) {
|
||||||
@ -3407,15 +3394,8 @@ allocbuf(struct buf *bp, int size)
|
|||||||
bp->b_bcount = size;
|
bp->b_bcount = size;
|
||||||
} else {
|
} else {
|
||||||
free(bp->b_data, M_BIOBUF);
|
free(bp->b_data, M_BIOBUF);
|
||||||
if (bp->b_bufsize) {
|
bufmallocadjust(bp, 0);
|
||||||
atomic_subtract_long(
|
bp->b_data = bp->b_kvabase;
|
||||||
&bufmallocspace,
|
|
||||||
bp->b_bufsize);
|
|
||||||
bufspacewakeup();
|
|
||||||
bp->b_bufsize = 0;
|
|
||||||
}
|
|
||||||
bp->b_saveaddr = bp->b_kvabase;
|
|
||||||
bp->b_data = bp->b_saveaddr;
|
|
||||||
bp->b_bcount = 0;
|
bp->b_bcount = 0;
|
||||||
bp->b_flags &= ~B_MALLOC;
|
bp->b_flags &= ~B_MALLOC;
|
||||||
}
|
}
|
||||||
@ -3434,33 +3414,28 @@ allocbuf(struct buf *bp, int size)
|
|||||||
* is probably extremely rare and not worth worrying
|
* is probably extremely rare and not worth worrying
|
||||||
* over.
|
* over.
|
||||||
*/
|
*/
|
||||||
if ( (bufmallocspace < maxbufmallocspace) &&
|
if ((bufmallocspace < maxbufmallocspace) &&
|
||||||
(bp->b_bufsize == 0) &&
|
(bp->b_bufsize == 0) &&
|
||||||
(mbsize <= PAGE_SIZE/2)) {
|
(mbsize <= PAGE_SIZE/2)) {
|
||||||
|
|
||||||
bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
|
bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
|
||||||
bp->b_bufsize = mbsize;
|
|
||||||
bp->b_bcount = size;
|
bp->b_bcount = size;
|
||||||
bp->b_flags |= B_MALLOC;
|
bp->b_flags |= B_MALLOC;
|
||||||
atomic_add_long(&bufmallocspace, mbsize);
|
bufmallocadjust(bp, mbsize);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
origbuf = NULL;
|
origbuf = NULL;
|
||||||
origbufsize = 0;
|
origbufsize = 0;
|
||||||
/*
|
/*
|
||||||
* If the buffer is growing on its other-than-first allocation,
|
* If the buffer is growing on its other-than-first
|
||||||
* then we revert to the page-allocation scheme.
|
* allocation then we revert to the page-allocation
|
||||||
|
* scheme.
|
||||||
*/
|
*/
|
||||||
if (bp->b_flags & B_MALLOC) {
|
if (bp->b_flags & B_MALLOC) {
|
||||||
origbuf = bp->b_data;
|
origbuf = bp->b_data;
|
||||||
origbufsize = bp->b_bufsize;
|
origbufsize = bp->b_bufsize;
|
||||||
bp->b_data = bp->b_kvabase;
|
bp->b_data = bp->b_kvabase;
|
||||||
if (bp->b_bufsize) {
|
bufmallocadjust(bp, 0);
|
||||||
atomic_subtract_long(&bufmallocspace,
|
|
||||||
bp->b_bufsize);
|
|
||||||
bufspacewakeup();
|
|
||||||
bp->b_bufsize = 0;
|
|
||||||
}
|
|
||||||
bp->b_flags &= ~B_MALLOC;
|
bp->b_flags &= ~B_MALLOC;
|
||||||
newbsize = round_page(newbsize);
|
newbsize = round_page(newbsize);
|
||||||
}
|
}
|
||||||
@ -3498,7 +3473,7 @@ allocbuf(struct buf *bp, int size)
|
|||||||
if (desiredpages < bp->b_npages) {
|
if (desiredpages < bp->b_npages) {
|
||||||
vm_page_t m;
|
vm_page_t m;
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
pmap_qremove((vm_offset_t)trunc_page(
|
pmap_qremove((vm_offset_t)trunc_page(
|
||||||
(vm_offset_t)bp->b_data) +
|
(vm_offset_t)bp->b_data) +
|
||||||
@ -3611,18 +3586,18 @@ allocbuf(struct buf *bp, int size)
|
|||||||
VM_OBJECT_WUNLOCK(obj);
|
VM_OBJECT_WUNLOCK(obj);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Step 3, fixup the KVM pmap.
|
* Step 3, fixup the KVA pmap.
|
||||||
*/
|
*/
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0)
|
if (buf_mapped(bp))
|
||||||
bpmap_qenter(bp);
|
bpmap_qenter(bp);
|
||||||
else
|
else
|
||||||
BUF_CHECK_UNMAPPED(bp);
|
BUF_CHECK_UNMAPPED(bp);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (newbsize < bp->b_bufsize)
|
/* Record changes in allocation size. */
|
||||||
bufspacewakeup();
|
if (bp->b_bufsize != newbsize)
|
||||||
bp->b_bufsize = newbsize; /* actual buffer allocation */
|
bufspaceadjust(bp, newbsize);
|
||||||
bp->b_bcount = size; /* requested buffer size */
|
bp->b_bcount = size; /* requested buffer size. */
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3919,7 +3894,7 @@ bufdone_finish(struct buf *bp)
|
|||||||
}
|
}
|
||||||
vm_object_pip_wakeupn(obj, 0);
|
vm_object_pip_wakeupn(obj, 0);
|
||||||
VM_OBJECT_WUNLOCK(obj);
|
VM_OBJECT_WUNLOCK(obj);
|
||||||
if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
|
if (bogus && buf_mapped(bp)) {
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
||||||
bp->b_pages, bp->b_npages);
|
bp->b_pages, bp->b_npages);
|
||||||
@ -3966,7 +3941,7 @@ vfs_unbusy_pages(struct buf *bp)
|
|||||||
if (!m)
|
if (!m)
|
||||||
panic("vfs_unbusy_pages: page missing\n");
|
panic("vfs_unbusy_pages: page missing\n");
|
||||||
bp->b_pages[i] = m;
|
bp->b_pages[i] = m;
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
||||||
bp->b_pages, bp->b_npages);
|
bp->b_pages, bp->b_npages);
|
||||||
@ -4140,7 +4115,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
|
|||||||
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
|
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
|
||||||
}
|
}
|
||||||
VM_OBJECT_WUNLOCK(obj);
|
VM_OBJECT_WUNLOCK(obj);
|
||||||
if (bogus && (bp->b_flags & B_UNMAPPED) == 0) {
|
if (bogus && buf_mapped(bp)) {
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
||||||
bp->b_pages, bp->b_npages);
|
bp->b_pages, bp->b_npages);
|
||||||
@ -4260,7 +4235,7 @@ vfs_bio_bzero_buf(struct buf *bp, int base, int size)
|
|||||||
vm_page_t m;
|
vm_page_t m;
|
||||||
int i, n;
|
int i, n;
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
BUF_CHECK_MAPPED(bp);
|
BUF_CHECK_MAPPED(bp);
|
||||||
bzero(bp->b_data + base, size);
|
bzero(bp->b_data + base, size);
|
||||||
} else {
|
} else {
|
||||||
@ -4353,11 +4328,12 @@ vm_hold_free_pages(struct buf *bp, int newbsize)
|
|||||||
* be valid, a race or a smaller-file mapped into a larger space may
|
* be valid, a race or a smaller-file mapped into a larger space may
|
||||||
* actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
|
* actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
|
||||||
* check the return value.
|
* check the return value.
|
||||||
|
*
|
||||||
|
* This function only works with pager buffers.
|
||||||
*/
|
*/
|
||||||
int
|
int
|
||||||
vmapbuf(struct buf *bp, int mapbuf)
|
vmapbuf(struct buf *bp, int mapbuf)
|
||||||
{
|
{
|
||||||
caddr_t kva;
|
|
||||||
vm_prot_t prot;
|
vm_prot_t prot;
|
||||||
int pidx;
|
int pidx;
|
||||||
|
|
||||||
@ -4371,24 +4347,20 @@ vmapbuf(struct buf *bp, int mapbuf)
|
|||||||
btoc(MAXPHYS))) < 0)
|
btoc(MAXPHYS))) < 0)
|
||||||
return (-1);
|
return (-1);
|
||||||
bp->b_npages = pidx;
|
bp->b_npages = pidx;
|
||||||
|
bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
|
||||||
if (mapbuf || !unmapped_buf_allowed) {
|
if (mapbuf || !unmapped_buf_allowed) {
|
||||||
pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
|
pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
|
||||||
kva = bp->b_saveaddr;
|
bp->b_data = bp->b_kvabase + bp->b_offset;
|
||||||
bp->b_saveaddr = bp->b_data;
|
} else
|
||||||
bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
|
|
||||||
bp->b_flags &= ~B_UNMAPPED;
|
|
||||||
} else {
|
|
||||||
bp->b_flags |= B_UNMAPPED;
|
|
||||||
bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
|
|
||||||
bp->b_saveaddr = bp->b_data;
|
|
||||||
bp->b_data = unmapped_buf;
|
bp->b_data = unmapped_buf;
|
||||||
}
|
|
||||||
return(0);
|
return(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Free the io map PTEs associated with this IO operation.
|
* Free the io map PTEs associated with this IO operation.
|
||||||
* We also invalidate the TLB entries and restore the original b_addr.
|
* We also invalidate the TLB entries and restore the original b_addr.
|
||||||
|
*
|
||||||
|
* This function only works with pager buffers.
|
||||||
*/
|
*/
|
||||||
void
|
void
|
||||||
vunmapbuf(struct buf *bp)
|
vunmapbuf(struct buf *bp)
|
||||||
@ -4396,13 +4368,11 @@ vunmapbuf(struct buf *bp)
|
|||||||
int npages;
|
int npages;
|
||||||
|
|
||||||
npages = bp->b_npages;
|
npages = bp->b_npages;
|
||||||
if (bp->b_flags & B_UNMAPPED)
|
if (buf_mapped(bp))
|
||||||
bp->b_flags &= ~B_UNMAPPED;
|
|
||||||
else
|
|
||||||
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
|
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
|
||||||
vm_page_unhold_pages(bp->b_pages, npages);
|
vm_page_unhold_pages(bp->b_pages, npages);
|
||||||
|
|
||||||
bp->b_data = bp->b_saveaddr;
|
bp->b_data = unmapped_buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@ -4543,7 +4513,7 @@ void
|
|||||||
bdata2bio(struct buf *bp, struct bio *bip)
|
bdata2bio(struct buf *bp, struct bio *bip)
|
||||||
{
|
{
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) != 0) {
|
if (!buf_mapped(bp)) {
|
||||||
KASSERT(unmapped_buf_allowed, ("unmapped"));
|
KASSERT(unmapped_buf_allowed, ("unmapped"));
|
||||||
bip->bio_ma = bp->b_pages;
|
bip->bio_ma = bp->b_pages;
|
||||||
bip->bio_ma_n = bp->b_npages;
|
bip->bio_ma_n = bp->b_npages;
|
||||||
@ -4586,6 +4556,8 @@ DB_SHOW_COMMAND(buffer, db_show_buffer)
|
|||||||
bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
|
bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
|
||||||
bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
|
bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
|
||||||
(intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
|
(intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
|
||||||
|
db_printf("b_kvabase = %p, b_kvasize = %d\n",
|
||||||
|
bp->b_kvabase, bp->b_kvasize);
|
||||||
if (bp->b_npages) {
|
if (bp->b_npages) {
|
||||||
int i;
|
int i;
|
||||||
db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
|
db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
|
||||||
|
@ -354,7 +354,6 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
|
|||||||
*/
|
*/
|
||||||
bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
|
bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
|
||||||
if ((gbflags & GB_UNMAPPED) != 0) {
|
if ((gbflags & GB_UNMAPPED) != 0) {
|
||||||
bp->b_flags |= B_UNMAPPED;
|
|
||||||
bp->b_data = unmapped_buf;
|
bp->b_data = unmapped_buf;
|
||||||
} else {
|
} else {
|
||||||
bp->b_data = (char *)((vm_offset_t)bp->b_data |
|
bp->b_data = (char *)((vm_offset_t)bp->b_data |
|
||||||
@ -517,9 +516,8 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
|
|||||||
if (bp->b_bufsize > bp->b_kvasize)
|
if (bp->b_bufsize > bp->b_kvasize)
|
||||||
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
|
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
|
||||||
bp->b_bufsize, bp->b_kvasize);
|
bp->b_bufsize, bp->b_kvasize);
|
||||||
bp->b_kvasize = bp->b_bufsize;
|
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
|
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
|
||||||
(vm_page_t *)bp->b_pages, bp->b_npages);
|
(vm_page_t *)bp->b_pages, bp->b_npages);
|
||||||
}
|
}
|
||||||
@ -545,7 +543,7 @@ cluster_callback(bp)
|
|||||||
if (bp->b_ioflags & BIO_ERROR)
|
if (bp->b_ioflags & BIO_ERROR)
|
||||||
error = bp->b_error;
|
error = bp->b_error;
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
|
pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
|
||||||
bp->b_npages);
|
bp->b_npages);
|
||||||
}
|
}
|
||||||
@ -871,7 +869,6 @@ cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
|
|||||||
bp->b_data = (char *)((vm_offset_t)bp->b_data |
|
bp->b_data = (char *)((vm_offset_t)bp->b_data |
|
||||||
((vm_offset_t)tbp->b_data & PAGE_MASK));
|
((vm_offset_t)tbp->b_data & PAGE_MASK));
|
||||||
} else {
|
} else {
|
||||||
bp->b_flags |= B_UNMAPPED;
|
|
||||||
bp->b_data = unmapped_buf;
|
bp->b_data = unmapped_buf;
|
||||||
}
|
}
|
||||||
bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
|
bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
|
||||||
@ -1004,7 +1001,7 @@ cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
|
|||||||
tbp, b_cluster.cluster_entry);
|
tbp, b_cluster.cluster_entry);
|
||||||
}
|
}
|
||||||
finishcluster:
|
finishcluster:
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
|
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
|
||||||
(vm_page_t *)bp->b_pages, bp->b_npages);
|
(vm_page_t *)bp->b_pages, bp->b_npages);
|
||||||
}
|
}
|
||||||
@ -1012,7 +1009,6 @@ cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
|
|||||||
panic(
|
panic(
|
||||||
"cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
|
"cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
|
||||||
bp->b_bufsize, bp->b_kvasize);
|
bp->b_bufsize, bp->b_kvasize);
|
||||||
bp->b_kvasize = bp->b_bufsize;
|
|
||||||
totalwritten += bp->b_bufsize;
|
totalwritten += bp->b_bufsize;
|
||||||
bp->b_dirtyoff = 0;
|
bp->b_dirtyoff = 0;
|
||||||
bp->b_dirtyend = bp->b_bufsize;
|
bp->b_dirtyend = bp->b_bufsize;
|
||||||
|
@@ -112,17 +112,15 @@ struct buf {
 	b_xflags_t b_xflags;		/* extra flags */
 	struct lock b_lock;		/* Buffer lock */
 	long	b_bufsize;		/* Allocated buffer size. */
-	long	b_runningbufspace;	/* when I/O is running, pipelining */
-	caddr_t	b_kvabase;		/* base kva for buffer */
-	caddr_t	b_kvaalloc;		/* allocated kva for B_KVAALLOC */
+	int	b_runningbufspace;	/* when I/O is running, pipelining */
 	int	b_kvasize;		/* size of kva for buffer */
-	daddr_t	b_lblkno;		/* Logical block number. */
-	struct	vnode *b_vp;		/* Device vnode. */
 	int	b_dirtyoff;		/* Offset in buffer of dirty region. */
 	int	b_dirtyend;		/* Offset of end of dirty region. */
+	caddr_t	b_kvabase;		/* base kva for buffer */
+	daddr_t	b_lblkno;		/* Logical block number. */
+	struct	vnode *b_vp;		/* Device vnode. */
 	struct	ucred *b_rcred;		/* Read credentials reference. */
 	struct	ucred *b_wcred;		/* Write credentials reference. */
-	void	*b_saveaddr;		/* Original b_addr for physio. */
 	union {
 		TAILQ_ENTRY(buf) bu_freelist; /* (Q) */
 		struct {
@@ -206,8 +204,8 @@ struct buf {
 #define	B_PERSISTENT	0x00000100	/* Perm. ref'ed while EXT2FS mounted. */
 #define	B_DONE		0x00000200	/* I/O completed. */
 #define	B_EINTR		0x00000400	/* I/O was interrupted */
-#define	B_UNMAPPED	0x00000800	/* KVA is not mapped. */
-#define	B_KVAALLOC	0x00001000	/* But allocated. */
+#define	B_00000800	0x00000800	/* Available flag. */
+#define	B_00001000	0x00001000	/* Available flag. */
 #define	B_INVAL		0x00002000	/* Does not contain valid info. */
 #define	B_BARRIER	0x00004000	/* Write this and all preceeding first. */
 #define	B_NOCACHE	0x00008000	/* Do not cache block after use. */
@@ -231,7 +229,7 @@ struct buf {
 #define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34managed" \
 	"\33paging\32infreecnt\31nocopy\30b23\27relbuf\26dirty\25b20" \
 	"\24b19\23b18\22clusterok\21malloc\20nocache\17b14\16inval" \
-	"\15kvaalloc\14unmapped\13eintr\12done\11persist\10delwri" \
+	"\15b12\14b11\13eintr\12done\11persist\10delwri" \
 	"\7validsuspwrt\6cache\5deferred\4direct\3async\2needcommit\1age"
 
 /*
@@ -374,15 +372,11 @@ struct buf_queue_head {
 };
 
 /*
- * This structure describes a clustered I/O.  It is stored in the b_saveaddr
- * field of the buffer on which I/O is done.  At I/O completion, cluster
- * callback uses the structure to parcel I/O's to individual buffers, and
- * then free's this structure.
+ * This structure describes a clustered I/O.
  */
 struct cluster_save {
 	long	bs_bcount;		/* Saved b_bcount. */
 	long	bs_bufsize;		/* Saved b_bufsize. */
-	void	*bs_saveaddr;		/* Saved b_addr. */
 	int	bs_nchildren;		/* Number of associated buffers. */
 	struct buf **bs_children;	/* List of associated buffers. */
 };
@@ -478,7 +472,14 @@ extern int cluster_pbuf_freecnt;	/* Number of pbufs for clusters */
 extern int	vnode_pbuf_freecnt;	/* Number of pbufs for vnode pager */
 extern int	vnode_async_pbuf_freecnt;	/* Number of pbufs for vnode pager,
 						   asynchronous reads */
-extern caddr_t	unmapped_buf;
+extern caddr_t	unmapped_buf;	/* Data address for unmapped buffers. */
 
+static inline int
+buf_mapped(struct buf *bp)
+{
+
+	return (bp->b_data != unmapped_buf);
+}
+
 void	runningbufwakeup(struct buf *);
 void	waitrunningbufspace(void);
@ -62,8 +62,7 @@ static int ffs_rawread_readahead(struct vnode *vp,
|
|||||||
off_t offset,
|
off_t offset,
|
||||||
size_t len,
|
size_t len,
|
||||||
struct thread *td,
|
struct thread *td,
|
||||||
struct buf *bp,
|
struct buf *bp);
|
||||||
caddr_t sa);
|
|
||||||
static int ffs_rawread_main(struct vnode *vp,
|
static int ffs_rawread_main(struct vnode *vp,
|
||||||
struct uio *uio);
|
struct uio *uio);
|
||||||
|
|
||||||
@ -190,8 +189,7 @@ ffs_rawread_readahead(struct vnode *vp,
|
|||||||
off_t offset,
|
off_t offset,
|
||||||
size_t len,
|
size_t len,
|
||||||
struct thread *td,
|
struct thread *td,
|
||||||
struct buf *bp,
|
struct buf *bp)
|
||||||
caddr_t sa)
|
|
||||||
{
|
{
|
||||||
int error;
|
int error;
|
||||||
u_int iolen;
|
u_int iolen;
|
||||||
@ -219,7 +217,6 @@ ffs_rawread_readahead(struct vnode *vp,
|
|||||||
bp->b_iocmd = BIO_READ;
|
bp->b_iocmd = BIO_READ;
|
||||||
bp->b_iodone = bdone;
|
bp->b_iodone = bdone;
|
||||||
bp->b_data = udata;
|
bp->b_data = udata;
|
||||||
bp->b_saveaddr = sa;
|
|
||||||
blockno = offset / bsize;
|
blockno = offset / bsize;
|
||||||
blockoff = (offset % bsize) / DEV_BSIZE;
|
blockoff = (offset % bsize) / DEV_BSIZE;
|
||||||
if ((daddr_t) blockno != blockno) {
|
if ((daddr_t) blockno != blockno) {
|
||||||
@ -272,7 +269,6 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
{
|
{
|
||||||
int error, nerror;
|
int error, nerror;
|
||||||
struct buf *bp, *nbp, *tbp;
|
struct buf *bp, *nbp, *tbp;
|
||||||
caddr_t sa, nsa, tsa;
|
|
||||||
u_int iolen;
|
u_int iolen;
|
||||||
int spl;
|
int spl;
|
||||||
caddr_t udata;
|
caddr_t udata;
|
||||||
@ -295,18 +291,15 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
|
|
||||||
bp = NULL;
|
bp = NULL;
|
||||||
nbp = NULL;
|
nbp = NULL;
|
||||||
sa = NULL;
|
|
||||||
nsa = NULL;
|
|
||||||
|
|
||||||
while (resid > 0) {
|
while (resid > 0) {
|
||||||
|
|
||||||
if (bp == NULL) { /* Setup first read */
|
if (bp == NULL) { /* Setup first read */
|
||||||
/* XXX: Leave some bufs for swap */
|
/* XXX: Leave some bufs for swap */
|
||||||
bp = getpbuf(&ffsrawbufcnt);
|
bp = getpbuf(&ffsrawbufcnt);
|
||||||
sa = bp->b_data;
|
|
||||||
pbgetvp(vp, bp);
|
pbgetvp(vp, bp);
|
||||||
error = ffs_rawread_readahead(vp, udata, offset,
|
error = ffs_rawread_readahead(vp, udata, offset,
|
||||||
resid, td, bp, sa);
|
resid, td, bp);
|
||||||
if (error != 0)
|
if (error != 0)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
@ -317,7 +310,6 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
else
|
else
|
||||||
nbp = NULL;
|
nbp = NULL;
|
||||||
if (nbp != NULL) {
|
if (nbp != NULL) {
|
||||||
nsa = nbp->b_data;
|
|
||||||
pbgetvp(vp, nbp);
|
pbgetvp(vp, nbp);
|
||||||
|
|
||||||
nerror = ffs_rawread_readahead(vp,
|
nerror = ffs_rawread_readahead(vp,
|
||||||
@ -328,8 +320,7 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
resid -
|
resid -
|
||||||
bp->b_bufsize,
|
bp->b_bufsize,
|
||||||
td,
|
td,
|
||||||
nbp,
|
nbp);
|
||||||
nsa);
|
|
||||||
if (nerror) {
|
if (nerror) {
|
||||||
pbrelvp(nbp);
|
pbrelvp(nbp);
|
||||||
relpbuf(nbp, &ffsrawbufcnt);
|
relpbuf(nbp, &ffsrawbufcnt);
|
||||||
@ -365,8 +356,7 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
offset,
|
offset,
|
||||||
bp->b_bufsize - iolen,
|
bp->b_bufsize - iolen,
|
||||||
td,
|
td,
|
||||||
bp,
|
bp);
|
||||||
sa);
|
|
||||||
if (error != 0)
|
if (error != 0)
|
||||||
break;
|
break;
|
||||||
} else if (nbp != NULL) { /* Complete read with readahead */
|
} else if (nbp != NULL) { /* Complete read with readahead */
|
||||||
@ -375,10 +365,6 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
bp = nbp;
|
bp = nbp;
|
||||||
nbp = tbp;
|
nbp = tbp;
|
||||||
|
|
||||||
tsa = sa;
|
|
||||||
sa = nsa;
|
|
||||||
nsa = tsa;
|
|
||||||
|
|
||||||
if (resid <= bp->b_bufsize) { /* No more readaheads */
|
if (resid <= bp->b_bufsize) { /* No more readaheads */
|
||||||
pbrelvp(nbp);
|
pbrelvp(nbp);
|
||||||
relpbuf(nbp, &ffsrawbufcnt);
|
relpbuf(nbp, &ffsrawbufcnt);
|
||||||
@ -392,8 +378,7 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
resid -
|
resid -
|
||||||
bp->b_bufsize,
|
bp->b_bufsize,
|
||||||
td,
|
td,
|
||||||
nbp,
|
nbp);
|
||||||
nsa);
|
|
||||||
if (nerror != 0) {
|
if (nerror != 0) {
|
||||||
pbrelvp(nbp);
|
pbrelvp(nbp);
|
||||||
relpbuf(nbp, &ffsrawbufcnt);
|
relpbuf(nbp, &ffsrawbufcnt);
|
||||||
@ -404,7 +389,7 @@ ffs_rawread_main(struct vnode *vp,
|
|||||||
break;
|
break;
|
||||||
} else if (resid > 0) { /* More to read, no readahead */
|
} else if (resid > 0) { /* More to read, no readahead */
|
||||||
error = ffs_rawread_readahead(vp, udata, offset,
|
error = ffs_rawread_readahead(vp, udata, offset,
|
||||||
resid, td, bp, sa);
|
resid, td, bp);
|
||||||
if (error != 0)
|
if (error != 0)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -2094,7 +2094,7 @@ ffs_bufwrite(struct buf *bp)
|
|||||||
if (newbp == NULL)
|
if (newbp == NULL)
|
||||||
goto normal_write;
|
goto normal_write;
|
||||||
|
|
||||||
KASSERT((bp->b_flags & B_UNMAPPED) == 0, ("Unmapped cg"));
|
KASSERT(buf_mapped(bp), ("Unmapped cg"));
|
||||||
memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
|
memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
|
||||||
BO_LOCK(bp->b_bufobj);
|
BO_LOCK(bp->b_bufobj);
|
||||||
bp->b_vflags |= BV_BKGRDINPROG;
|
bp->b_vflags |= BV_BKGRDINPROG;
|
||||||
|
@ -581,7 +581,7 @@ ffs_read(ap)
|
|||||||
xfersize = size;
|
xfersize = size;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
error = vn_io_fault_uiomove((char *)bp->b_data +
|
error = vn_io_fault_uiomove((char *)bp->b_data +
|
||||||
blkoffset, (int)xfersize, uio);
|
blkoffset, (int)xfersize, uio);
|
||||||
} else {
|
} else {
|
||||||
@ -758,7 +758,7 @@ ffs_write(ap)
|
|||||||
if (size < xfersize)
|
if (size < xfersize)
|
||||||
xfersize = size;
|
xfersize = size;
|
||||||
|
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0) {
|
if (buf_mapped(bp)) {
|
||||||
error = vn_io_fault_uiomove((char *)bp->b_data +
|
error = vn_io_fault_uiomove((char *)bp->b_data +
|
||||||
blkoffset, (int)xfersize, uio);
|
blkoffset, (int)xfersize, uio);
|
||||||
} else {
|
} else {
|
||||||
|
@ -772,11 +772,8 @@ swp_pager_strategy(struct buf *bp)
|
|||||||
mtx_unlock(&sw_dev_mtx);
|
mtx_unlock(&sw_dev_mtx);
|
||||||
if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
|
if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
|
||||||
unmapped_buf_allowed) {
|
unmapped_buf_allowed) {
|
||||||
bp->b_kvaalloc = bp->b_data;
|
|
||||||
bp->b_data = unmapped_buf;
|
bp->b_data = unmapped_buf;
|
||||||
bp->b_kvabase = unmapped_buf;
|
|
||||||
bp->b_offset = 0;
|
bp->b_offset = 0;
|
||||||
bp->b_flags |= B_UNMAPPED;
|
|
||||||
} else {
|
} else {
|
||||||
pmap_qenter((vm_offset_t)bp->b_data,
|
pmap_qenter((vm_offset_t)bp->b_data,
|
||||||
&bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
|
&bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
|
||||||
@ -1496,12 +1493,10 @@ swp_pager_async_iodone(struct buf *bp)
|
|||||||
/*
|
/*
|
||||||
* remove the mapping for kernel virtual
|
* remove the mapping for kernel virtual
|
||||||
*/
|
*/
|
||||||
if ((bp->b_flags & B_UNMAPPED) != 0) {
|
if (buf_mapped(bp))
|
||||||
bp->b_data = bp->b_kvaalloc;
|
|
||||||
bp->b_kvabase = bp->b_kvaalloc;
|
|
||||||
bp->b_flags &= ~B_UNMAPPED;
|
|
||||||
} else
|
|
||||||
pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
|
pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
|
||||||
|
else
|
||||||
|
bp->b_data = bp->b_kvabase;
|
||||||
|
|
||||||
if (bp->b_npages) {
|
if (bp->b_npages) {
|
||||||
object = bp->b_pages[0]->object;
|
object = bp->b_pages[0]->object;
|
||||||
@ -2597,7 +2592,7 @@ swapgeom_strategy(struct buf *bp, struct swdevt *sp)
|
|||||||
bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
|
bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
|
||||||
bio->bio_length = bp->b_bcount;
|
bio->bio_length = bp->b_bcount;
|
||||||
bio->bio_done = swapgeom_done;
|
bio->bio_done = swapgeom_done;
|
||||||
if ((bp->b_flags & B_UNMAPPED) != 0) {
|
if (!buf_mapped(bp)) {
|
||||||
bio->bio_ma = bp->b_pages;
|
bio->bio_ma = bp->b_pages;
|
||||||
bio->bio_data = unmapped_buf;
|
bio->bio_data = unmapped_buf;
|
||||||
bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
|
bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
|
||||||
|
@ -400,12 +400,11 @@ initpbuf(struct buf *bp)
|
|||||||
bp->b_rcred = NOCRED;
|
bp->b_rcred = NOCRED;
|
||||||
bp->b_wcred = NOCRED;
|
bp->b_wcred = NOCRED;
|
||||||
bp->b_qindex = 0; /* On no queue (QUEUE_NONE) */
|
bp->b_qindex = 0; /* On no queue (QUEUE_NONE) */
|
||||||
bp->b_saveaddr = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
|
bp->b_kvabase = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
|
||||||
bp->b_data = bp->b_saveaddr;
|
bp->b_data = bp->b_kvabase;
|
||||||
bp->b_kvabase = bp->b_saveaddr;
|
|
||||||
bp->b_kvasize = MAXPHYS;
|
bp->b_kvasize = MAXPHYS;
|
||||||
bp->b_xflags = 0;
|
|
||||||
bp->b_flags = 0;
|
bp->b_flags = 0;
|
||||||
|
bp->b_xflags = 0;
|
||||||
bp->b_ioflags = 0;
|
bp->b_ioflags = 0;
|
||||||
bp->b_iodone = NULL;
|
bp->b_iodone = NULL;
|
||||||
bp->b_error = 0;
|
bp->b_error = 0;
|
||||||
|
@ -965,8 +965,6 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
|
|||||||
size = (size + secmask) & ~secmask;
|
size = (size + secmask) & ~secmask;
|
||||||
}
|
}
|
||||||
|
|
||||||
bp->b_kvaalloc = bp->b_data;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* and map the pages to be read into the kva, if the filesystem
|
* and map the pages to be read into the kva, if the filesystem
|
||||||
* requires mapped buffers.
|
* requires mapped buffers.
|
||||||
@ -974,11 +972,11 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
|
|||||||
if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
|
if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
|
||||||
unmapped_buf_allowed) {
|
unmapped_buf_allowed) {
|
||||||
bp->b_data = unmapped_buf;
|
bp->b_data = unmapped_buf;
|
||||||
bp->b_kvabase = unmapped_buf;
|
|
||||||
bp->b_offset = 0;
|
bp->b_offset = 0;
|
||||||
bp->b_flags |= B_UNMAPPED;
|
} else {
|
||||||
} else
|
bp->b_data = bp->b_kvabase;
|
||||||
pmap_qenter((vm_offset_t)bp->b_kvaalloc, m, count);
|
pmap_qenter((vm_offset_t)bp->b_data, m, count);
|
||||||
|
}
|
||||||
|
|
||||||
/* build a minimal buffer header */
|
/* build a minimal buffer header */
|
||||||
bp->b_iocmd = BIO_READ;
|
bp->b_iocmd = BIO_READ;
|
||||||
@ -1053,20 +1051,17 @@ vnode_pager_generic_getpages_done(struct buf *bp)
|
|||||||
object = bp->b_vp->v_object;
|
object = bp->b_vp->v_object;
|
||||||
|
|
||||||
if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
|
if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
|
||||||
if ((bp->b_flags & B_UNMAPPED) != 0) {
|
if (!buf_mapped(bp)) {
|
||||||
bp->b_flags &= ~B_UNMAPPED;
|
bp->b_data = bp->b_kvabase;
|
||||||
pmap_qenter((vm_offset_t)bp->b_kvaalloc, bp->b_pages,
|
pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
|
||||||
bp->b_npages);
|
bp->b_npages);
|
||||||
}
|
}
|
||||||
bzero(bp->b_kvaalloc + bp->b_bcount,
|
bzero(bp->b_data + bp->b_bcount,
|
||||||
PAGE_SIZE * bp->b_npages - bp->b_bcount);
|
PAGE_SIZE * bp->b_npages - bp->b_bcount);
|
||||||
}
|
}
|
||||||
if ((bp->b_flags & B_UNMAPPED) == 0)
|
if (buf_mapped(bp)) {
|
||||||
pmap_qremove((vm_offset_t)bp->b_kvaalloc, bp->b_npages);
|
pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
|
||||||
if ((bp->b_vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0) {
|
bp->b_data = unmapped_buf;
|
||||||
bp->b_data = bp->b_kvaalloc;
|
|
||||||
bp->b_kvabase = bp->b_kvaalloc;
|
|
||||||
bp->b_flags &= ~B_UNMAPPED;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
VM_OBJECT_WLOCK(object);
|
VM_OBJECT_WLOCK(object);
|
||||||