Provide a lock-free alternative to resolve bogus pages. This is not likely
to be much of a perf win, just a nice code simplification.

Reviewed by:	markj, kib
Differential Revision:	https://reviews.freebsd.org/D23866
commit 6be21eb778
parent d5151ea87a
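In short, every site that resolved a bogus_page placeholder under the VM object lock now calls the new unlocked helper instead. A minimal sketch of the before/after pattern (obj, m, and pindex stand for whatever object, page slot, and index the caller is working with; this is an illustration, not a literal excerpt from the diff):

	/* Before: resolve the placeholder under the VM object lock. */
	if (m == bogus_page) {
		VM_OBJECT_RLOCK(obj);
		m = vm_page_lookup(obj, pindex);
		VM_OBJECT_RUNLOCK(obj);
	}

	/*
	 * After: the real page is still busied by this I/O, so an
	 * unlocked radix lookup via vm_page_relookup() is enough.
	 */
	if (m == bogus_page)
		m = vm_page_relookup(obj, pindex);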
sys/kern/kern_sendfile.c

@@ -350,7 +350,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 {
 	vm_page_t *pa = sfio->pa;
 	int grabbed;
-	bool locked;
 
 	*nios = 0;
 	flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
@@ -359,8 +358,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 	 * First grab all the pages and wire them. Note that we grab
 	 * only required pages. Readahead pages are dealt with later.
 	 */
-	locked = false;
-
 	grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off),
 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
 	if (grabbed < npages) {
@@ -381,10 +378,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 			i++;
 			continue;
 		}
-		if (!locked) {
-			VM_OBJECT_WLOCK(obj);
-			locked = true;
-		}
 
 		/*
 		 * Next page is invalid. Check if it belongs to pager. It
@@ -396,8 +389,10 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 		 * stored in 'a', about how many pages we can pagein after
 		 * this page in a single I/O.
 		 */
+		VM_OBJECT_RLOCK(obj);
 		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
 		    &a)) {
+			VM_OBJECT_RUNLOCK(obj);
 			pmap_zero_page(pa[i]);
 			vm_page_valid(pa[i]);
 			MPASS(pa[i]->dirty == 0);
@@ -405,6 +400,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 			i++;
 			continue;
 		}
+		VM_OBJECT_RUNLOCK(obj);
 
 		/*
 		 * We want to pagein as many pages as possible, limited only
@@ -435,11 +431,9 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 		}
 
 		refcount_acquire(&sfio->nios);
-		VM_OBJECT_WUNLOCK(obj);
 		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
 		    i + count == npages ? &rhpages : NULL,
 		    &sendfile_iodone, sfio);
-		VM_OBJECT_WLOCK(obj);
 		if (__predict_false(rv != VM_PAGER_OK)) {
 			/*
 			 * Perform full pages recovery before returning EIO.
@@ -451,7 +445,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 			for (j = 0; j < npages; j++) {
 				if (j > i && j < i + count - 1 &&
 				    pa[j] == bogus_page)
-					pa[j] = vm_page_lookup(obj,
+					pa[j] = vm_page_relookup(obj,
 					    OFF_TO_IDX(vmoff(j, off)));
 				else if (j >= i)
 					vm_page_xunbusy(pa[j]);
@@ -460,7 +454,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 				    __func__, pa, j));
 				vm_page_unwire(pa[j], PQ_INACTIVE);
 			}
-			VM_OBJECT_WUNLOCK(obj);
 			return (EIO);
 		}
 
@@ -475,7 +468,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 		 */
 		for (j = i + 1; j < i + count - 1; j++)
 			if (pa[j] == bogus_page) {
-				pa[j] = vm_page_lookup(obj,
+				pa[j] = vm_page_relookup(obj,
 				    OFF_TO_IDX(vmoff(j, off)));
 				KASSERT(pa[j], ("%s: page %p[%d] disappeared",
 				    __func__, pa, j));
@@ -485,9 +478,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 		(*nios)++;
 	}
 
-	if (locked)
-		VM_OBJECT_WUNLOCK(obj);
-
 	if (*nios == 0 && npages != 0)
 		SFSTAT_INC(sf_noiocnt);
 
sys/kern/vfs_bio.c

@@ -2878,11 +2878,8 @@ vfs_vmio_iodone(struct buf *bp)
 		 */
 		m = bp->b_pages[i];
 		if (m == bogus_page) {
-			if (bogus == false) {
-				bogus = true;
-				VM_OBJECT_RLOCK(obj);
-			}
-			m = vm_page_lookup(obj, OFF_TO_IDX(foff));
+			bogus = true;
+			m = vm_page_relookup(obj, OFF_TO_IDX(foff));
 			if (m == NULL)
 				panic("biodone: page disappeared!");
 			bp->b_pages[i] = m;
@@ -2905,8 +2902,6 @@ vfs_vmio_iodone(struct buf *bp)
 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 		iosize -= resid;
 	}
-	if (bogus)
-		VM_OBJECT_RUNLOCK(obj);
 	vm_object_pip_wakeupn(obj, bp->b_npages);
 	if (bogus && buf_mapped(bp)) {
 		BUF_CHECK_MAPPED(bp);
@@ -4470,22 +4465,16 @@ vfs_unbusy_pages(struct buf *bp)
 	int i;
 	vm_object_t obj;
 	vm_page_t m;
-	bool bogus;
 
 	runningbufwakeup(bp);
 	if (!(bp->b_flags & B_VMIO))
 		return;
 
 	obj = bp->b_bufobj->bo_object;
-	bogus = false;
 	for (i = 0; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
 		if (m == bogus_page) {
-			if (bogus == false) {
-				bogus = true;
-				VM_OBJECT_RLOCK(obj);
-			}
-			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
+			m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 			if (!m)
 				panic("vfs_unbusy_pages: page missing\n");
 			bp->b_pages[i] = m;
@@ -4498,8 +4487,6 @@ vfs_unbusy_pages(struct buf *bp)
 		}
 		vm_page_sunbusy(m);
 	}
-	if (bogus)
-		VM_OBJECT_RUNLOCK(obj);
 	vm_object_pip_wakeupn(obj, bp->b_npages);
 }
 
sys/vm/vm_page.c

@@ -1670,6 +1670,24 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 	return (vm_radix_lookup(&object->rtree, pindex));
 }
 
+/*
+ * vm_page_relookup:
+ *
+ * Returns a page that must already have been busied by
+ * the caller. Used for bogus page replacement.
+ */
+vm_page_t
+vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
+{
+	vm_page_t m;
+
+	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
+	KASSERT(m != NULL && vm_page_busied(m) &&
+	    m->object == object && m->pindex == pindex,
+	    ("vm_page_relookup: Invalid page %p", m));
+	return (m);
+}
+
 /*
  * This should only be used by lockless functions for releasing transient
  * incorrect acquires. The page may have been freed after we acquired a
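The unlocked radix lookup is safe here because a bogus_page entry only ever stands in for a page that the same I/O already holds busied (and, in the sendfile path, wired), so the real page cannot be freed or moved to another object or pindex while it is looked up again; the KASSERT above checks exactly those invariants. A hypothetical caller pattern, assuming a page array populated the way the buffer and sendfile paths populate theirs (resolve_bogus_pages and base are illustrative names, not part of the commit):

	static void
	resolve_bogus_pages(vm_object_t obj, vm_page_t *pages, vm_pindex_t base,
	    int npages)
	{
		int i;

		/* Replace placeholders with the still-busied real pages. */
		for (i = 0; i < npages; i++)
			if (pages[i] == bogus_page)
				pages[i] = vm_page_relookup(obj, base + i);
	}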
sys/vm/vm_page.h

@@ -653,6 +653,7 @@ void vm_page_reference(vm_page_t m);
 #define VPR_NOREUSE	0x02
 void vm_page_release(vm_page_t m, int flags);
 void vm_page_release_locked(vm_page_t m, int flags);
+vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
 bool vm_page_remove(vm_page_t);
 bool vm_page_remove_xbusy(vm_page_t);
 int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);