Eliminate object locking in zfs where possible with the new lockless grab APIs.

Reviewed by:	kib, markj, mmacy
Differential Revision:	https://reviews.freebsd.org/D23848
This commit is contained in:
Jeff Roberson 2020-02-28 20:29:53 +00:00
parent cd1da6ff8b
commit 9defe1c076
2 changed files with 19 additions and 33 deletions

View File

@ -1738,11 +1738,10 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
#endif #endif
vmobj = ma[0]->object; vmobj = ma[0]->object;
zfs_vmobject_wlock(vmobj);
db = dbp[0]; db = dbp[0];
for (i = 0; i < *rbehind; i++) { for (i = 0; i < *rbehind; i++) {
m = vm_page_grab(vmobj, ma[0]->pindex - 1 - i, m = vm_page_grab_unlocked(vmobj, ma[0]->pindex - 1 - i,
VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT |
VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY); VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
if (m == NULL) if (m == NULL)
@ -1857,7 +1856,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
} }
for (i = 0; i < *rahead; i++) { for (i = 0; i < *rahead; i++) {
m = vm_page_grab(vmobj, ma[count - 1]->pindex + 1 + i, m = vm_page_grab_unlocked(vmobj, ma[count - 1]->pindex + 1 + i,
VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT |
VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY); VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
if (m == NULL) if (m == NULL)
@ -1889,7 +1888,6 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
vm_page_sunbusy(m); vm_page_sunbusy(m);
} }
*rahead = i; *rahead = i;
zfs_vmobject_wunlock(vmobj);
dmu_buf_rele_array(dbp, numbufs, FTAG); dmu_buf_rele_array(dbp, numbufs, FTAG);
return (0); return (0);

View File

@ -410,10 +410,10 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
nbytes = end - off; nbytes = end - off;
obj = vp->v_object; obj = vp->v_object;
zfs_vmobject_assert_wlocked(obj);
vm_page_grab_valid(&pp, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT | vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY); VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
VM_ALLOC_IGN_SBUSY);
if (pp != NULL) { if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL); ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1); vm_object_pip_add(obj, 1);
@ -439,10 +439,9 @@ page_wire(vnode_t *vp, int64_t start)
vm_page_t m; vm_page_t m;
obj = vp->v_object; obj = vp->v_object;
zfs_vmobject_assert_wlocked(obj); vm_page_grab_valid_unlocked(&m, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
vm_page_grab_valid(&m, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT | VM_ALLOC_NOBUSY);
VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOBUSY);
return (m); return (m);
} }
@ -475,28 +474,22 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
ASSERT(obj != NULL); ASSERT(obj != NULL);
off = start & PAGEOFFSET; off = start & PAGEOFFSET;
zfs_vmobject_wlock(obj);
vm_object_pip_add(obj, 1); vm_object_pip_add(obj, 1);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) { for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp; vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len); int nbytes = imin(PAGESIZE - off, len);
if ((pp = page_busy(vp, start, off, nbytes)) != NULL) { if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf); va = zfs_map_page(pp, &sf);
(void) dmu_read(os, oid, start+off, nbytes, (void) dmu_read(os, oid, start+off, nbytes,
va+off, DMU_READ_PREFETCH);; va+off, DMU_READ_PREFETCH);;
zfs_unmap_page(sf); zfs_unmap_page(sf);
zfs_vmobject_wlock(obj);
page_unbusy(pp); page_unbusy(pp);
} }
len -= nbytes; len -= nbytes;
off = 0; off = 0;
} }
vm_object_pip_wakeup(obj); vm_object_pip_wakeup(obj);
zfs_vmobject_wunlock(obj);
} }
/* /*
@ -528,29 +521,31 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
ASSERT(obj != NULL); ASSERT(obj != NULL);
ASSERT((uio->uio_loffset & PAGEOFFSET) == 0); ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
zfs_vmobject_wlock(obj);
for (start = uio->uio_loffset; len > 0; start += PAGESIZE) { for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len); int bytes = MIN(PAGESIZE, len);
pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY | pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY); VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
if (vm_page_none_valid(pp)) { if (vm_page_none_valid(pp)) {
zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf); va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va, error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH); DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0) if (bytes != PAGESIZE && error == 0)
bzero(va + bytes, PAGESIZE - bytes); bzero(va + bytes, PAGESIZE - bytes);
zfs_unmap_page(sf); zfs_unmap_page(sf);
zfs_vmobject_wlock(obj);
if (error == 0) { if (error == 0) {
vm_page_valid(pp); vm_page_valid(pp);
vm_page_activate(pp); vm_page_activate(pp);
vm_page_sunbusy(pp);
} else {
zfs_vmobject_wlock(obj);
if (!vm_page_wired(pp) && pp->valid == 0 &&
vm_page_busy_tryupgrade(pp))
vm_page_free(pp);
else
vm_page_sunbusy(pp);
zfs_vmobject_wunlock(obj);
} }
vm_page_sunbusy(pp);
if (error != 0 && !vm_page_wired(pp) &&
pp->valid == 0 && vm_page_tryxbusy(pp))
vm_page_free(pp);
} else { } else {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL); ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_sunbusy(pp); vm_page_sunbusy(pp);
@ -561,7 +556,6 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
uio->uio_offset += bytes; uio->uio_offset += bytes;
len -= bytes; len -= bytes;
} }
zfs_vmobject_wunlock(obj);
return (error); return (error);
} }
@ -592,7 +586,6 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
start = uio->uio_loffset; start = uio->uio_loffset;
off = start & PAGEOFFSET; off = start & PAGEOFFSET;
zfs_vmobject_wlock(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) { for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp; vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len); uint64_t bytes = MIN(PAGESIZE - off, len);
@ -601,7 +594,6 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
struct sf_buf *sf; struct sf_buf *sf;
caddr_t va; caddr_t va;
zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf); va = zfs_map_page(pp, &sf);
#ifdef illumos #ifdef illumos
error = uiomove(va + off, bytes, UIO_READ, uio); error = uiomove(va + off, bytes, UIO_READ, uio);
@ -609,20 +601,16 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
error = vn_io_fault_uiomove(va + off, bytes, uio); error = vn_io_fault_uiomove(va + off, bytes, uio);
#endif #endif
zfs_unmap_page(sf); zfs_unmap_page(sf);
zfs_vmobject_wlock(obj);
page_unwire(pp); page_unwire(pp);
} else { } else {
zfs_vmobject_wunlock(obj);
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl), error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes); uio, bytes);
zfs_vmobject_wlock(obj);
} }
len -= bytes; len -= bytes;
off = 0; off = 0;
if (error) if (error)
break; break;
} }
zfs_vmobject_wunlock(obj);
return (error); return (error);
} }