zfs: add zfs_freebsd_putpages
This should be more efficient than writing pages one by one via zfs_write -> update_pages in the case of a multi-page putpages call.

MFC after:	16 days
parent 6c5b7fffce
commit f77ffe1b22
@@ -40,6 +40,8 @@ __FBSDID("$FreeBSD$");
 const int zfs_vm_pagerret_bad = VM_PAGER_BAD;
 const int zfs_vm_pagerret_error = VM_PAGER_ERROR;
 const int zfs_vm_pagerret_ok = VM_PAGER_OK;
+const int zfs_vm_pagerput_sync = VM_PAGER_PUT_SYNC;
+const int zfs_vm_pagerput_inval = VM_PAGER_PUT_INVAL;
 
 void
 zfs_vmobject_assert_wlocked(vm_object_t object)
@@ -36,6 +36,8 @@
 extern const int zfs_vm_pagerret_bad;
 extern const int zfs_vm_pagerret_error;
 extern const int zfs_vm_pagerret_ok;
+extern const int zfs_vm_pagerput_sync;
+extern const int zfs_vm_pagerput_inval;
 
 void zfs_vmobject_assert_wlocked(vm_object_t object);
 void zfs_vmobject_wlock(vm_object_t object);
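
The zfs_vm_pagerret_* and new zfs_vm_pagerput_* wrappers above expose FreeBSD's VM_PAGER_* values as plain const ints, presumably so that the Solaris-derived ZFS sources can pass pager flags and return codes around without including the native VM headers; that rationale is an inference, not something stated in the commit. A minimal userland sketch of the same indirection pattern, using invented FAKE_*/bridge_* names that are not part of the commit:

/*
 * Sketch only: one "bridging" translation unit sees the native macros and
 * re-exports them as const ints; consumers only ever see the externs.
 */
#include <stdio.h>

#define FAKE_PAGER_OK   0       /* stand-in for VM_PAGER_OK */
#define FAKE_PAGER_BAD  1       /* stand-in for VM_PAGER_BAD */

/* What the bridging .c file would define ... */
const int bridge_pagerret_ok = FAKE_PAGER_OK;
const int bridge_pagerret_bad = FAKE_PAGER_BAD;

/* ... and what a consumer built without the native header would declare. */
extern const int bridge_pagerret_ok;
extern const int bridge_pagerret_bad;

int
main(void)
{
        printf("ok=%d bad=%d\n", bridge_pagerret_ok, bridge_pagerret_bad);
        return (0);
}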
@@ -467,6 +467,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 	caddr_t va;
 	int off;
 
+	ASSERT(segflg != UIO_NOCOPY);
 	ASSERT(vp->v_mount != NULL);
 	obj = vp->v_object;
 	ASSERT(obj != NULL);
@@ -477,27 +478,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 		vm_page_t pp;
 		int nbytes = imin(PAGESIZE - off, len);
 
-		if (segflg == UIO_NOCOPY) {
-			pp = vm_page_lookup(obj, OFF_TO_IDX(start));
-			KASSERT(pp != NULL,
-			    ("zfs update_pages: NULL page in putpages case"));
-			KASSERT(off == 0,
-			    ("zfs update_pages: unaligned data in putpages case"));
-			KASSERT(pp->valid == VM_PAGE_BITS_ALL,
-			    ("zfs update_pages: invalid page in putpages case"));
-			KASSERT(vm_page_sbusied(pp),
-			    ("zfs update_pages: unbusy page in putpages case"));
-			KASSERT(!pmap_page_is_write_mapped(pp),
-			    ("zfs update_pages: writable page in putpages case"));
-			zfs_vmobject_wunlock(obj);
-
-			va = zfs_map_page(pp, &sf);
-			(void) dmu_write(os, oid, start, nbytes, va, tx);
-			zfs_unmap_page(sf);
-
-			zfs_vmobject_wlock(obj);
-			vm_page_undirty(pp);
-		} else if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
+		if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
 			zfs_vmobject_wunlock(obj);
 
 			va = zfs_map_page(pp, &sf);
@@ -511,8 +492,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 		len -= nbytes;
 		off = 0;
 	}
-	if (segflg != UIO_NOCOPY)
-		vm_object_pip_wakeupn(obj, 0);
+	vm_object_pip_wakeupn(obj, 0);
 	zfs_vmobject_wunlock(obj);
 }
 
@@ -5803,6 +5783,168 @@ zfs_freebsd_getpages(ap)
 	return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_reqpage));
 }
 
+static int
+zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
+    int *rtvals)
+{
+	znode_t *zp = VTOZ(vp);
+	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+	rl_t *rl;
+	dmu_tx_t *tx;
+	struct sf_buf *sf;
+	vm_object_t object;
+	vm_page_t m;
+	caddr_t va;
+	size_t tocopy;
+	size_t lo_len;
+	vm_ooffset_t lo_off;
+	vm_ooffset_t off;
+	uint_t blksz;
+	int ncount;
+	int pcount;
+	int err;
+	int i;
+
+	ZFS_ENTER(zfsvfs);
+	ZFS_VERIFY_ZP(zp);
+
+	object = vp->v_object;
+	pcount = btoc(len);
+	ncount = pcount;
+
+	KASSERT(ma[0]->object == object, ("mismatching object"));
+	KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
+
+	for (i = 0; i < pcount; i++)
+		rtvals[i] = zfs_vm_pagerret_error;
+
+	off = IDX_TO_OFF(ma[0]->pindex);
+	blksz = zp->z_blksz;
+	lo_off = rounddown(off, blksz);
+	lo_len = roundup(len + (off - lo_off), blksz);
+	rl = zfs_range_lock(zp, lo_off, lo_len, RL_WRITER);
+
+	zfs_vmobject_wlock(object);
+	if (len + off > object->un_pager.vnp.vnp_size) {
+		if (object->un_pager.vnp.vnp_size > off) {
+			int pgoff;
+
+			len = object->un_pager.vnp.vnp_size - off;
+			ncount = btoc(len);
+			if ((pgoff = (int)len & PAGE_MASK) != 0) {
+				/*
+				 * If the object is locked and the following
+				 * conditions hold, then the page's dirty
+				 * field cannot be concurrently changed by a
+				 * pmap operation.
+				 */
+				m = ma[ncount - 1];
+				vm_page_assert_sbusied(m);
+				KASSERT(!pmap_page_is_write_mapped(m),
+				    ("zfs_putpages: page %p is not read-only", m));
+				vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
+				    pgoff);
+			}
+		} else {
+			len = 0;
+			ncount = 0;
+		}
+		if (ncount < pcount) {
+			for (i = ncount; i < pcount; i++) {
+				rtvals[i] = zfs_vm_pagerret_bad;
+			}
+		}
+	}
+	zfs_vmobject_wunlock(object);
+
+	if (ncount == 0)
+		goto out;
+
+	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
+	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
+		goto out;
+	}
+
+top:
+	tx = dmu_tx_create(zfsvfs->z_os);
+	dmu_tx_hold_write(tx, zp->z_id, off, len);
+
+	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+	zfs_sa_upgrade_txholds(tx, zp);
+	err = dmu_tx_assign(tx, TXG_NOWAIT);
+	if (err != 0) {
+		if (err == ERESTART) {
+			dmu_tx_wait(tx);
+			dmu_tx_abort(tx);
+			goto top;
+		}
+		dmu_tx_abort(tx);
+		goto out;
+	}
+
+	if (zp->z_blksz < PAGE_SIZE) {
+		i = 0;
+		for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
+			tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
+			va = zfs_map_page(ma[i], &sf);
+			dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
+			zfs_unmap_page(sf);
+		}
+	} else {
+		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
+	}
+
+	if (err == 0) {
+		uint64_t mtime[2], ctime[2];
+		sa_bulk_attr_t bulk[3];
+		int count = 0;
+
+		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+		    &mtime, 16);
+		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+		    &ctime, 16);
+		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+		    &zp->z_pflags, 8);
+		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
+		    B_TRUE);
+		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
+
+		zfs_vmobject_wlock(object);
+		for (i = 0; i < ncount; i++) {
+			rtvals[i] = zfs_vm_pagerret_ok;
+			vm_page_undirty(ma[i]);
+		}
+		zfs_vmobject_wunlock(object);
+		PCPU_INC(cnt.v_vnodeout);
+		PCPU_ADD(cnt.v_vnodepgsout, ncount);
+	}
+	dmu_tx_commit(tx);
+
+out:
+	zfs_range_unlock(rl);
+	if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
+	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+		zil_commit(zfsvfs->z_log, zp->z_id);
+	ZFS_EXIT(zfsvfs);
+	return (rtvals[0]);
+}
+
+int
+zfs_freebsd_putpages(ap)
+	struct vop_putpages_args /* {
+		struct vnode *a_vp;
+		vm_page_t *a_m;
+		int a_count;
+		int a_sync;
+		int *a_rtvals;
+		vm_ooffset_t a_offset;
+	} */ *ap;
+{
+
+	return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
+	    ap->a_rtvals));
+}
+
 static int
 zfs_freebsd_bmap(ap)
 	struct vop_bmap_args /* {
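
The two computations in zfs_putpages() that are easiest to misread are the block-aligned range lock (lo_off/lo_len) and the trimming of dirty bits past end-of-file on the last page (pgoff). The sketch below only replays that arithmetic in userland with made-up sample values; ROUNDDOWN/ROUNDUP stand in for the kernel's rounddown()/roundup() macros and nothing here is part of the commit.

#include <stdint.h>
#include <stdio.h>

/* Userland stand-ins for the sys/param.h macros used by zfs_putpages(). */
#define ROUNDDOWN(x, y) (((x) / (y)) * (y))
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define PAGE_SIZE       4096
#define PAGE_MASK       (PAGE_SIZE - 1)

int
main(void)
{
        /* Made-up example: three dirty pages at offset 0x5000, 8K recordsize. */
        uint64_t off = 0x5000;                  /* IDX_TO_OFF(ma[0]->pindex) */
        uint64_t len = 3 * PAGE_SIZE;           /* bytes covered by the page run */
        uint64_t blksz = 8192;                  /* zp->z_blksz */

        /* The range lock must cover whole ZFS blocks, not just whole pages. */
        uint64_t lo_off = ROUNDDOWN(off, blksz);
        uint64_t lo_len = ROUNDUP(len + (off - lo_off), blksz);
        printf("lock [%#jx, %#jx)\n", (uintmax_t)lo_off,
            (uintmax_t)(lo_off + lo_len));

        /* Tail trim: if EOF falls mid-page, dirty bits past pgoff are cleared. */
        uint64_t vnp_size = off + len - 1000;   /* pretend EOF is 1000 bytes short */
        if (len + off > vnp_size) {
                len = vnp_size - off;
                int pgoff = (int)(len & PAGE_MASK);
                if (pgoff != 0)
                        printf("clear dirty range [%d, %d) of last page\n",
                            pgoff, PAGE_SIZE);
        }
        return (0);
}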
@@ -6949,6 +7091,7 @@ struct vop_vector zfs_vnodeops = {
 	.vop_setacl = zfs_freebsd_setacl,
 	.vop_aclcheck = zfs_freebsd_aclcheck,
 	.vop_getpages = zfs_freebsd_getpages,
+	.vop_putpages = zfs_freebsd_putpages,
 };
 
 struct vop_vector zfs_fifoops = {
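
With .vop_putpages wired into zfs_vnodeops, the paths that flush dirty mapped pages — msync(2), the syncer, and the page daemon — should now land in zfs_putpages() instead of falling back to the page-at-a-time write path via zfs_write/update_pages. One simple way to exercise it from userland is to dirty a multi-page MAP_SHARED mapping and msync it; the sketch below assumes the made-up path /tmp/putpages-test lives on a ZFS dataset.

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        const char *path = "/tmp/putpages-test"; /* assumed to be on ZFS */
        size_t sz = 16 * 4096;          /* several pages, so putpages sees a run */
        char *p;
        int fd;

        if ((fd = open(path, O_RDWR | O_CREAT, 0644)) == -1)
                err(1, "open");
        if (ftruncate(fd, sz) == -1)
                err(1, "ftruncate");
        if ((p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)) ==
            MAP_FAILED)
                err(1, "mmap");

        memset(p, 'x', sz);             /* dirty every page of the mapping */

        /* MS_SYNC pushes the dirty pages out through the vnode's putpages VOP. */
        if (msync(p, sz, MS_SYNC) == -1)
                err(1, "msync");

        munmap(p, sz);
        close(fd);
        return (0);
}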