zfs: add vop_getpages method implementation
This should make the vnode_pager_getpages path a bit shorter and clearer. It should also eliminate problems with partially valid pages. Having this method opens room for future optimizations. To do: try to satisfy pages other than the required one, taking into account tradeoffs between the number of page faults, read throughput, and read latency. Also, eventually vop_putpages should be added too. Reviewed by: kib, mm, pjd MFC after: 3 weeks
This commit is contained in:
parent
12112cf676
commit
23a1bcf8c6
@ -4198,6 +4198,96 @@ ioflags(int ioflags)
|
||||
return (flags);
|
||||
}
|
||||
|
||||
/*
 * Page-in handler backing VOP_GETPAGES for ZFS vnodes.
 *
 * Reads the requested page m[reqpage] from the DMU into the VM page and
 * marks it fully valid.  All other pages in m[] are freed unread (no
 * read-ahead is attempted here yet; see the commit's to-do note).
 *
 * Parameters:
 *   vp      - the vnode being paged in; vp->v_object must match the
 *             requested page's object (asserted below).
 *   m       - array of busy pages handed down by the VM system.
 *   count   - size of the request in bytes (rounded up to whole pages).
 *   reqpage - index into m[] of the page that must be made valid.
 *
 * Returns a VM_PAGER_* status: VM_PAGER_OK on success or when the page
 * was already valid, VM_PAGER_BAD if the page lies past EOF, or
 * VM_PAGER_ERROR if the DMU read failed.
 */
static int
zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	objset_t *os = zp->z_zfsvfs->z_os;
	vm_page_t mreq;
	vm_object_t object;
	caddr_t va;
	struct sf_buf *sf;
	int i, error;
	int pcount, size;

	/* Block filesystem teardown and verify the znode is still valid. */
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	pcount = round_page(count) / PAGE_SIZE;
	mreq = m[reqpage];
	object = mreq->object;
	error = 0;

	KASSERT(vp->v_object == object, ("mismatching object"));

	VM_OBJECT_LOCK(object);

	/*
	 * Only the requested page is satisfied; give every other page in
	 * the run back to the VM system immediately.
	 */
	for (i = 0; i < pcount; i++) {
		if (i != reqpage) {
			vm_page_lock(m[i]);
			vm_page_free(m[i]);
			vm_page_unlock(m[i]);
		}
	}

	/*
	 * If the page already has valid data, zero any invalid portions
	 * and return without touching the DMU.
	 */
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		VM_OBJECT_UNLOCK(object);
		ZFS_EXIT(zfsvfs);
		return (VM_PAGER_OK);
	}

	/* Account this as a vnode page-in. */
	PCPU_INC(cnt.v_vnodein);
	PCPU_INC(cnt.v_vnodepgsin);

	/* Requests entirely beyond EOF cannot be satisfied. */
	if (IDX_TO_OFF(mreq->pindex) >= object->un_pager.vnp.vnp_size) {
		VM_OBJECT_UNLOCK(object);
		ZFS_EXIT(zfsvfs);
		return (VM_PAGER_BAD);
	}

	/* Clip the read size so it does not run past EOF. */
	size = PAGE_SIZE;
	if (IDX_TO_OFF(mreq->pindex) + size > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(mreq->pindex);

	/*
	 * The object lock is dropped across the DMU read; the page itself
	 * remains busy, which keeps it from being reused meanwhile.
	 */
	VM_OBJECT_UNLOCK(object);

	/* Map the page into KVA, fill it from the DMU, zero any tail. */
	va = zfs_map_page(mreq, &sf);
	error = dmu_read(os, zp->z_id, IDX_TO_OFF(mreq->pindex),
	    size, va, DMU_READ_PREFETCH);
	if (size != PAGE_SIZE)
		bzero(va + size, PAGE_SIZE - size);
	zfs_unmap_page(sf);

	VM_OBJECT_LOCK(object);

	if (!error)
		mreq->valid = VM_PAGE_BITS_ALL;
	/* A freshly paged-in page must not already carry dirty bits. */
	KASSERT(mreq->dirty == 0, ("zfs_getpages: page %p is dirty", mreq));

	VM_OBJECT_UNLOCK(object);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
|
||||
|
||||
static int
|
||||
zfs_freebsd_getpages(ap)
|
||||
struct vop_getpages_args /* {
|
||||
struct vnode *a_vp;
|
||||
vm_page_t *a_m;
|
||||
int a_count;
|
||||
int a_reqpage;
|
||||
vm_ooffset_t a_offset;
|
||||
} */ *ap;
|
||||
{
|
||||
|
||||
return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_reqpage));
|
||||
}
|
||||
|
||||
static int
|
||||
zfs_freebsd_open(ap)
|
||||
struct vop_open_args /* {
|
||||
@ -5314,6 +5404,7 @@ struct vop_vector zfs_vnodeops = {
|
||||
.vop_getacl = zfs_freebsd_getacl,
|
||||
.vop_setacl = zfs_freebsd_setacl,
|
||||
.vop_aclcheck = zfs_freebsd_aclcheck,
|
||||
.vop_getpages = zfs_freebsd_getpages,
|
||||
};
|
||||
|
||||
struct vop_vector zfs_fifoops = {
|
||||
|
Loading…
Reference in New Issue
Block a user