Fix breakage caused by r292373 in ZFS/FUSE/NFS/SMBFS.

With the new VOP_GETPAGES() KPI the "count" argument counts pages already,
and doesn't need to be translated from bytes to pages.

While here, make it consistent that *rbehind and *rahead are updated only
if we don't return an error.

Pointy hat to:	glebius
This commit is contained in:
glebius 2015-12-16 23:48:50 +00:00
parent 66d1f9182b
commit 910a73cc44
4 changed files with 47 additions and 60 deletions
sys
cddl/contrib/opensolaris/uts/common/fs/zfs
fs

@ -5775,27 +5775,21 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int *rbehind,
off_t startoff, endoff; off_t startoff, endoff;
int i, error; int i, error;
vm_pindex_t reqstart, reqend; vm_pindex_t reqstart, reqend;
int pcount, lsize, reqsize, size; int lsize, reqsize, size;
if (rbehind) object = m[0]->object;
*rbehind = 0; error = 0;
if (rahead)
*rahead = 0;
ZFS_ENTER(zfsvfs); ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp); ZFS_VERIFY_ZP(zp);
pcount = OFF_TO_IDX(round_page(count));
zfs_vmobject_wlock(object); zfs_vmobject_wlock(object);
if (m[pcount - 1]->valid != 0 && --pcount == 0) { if (m[count - 1]->valid != 0 && --count == 0) {
zfs_vmobject_wunlock(object); zfs_vmobject_wunlock(object);
ZFS_EXIT(zfsvfs); goto out;
return (zfs_vm_pagerret_ok);
} }
object = m[0]->object; mlast = m[count - 1];
mlast = m[pcount - 1];
if (IDX_TO_OFF(mlast->pindex) >= if (IDX_TO_OFF(mlast->pindex) >=
object->un_pager.vnp.vnp_size) { object->un_pager.vnp.vnp_size) {
@ -5813,10 +5807,9 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int *rbehind,
IDX_TO_OFF(mlast->pindex); IDX_TO_OFF(mlast->pindex);
zfs_vmobject_wunlock(object); zfs_vmobject_wunlock(object);
error = 0; for (i = 0; i < count; i++) {
for (i = 0; i < pcount; i++) {
size = PAGE_SIZE; size = PAGE_SIZE;
if (i == pcount - 1) if (i == count - 1)
size = lsize; size = lsize;
va = zfs_map_page(m[i], &sf); va = zfs_map_page(m[i], &sf);
error = dmu_read(os, zp->z_id, IDX_TO_OFF(m[i]->pindex), error = dmu_read(os, zp->z_id, IDX_TO_OFF(m[i]->pindex),
@ -5829,14 +5822,21 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int *rbehind,
} }
zfs_vmobject_wlock(object); zfs_vmobject_wlock(object);
for (i = 0; i < pcount; i++) for (i = 0; i < count; i++)
m[i]->valid = VM_PAGE_BITS_ALL; m[i]->valid = VM_PAGE_BITS_ALL;
zfs_vmobject_wunlock(object); zfs_vmobject_wunlock(object);
out: out:
ZFS_ACCESSTIME_STAMP(zfsvfs, zp); ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
return (error ? zfs_vm_pagerret_error : zfs_vm_pagerret_ok); if (error == 0) {
if (rbehind)
*rbehind = 0;
if (rahead)
*rahead = 0;
return (zfs_vm_pagerret_ok);
} else
return (zfs_vm_pagerret_error);
} }
static int static int

@ -1752,17 +1752,12 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
td = curthread; /* XXX */ td = curthread; /* XXX */
cred = curthread->td_ucred; /* XXX */ cred = curthread->td_ucred; /* XXX */
pages = ap->a_m; pages = ap->a_m;
count = ap->a_count; npages = ap->a_count;
if (ap->a_rbehind)
*ap->a_rbehind = 0;
if (ap->a_rahead)
*ap->a_rahead = 0;
if (!fsess_opt_mmap(vnode_mount(vp))) { if (!fsess_opt_mmap(vnode_mount(vp))) {
FS_DEBUG("called on non-cacheable vnode??\n"); FS_DEBUG("called on non-cacheable vnode??\n");
return (VM_PAGER_ERROR); return (VM_PAGER_ERROR);
} }
npages = btoc(count);
/* /*
* If the last page is partially valid, just return it and allow * If the last page is partially valid, just return it and allow
@ -1773,13 +1768,8 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
* but still somewhat disconnected from the kernel? * but still somewhat disconnected from the kernel?
*/ */
VM_OBJECT_WLOCK(vp->v_object); VM_OBJECT_WLOCK(vp->v_object);
if (pages[npages - 1]->valid != 0) { if (pages[npages - 1]->valid != 0 && --npages == 0)
if (--npages == 0) { goto out;
VM_OBJECT_WUNLOCK(vp->v_object);
return (VM_PAGER_OK);
}
count = npages << PAGE_SHIFT;
}
VM_OBJECT_WUNLOCK(vp->v_object); VM_OBJECT_WUNLOCK(vp->v_object);
/* /*
@ -1793,6 +1783,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodein);
PCPU_ADD(cnt.v_vnodepgsin, npages); PCPU_ADD(cnt.v_vnodepgsin, npages);
count = npages << PAGE_SHIFT;
iov.iov_base = (caddr_t)kva; iov.iov_base = (caddr_t)kva;
iov.iov_len = count; iov.iov_len = count;
uio.uio_iov = &iov; uio.uio_iov = &iov;
@ -1852,8 +1843,13 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
} }
} }
fuse_vm_page_unlock_queues(); fuse_vm_page_unlock_queues();
out:
VM_OBJECT_WUNLOCK(vp->v_object); VM_OBJECT_WUNLOCK(vp->v_object);
return 0; if (ap->a_rbehind)
*ap->a_rbehind = 0;
if (ap->a_rahead)
*ap->a_rahead = 0;
return (VM_PAGER_OK);
} }
/* /*

@ -100,11 +100,7 @@ ncl_getpages(struct vop_getpages_args *ap)
cred = curthread->td_ucred; /* XXX */ cred = curthread->td_ucred; /* XXX */
nmp = VFSTONFS(vp->v_mount); nmp = VFSTONFS(vp->v_mount);
pages = ap->a_m; pages = ap->a_m;
count = ap->a_count; npages = ap->a_count;
if (ap->a_rbehind)
*ap->a_rbehind = 0;
if (ap->a_rahead)
*ap->a_rahead = 0;
if ((object = vp->v_object) == NULL) { if ((object = vp->v_object) == NULL) {
ncl_printf("nfs_getpages: called with non-merged cache vnode??\n"); ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
@ -130,8 +126,6 @@ ncl_getpages(struct vop_getpages_args *ap)
} else } else
mtx_unlock(&nmp->nm_mtx); mtx_unlock(&nmp->nm_mtx);
npages = btoc(count);
/* /*
* If the requested page is partially valid, just return it and * If the requested page is partially valid, just return it and
* allow the pager to zero-out the blanks. Partially valid pages * allow the pager to zero-out the blanks. Partially valid pages
@ -140,13 +134,8 @@ ncl_getpages(struct vop_getpages_args *ap)
* XXXGL: is that true for NFS, where short read can occur??? * XXXGL: is that true for NFS, where short read can occur???
*/ */
VM_OBJECT_WLOCK(object); VM_OBJECT_WLOCK(object);
if (pages[npages - 1]->valid != 0) { if (pages[npages - 1]->valid != 0 && --npages == 0)
if (--npages == 0) { goto out;
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_OK);
}
count = npages << PAGE_SHIFT;
}
VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(object);
/* /*
@ -160,6 +149,7 @@ ncl_getpages(struct vop_getpages_args *ap)
PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodein);
PCPU_ADD(cnt.v_vnodepgsin, npages); PCPU_ADD(cnt.v_vnodepgsin, npages);
count = npages << PAGE_SHIFT;
iov.iov_base = (caddr_t) kva; iov.iov_base = (caddr_t) kva;
iov.iov_len = count; iov.iov_len = count;
uio.uio_iov = &iov; uio.uio_iov = &iov;
@ -221,8 +211,13 @@ ncl_getpages(struct vop_getpages_args *ap)
; ;
} }
} }
out:
VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(object);
return (0); if (ap->a_rbehind)
*ap->a_rbehind = 0;
if (ap->a_rahead)
*ap->a_rahead = 0;
return (VM_PAGER_OK);
} }
/* /*

@ -449,12 +449,7 @@ smbfs_getpages(ap)
np = VTOSMB(vp); np = VTOSMB(vp);
smp = VFSTOSMBFS(vp->v_mount); smp = VFSTOSMBFS(vp->v_mount);
pages = ap->a_m; pages = ap->a_m;
count = ap->a_count; npages = ap->a_count;
npages = btoc(count);
if (ap->a_rbehind)
*ap->a_rbehind = 0;
if (ap->a_rahead)
*ap->a_rahead = 0;
/* /*
* If the requested page is partially valid, just return it and * If the requested page is partially valid, just return it and
@ -464,13 +459,8 @@ smbfs_getpages(ap)
* XXXGL: is that true for SMB filesystem? * XXXGL: is that true for SMB filesystem?
*/ */
VM_OBJECT_WLOCK(object); VM_OBJECT_WLOCK(object);
if (pages[npages - 1]->valid != 0) { if (pages[npages - 1]->valid != 0 && --npages == 0)
if (--npages == 0) { goto out;
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_OK);
}
count = npages << PAGE_SHIFT;
}
VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(object);
scred = smbfs_malloc_scred(); scred = smbfs_malloc_scred();
@ -483,6 +473,7 @@ smbfs_getpages(ap)
PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodein);
PCPU_ADD(cnt.v_vnodepgsin, npages); PCPU_ADD(cnt.v_vnodepgsin, npages);
count = npages << PAGE_SHIFT;
iov.iov_base = (caddr_t) kva; iov.iov_base = (caddr_t) kva;
iov.iov_len = count; iov.iov_len = count;
uio.uio_iov = &iov; uio.uio_iov = &iov;
@ -536,8 +527,13 @@ smbfs_getpages(ap)
; ;
} }
} }
out:
VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(object);
return 0; if (ap->a_rbehind)
*ap->a_rbehind = 0;
if (ap->a_rahead)
*ap->a_rahead = 0;
return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */ #endif /* SMBFS_RWGENERIC */
} }