Extract calculation of ioflags from the vm_pager_putpages flags into a
helper.

Reviewed by:	markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
X-Differential revision:	https://reviews.freebsd.org/D10241
commit 65b9599a76
parent 3dbb0ca646
Author: Konstantin Belousov
Date:   2017-04-05 16:56:04 +00:00

2 changed files with 28 additions and 19 deletions
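Editor's note: the point of the extraction is that a filesystem-specific
putpages implementation can reuse the same translation from VM_PAGER_*
pager flags to VOP_WRITE() IO_* flags instead of open-coding it. A minimal
sketch of such a caller follows; xxfs_putpages() and xxfs_write_pages()
are hypothetical names, and only the vnode_pager_putpages_ioflags() call
is the interface introduced by this commit.

static int
xxfs_putpages(struct vop_putpages_args *ap)
{
	int ioflags;

	/*
	 * ap->a_sync carries the VM_PAGER_* flags; translate them
	 * exactly the way the generic vnode pager now does.
	 */
	ioflags = vnode_pager_putpages_ioflags(ap->a_sync);

	/* Write the dirty pages with those flags (hypothetical helper). */
	return (xxfs_write_pages(ap->a_vp, ap->a_m, ap->a_count,
	    ioflags, ap->a_rtvals));
}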

diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c

@@ -1198,7 +1198,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	vm_ooffset_t poffset;
 	struct uio auio;
 	struct iovec aiov;
-	int count, error, i, ioflags, maxsize, ncount, ppscheck;
+	int count, error, i, maxsize, ncount, ppscheck;
 	static struct timeval lastfail;
 	static int curfail;
@@ -1265,22 +1265,6 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	}
 	VM_OBJECT_WUNLOCK(object);
-	/*
-	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
-	 * rather then a bdwrite() to prevent paging I/O from saturating
-	 * the buffer cache.  Dummy-up the sequential heuristic to cause
-	 * large ranges to cluster.  If neither IO_SYNC or IO_ASYNC is set,
-	 * the system decides how to cluster.
-	 */
-	ioflags = IO_VMIO;
-	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
-		ioflags |= IO_SYNC;
-	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
-		ioflags |= IO_ASYNC;
-	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
-	ioflags |= (flags & VM_PAGER_PUT_NOREUSE) ? IO_NOREUSE : 0;
-	ioflags |= IO_SEQMAX << IO_SEQSHIFT;
 	aiov.iov_base = (caddr_t) 0;
 	aiov.iov_len = maxsize;
 	auio.uio_iov = &aiov;
@@ -1290,7 +1274,8 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	auio.uio_rw = UIO_WRITE;
 	auio.uio_resid = maxsize;
 	auio.uio_td = (struct thread *) 0;
-	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
+	error = VOP_WRITE(vp, &auio, vnode_pager_putpages_ioflags(flags),
+	    curthread->td_ucred);
 	PCPU_INC(cnt.v_vnodeout);
 	PCPU_ADD(cnt.v_vnodepgsout, ncount);
@@ -1310,6 +1295,30 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	return rtvals[0];
 }
 
+int
+vnode_pager_putpages_ioflags(int pager_flags)
+{
+	int ioflags;
+
+	/*
+	 * Pageouts are already clustered; use IO_ASYNC to force a
+	 * bawrite() rather than a bdwrite() to prevent paging I/O
+	 * from saturating the buffer cache.  Dummy-up the sequential
+	 * heuristic to cause large ranges to cluster.  If neither
+	 * IO_SYNC nor IO_ASYNC is set, the system decides how to
+	 * cluster.
+	 */
+	ioflags = IO_VMIO;
+	if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0)
+		ioflags |= IO_SYNC;
+	else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0)
+		ioflags |= IO_ASYNC;
+	ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL : 0;
+	ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0;
+	ioflags |= IO_SEQMAX << IO_SEQSHIFT;
+	return (ioflags);
+}
+
 void
 vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
 {
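Editor's note: the mapping the new helper implements can be summarized
with a few assertions. This is an illustrative kernel-context sketch, not
part of the commit; check_putpages_ioflags() is a made-up name.

static void
check_putpages_ioflags(void)
{
	int io;

	/* Default pageout: VMIO write, forced bawrite() via IO_ASYNC. */
	io = vnode_pager_putpages_ioflags(0);
	KASSERT((io & (IO_VMIO | IO_ASYNC)) == (IO_VMIO | IO_ASYNC),
	    ("default pageout must be an async VMIO write"));

	/* VM_PAGER_CLUSTER_OK: leave the sync/async decision to the system. */
	io = vnode_pager_putpages_ioflags(VM_PAGER_CLUSTER_OK);
	KASSERT((io & (IO_SYNC | IO_ASYNC)) == 0,
	    ("clustered pageout should not force a sync policy"));

	/* VM_PAGER_PUT_INVAL implies a synchronous, invalidating write. */
	io = vnode_pager_putpages_ioflags(VM_PAGER_PUT_INVAL);
	KASSERT((io & (IO_SYNC | IO_INVAL)) == (IO_SYNC | IO_INVAL),
	    ("invalidating pageout must be sync and set IO_INVAL"));
}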

diff --git a/sys/vm/vnode_pager.h b/sys/vm/vnode_pager.h
--- a/sys/vm/vnode_pager.h
+++ b/sys/vm/vnode_pager.h

@@ -47,7 +47,8 @@ int vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *m,
 	    int count, int flags, int *rtvals);
 int vnode_pager_local_getpages(struct vop_getpages_args *ap);
 int vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap);
+int vnode_pager_putpages_ioflags(int pager_flags);
 void vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
 	    vm_offset_t end);
 void vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written);