In the VOP_PUTPAGES() implementations, change the default error from
VM_PAGER_AGAIN to VM_PAGER_ERROR for the unwritten pages, and return
VM_PAGER_AGAIN for the partially written page.  Always forward at least
one page in the loop of vm_object_page_clean().

VM_PAGER_ERROR causes the page to be reactivated and does not clear its
dirty state, so the write is not lost.  The change fixes an infinite
loop in vm_object_page_clean() when the filesystem returns permanent
errors for some page writes.

Reported and tested by:	gavin
Reviewed by:	alc, rmacklem
MFC after:	1 week
commit 031ec8c10a
parent 48237774e4
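Before the per-filesystem hunks, it may help to restate what each pager
return value means for a page once VOP_PUTPAGES() is done.  The plain-C
sketch below is an illustration only, not kernel source: the PAGER_*
enum and the descriptions are stand-ins that paraphrase the commit log,
and the rtvals array is just an example outcome of one putpages run.

/* Illustration only -- stand-in names, not FreeBSD kernel code. */
#include <stdio.h>

enum pager_rv {
        PAGER_OK,       /* fully written: dirty bits cleared by the pager */
        PAGER_AGAIN,    /* partially written: the remainder stays dirty   */
        PAGER_ERROR     /* not written: reactivated, kept dirty, not lost */
};

static const char *
after_putpages(enum pager_rv rv)
{
        switch (rv) {
        case PAGER_OK:
                return "clean; nothing more to do";
        case PAGER_AGAIN:
                return "dirty tail remains; a later flush retries it";
        case PAGER_ERROR:
                return "still dirty; reactivated instead of retried forever";
        }
        return "unknown";
}

int
main(void)
{
        /* Example outcome of a putpages run over five pages. */
        enum pager_rv rtvals[5] = {
                PAGER_OK, PAGER_OK, PAGER_AGAIN, PAGER_ERROR, PAGER_ERROR
        };

        for (int i = 0; i < 5; i++)
                printf("page %d: %s\n", i, after_putpages(rtvals[i]));
        return (0);
}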
@@ -302,7 +302,7 @@ ncl_putpages(struct vop_putpages_args *ap)
 	}
 
 	for (i = 0; i < npages; i++)
-		rtvals[i] = VM_PAGER_AGAIN;
+		rtvals[i] = VM_PAGER_ERROR;
 
 	/*
 	 * When putting pages, do not extend file past EOF.
@@ -345,16 +345,9 @@ ncl_putpages(struct vop_putpages_args *ap)
 	pmap_qremove(kva, npages);
 	relpbuf(bp, &ncl_pbuf_freecnt);
 
-	if (!error) {
-		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
-		for (i = 0; i < nwritten; i++) {
-			rtvals[i] = VM_PAGER_OK;
-			vm_page_undirty(pages[i]);
-		}
-		if (must_commit) {
-			ncl_clearcommit(vp->v_mount);
-		}
-	}
+	vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
+	if (must_commit)
+		ncl_clearcommit(vp->v_mount);
 	return rtvals[0];
 }
 
@@ -544,7 +544,7 @@ nwfs_putpages(ap)
 	npages = btoc(count);
 
 	for (i = 0; i < npages; i++) {
-		rtvals[i] = VM_PAGER_AGAIN;
+		rtvals[i] = VM_PAGER_ERROR;
 	}
 
 	bp = getpbuf(&nwfs_pbuf_freecnt);
@@ -569,13 +569,8 @@ nwfs_putpages(ap)
 	pmap_qremove(kva, npages);
 	relpbuf(bp, &nwfs_pbuf_freecnt);
 
-	if (!error) {
-		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
-		for (i = 0; i < nwritten; i++) {
-			rtvals[i] = VM_PAGER_OK;
-			vm_page_undirty(pages[i]);
-		}
-	}
+	if (!error)
+		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
 	return rtvals[0];
 #endif /* NWFS_RWCACHE */
 }
@@ -609,7 +609,7 @@ smbfs_putpages(ap)
 	npages = btoc(count);
 
 	for (i = 0; i < npages; i++) {
-		rtvals[i] = VM_PAGER_AGAIN;
+		rtvals[i] = VM_PAGER_ERROR;
 	}
 
 	bp = getpbuf(&smbfs_pbuf_freecnt);
@@ -639,13 +639,8 @@ smbfs_putpages(ap)
 
 	relpbuf(bp, &smbfs_pbuf_freecnt);
 
-	if (!error) {
-		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
-		for (i = 0; i < nwritten; i++) {
-			rtvals[i] = VM_PAGER_OK;
-			vm_page_undirty(pages[i]);
-		}
-	}
+	if (!error)
+		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
 	return rtvals[0];
 #endif /* SMBFS_RWGENERIC */
 }
@@ -300,7 +300,7 @@ nfs_putpages(struct vop_putpages_args *ap)
 	}
 
 	for (i = 0; i < npages; i++)
-		rtvals[i] = VM_PAGER_AGAIN;
+		rtvals[i] = VM_PAGER_ERROR;
 
 	/*
 	 * When putting pages, do not extend file past EOF.
@@ -344,11 +344,7 @@ nfs_putpages(struct vop_putpages_args *ap)
 	relpbuf(bp, &nfs_pbuf_freecnt);
 
 	if (!error) {
-		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
-		for (i = 0; i < nwritten; i++) {
-			rtvals[i] = VM_PAGER_OK;
-			vm_page_undirty(pages[i]);
-		}
+		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
 		if (must_commit) {
 			nfs_clearcommit(vp->v_mount);
 		}
@@ -852,6 +852,21 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 		    flags, &clearobjflags);
 		if (object->generation != curgeneration)
 			goto rescan;
+
+		/*
+		 * If the VOP_PUTPAGES() did a truncated write, so
+		 * that even the first page of the run is not fully
+		 * written, vm_pageout_flush() returns 0 as the run
+		 * length.  Since the condition that caused truncated
+		 * write may be permanent, e.g. exhausted free space,
+		 * accepting n == 0 would cause an infinite loop.
+		 *
+		 * Forwarding the iterator leaves the unwritten page
+		 * behind, but there is not much we can do there if
+		 * filesystem refuses to write it.
+		 */
+		if (n == 0)
+			n = 1;
 		np = vm_page_find_least(object, pi + n);
 	}
 #if 0
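The hazard described in the new comment can be seen in a stripped-down
model of the clean loop.  This is a hypothetical userspace simulation,
not the kernel code: flush_run() stands in for vm_pageout_flush() and
models a filesystem that permanently refuses to write one page; without
the n = 1 clamp the scan index would never advance past it.

/* Simplified model of the page-clean scan -- illustration only. */
#include <stdio.h>

#define NPAGES	8

/*
 * Returns the length of the run actually written; 0 models a truncated
 * write where not even the first page made it out.
 */
static int
flush_run(int pi)
{
	return (pi == 3) ? 0 : 1;	/* page 3 never gets written */
}

int
main(void)
{
	int pi = 0;

	while (pi < NPAGES) {
		int n = flush_run(pi);
		if (n == 0)	/* the clamp added by this commit */
			n = 1;	/* skip the unwritable page and keep going */
		pi += n;	/* without the clamp, pi would stall at 3 */
	}
	printf("scan finished at page index %d\n", pi);
	return (0);
}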
@@ -1089,7 +1089,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	count = bytecount / PAGE_SIZE;
 
 	for (i = 0; i < count; i++)
-		rtvals[i] = VM_PAGER_AGAIN;
+		rtvals[i] = VM_PAGER_ERROR;
 
 	if ((int64_t)ma[0]->pindex < 0) {
 		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
@@ -1191,3 +1191,20 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	}
 	return rtvals[0];
 }
+
+void
+vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
+{
+	int i, pos;
+
+	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
+		if (pos < trunc_page(written)) {
+			rtvals[i] = VM_PAGER_OK;
+			vm_page_undirty(ma[i]);
+		} else {
+			/* Partially written page. */
+			rtvals[i] = VM_PAGER_AGAIN;
+			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
+		}
+	}
+}
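As a concrete trace of the new helper's arithmetic, the sketch below
re-implements the same loop in userspace with local stand-ins for
PAGE_SIZE, trunc_page() and the vm_page calls (hypothetical definitions,
illustration only).  With a five-page run and 10000 bytes written, pages
0 and 1 end up VM_PAGER_OK, page 2 ends up VM_PAGER_AGAIN with its first
10000 & PAGE_MASK = 1808 bytes treated as clean, and pages 3 and 4 keep
the VM_PAGER_ERROR default set at the top of the putpages routine.

/* Userspace trace of the undirty mapping -- illustration only. */
#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)

enum { PAGER_OK, PAGER_AGAIN, PAGER_ERROR };

static void
undirty_pages(int *rtvals, int written)
{
	int i, pos;

	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
		if (pos < trunc_page(written)) {
			rtvals[i] = PAGER_OK;	/* fully written page */
		} else {
			/* Partially written page: only the head is clean. */
			rtvals[i] = PAGER_AGAIN;
			printf("page %d: first %d bytes undirtied\n",
			    i, written & PAGE_MASK);
		}
	}
}

int
main(void)
{
	/* New default from this commit: everything starts as ERROR. */
	int rtvals[5] = { PAGER_ERROR, PAGER_ERROR, PAGER_ERROR,
	    PAGER_ERROR, PAGER_ERROR };

	undirty_pages(rtvals, 10000);	/* 2 full pages + 1808 bytes */
	for (int i = 0; i < 5; i++)
		printf("rtvals[%d] = %s\n", i,
		    rtvals[i] == PAGER_OK ? "OK" :
		    rtvals[i] == PAGER_AGAIN ? "AGAIN" : "ERROR");
	return (0);
}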
@@ -49,5 +49,8 @@ int vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m,
 int vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *m,
 					  int count, boolean_t sync,
 					  int *rtvals);
+
+void vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written);
+
 #endif /* _KERNEL */
 #endif /* _VNODE_PAGER_ */