- Always try to write one whole page at a time.

- vm_page_undirty() is enough (instead of vm_page_set_validclean()), but it has
  to be called before we write the data, in case someone dirties the page after
  our write but before our vm_page_undirty() call.
- Always dmu_write(), no matter whether uiomove() succeeded, because it could
  have succeeded partially and we would lose those changes (the sketch after
  this list pulls the three points together).

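Pulled together, the per-page path now looks roughly like this — abridged from
the diff below, with object locking, the invalid-page branch, and loop setup
omitted; a sketch of the ordering, not a drop-in excerpt:

	woff = uio->uio_loffset;	/* re-read each iteration; uiomove() advances it */
	dmubytes = MIN(PAGESIZE,	/* whole page, clamped at end of file */
	    obj->un_pager.vnp.vnp_size - (woff - off));
	vm_page_busy(m);
	vm_page_lock_queues();
	vm_page_undirty(m);		/* before we touch the data: a concurrent   */
	vm_page_unlock_queues();	/* re-dirty after this point is preserved    */
	VM_OBJECT_UNLOCK(obj);
	sched_pin();
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	va = (caddr_t)sf_buf_kva(sf);
	error = uiomove(va + off, bytes, UIO_WRITE, uio);
	/* Unconditional: even a partial uiomove() modified the page. */
	dmu_write(os, zp->z_id, woff - off, dmubytes, va, tx);
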
All good ideas from:	ups
Pawel Jakub Dawidek 2007-04-26 12:58:17 +00:00
parent caa93cc3a0
commit 171c4b21e2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=169057
2 changed files with 18 additions and 14 deletions


@@ -298,29 +298,31 @@ mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx)
 	VM_OBJECT_LOCK(obj);
 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
 		uint64_t bytes = MIN(PAGESIZE - off, len);
-		uint64_t woff = uio->uio_loffset;
 
 again:
 		if ((m = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
 		    vm_page_is_valid(m, (vm_offset_t)off, bytes)) {
+			uint64_t woff, dmubytes;
 			caddr_t va;
 
 			if (vm_page_sleep_if_busy(m, FALSE, "zfsmwb"))
 				goto again;
+			woff = uio->uio_loffset;
+			dmubytes = MIN(PAGESIZE,
+			    obj->un_pager.vnp.vnp_size - (woff - off));
 			vm_page_busy(m);
+			vm_page_lock_queues();
+			vm_page_undirty(m);
+			vm_page_unlock_queues();
 			VM_OBJECT_UNLOCK(obj);
 			sched_pin();
 			sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
 			va = (caddr_t)sf_buf_kva(sf);
-			error = uiomove(va+off, bytes, UIO_WRITE, uio);
-			if (error == 0)
-				dmu_write(os, zp->z_id, woff, bytes, va+off, tx);
+			error = uiomove(va + off, bytes, UIO_WRITE, uio);
+			dmu_write(os, zp->z_id, woff - off, dmubytes, va, tx);
 			sf_buf_free(sf);
 			sched_unpin();
 			VM_OBJECT_LOCK(obj);
-			vm_page_lock_queues();
-			vm_page_set_validclean(m, off, bytes);
-			vm_page_unlock_queues();
 			vm_page_wakeup(m);
 		} else {
 			VM_OBJECT_UNLOCK(obj);
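
The offset arithmetic deserves a note: woff is re-read from uio->uio_loffset
on every iteration (uiomove() advances it), off is the offset within the
current page, so woff - off is the page-aligned file offset, and dmubytes
clamps the whole-page write at end of file. A standalone illustration with
made-up numbers — the values are invented for the example, only the formula
comes from the code above:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGESIZE	4096
	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		/* Invented example: a write starting mid-page, on a file
		 * whose size is not a multiple of PAGESIZE. */
		uint64_t woff = 8300;		/* uio->uio_loffset */
		uint64_t off = woff % PAGESIZE;	/* 108: offset within the page */
		uint64_t vnp_size = 10000;	/* obj->un_pager.vnp.vnp_size */

		uint64_t page_off = woff - off;	/* 8192: page-aligned offset */
		uint64_t dmubytes = MIN(PAGESIZE, vnp_size - page_off);	/* 1808 */

		printf("dmu_write offset=%ju bytes=%ju\n",
		    (uintmax_t)page_off, (uintmax_t)dmubytes);
		return (0);
	}

Without the clamp, writing a full PAGESIZE on the last page would push data
past the end of the file.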
