Use unlocked grab for uipc_shm/tmpfs.

Reviewed by:	markj
Differential Revision:	https://reviews.freebsd.org/D23865
This commit (f72eaaeb03, parent 3f39f80ab3) was authored by
Jeff Roberson on 2020-02-28 20:33:28 +00:00.

[web-UI residue: "View File" link from the original commit page]

@ -176,23 +176,25 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
offset = uio->uio_offset & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
VM_OBJECT_WLOCK(obj);
rv = vm_page_grab_valid_unlocked(&m, obj, idx,
VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
if (rv == VM_PAGER_OK)
goto found;
/*
* Read I/O without either a corresponding resident page or swap
* page: use zero_region. This is intended to avoid instantiating
* pages on read from a sparse region.
*/
if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
VM_OBJECT_WLOCK(obj);
m = vm_page_lookup(obj, idx);
if (uio->uio_rw == UIO_READ && m == NULL &&
!vm_pager_has_page(obj, idx, NULL, NULL)) {
VM_OBJECT_WUNLOCK(obj);
return (uiomove(__DECONST(void *, zero_region), tlen, uio));
}
/*
* Parallel reads of the page content from disk are prevented
* by exclusive busy.
*
* Although the tmpfs vnode lock is held here, it is
* nonetheless safe to sleep waiting for a free page. The
* pageout daemon does not need to acquire the tmpfs vnode
@ -208,6 +210,8 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
return (EIO);
}
VM_OBJECT_WUNLOCK(obj);
found:
error = uiomove_fromphys(&m, offset, tlen, uio);
if (uio->uio_rw == UIO_WRITE && error == 0)
vm_page_set_dirty(m);