From 7aaf252c964f1cecd008adaf07ea0b8659805721 Mon Sep 17 00:00:00 2001
From: Jeff Roberson
Date: Fri, 28 Feb 2020 20:34:30 +0000
Subject: [PATCH] Convert a few trivial consumers to the new unlocked grab
 API.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D23847
---
 sys/dev/drm2/ttm/ttm_tt.c |  9 ++++-----
 sys/dev/md/md.c           |  4 +---
 sys/kern/kern_exec.c      | 10 ++++++----
 sys/kern/kern_sendfile.c  | 12 +++++++++---
 sys/kern/vfs_bio.c        | 13 ++++++-------
 sys/vm/vm_glue.c          |  4 +---
 6 files changed, 27 insertions(+), 25 deletions(-)

diff --git a/sys/dev/drm2/ttm/ttm_tt.c b/sys/dev/drm2/ttm/ttm_tt.c
index 7b2a2d03e678..1fc162d360cd 100644
--- a/sys/dev/drm2/ttm/ttm_tt.c
+++ b/sys/dev/drm2/ttm/ttm_tt.c
@@ -285,24 +285,24 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 
 	obj = ttm->swap_storage;
 
-	VM_OBJECT_WLOCK(obj);
 	vm_object_pip_add(obj, 1);
 	for (i = 0; i < ttm->num_pages; ++i) {
-		rv = vm_page_grab_valid(&from_page, obj, i,
-		    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
+		rv = vm_page_grab_valid_unlocked(&from_page, obj, i,
+		    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
 		if (rv != VM_PAGER_OK) {
 			ret = -EIO;
 			goto err_ret;
 		}
 		to_page = ttm->pages[i];
 		if (unlikely(to_page == NULL)) {
+			vm_page_sunbusy(from_page);
 			ret = -ENOMEM;
 			goto err_ret;
 		}
 		pmap_copy_page(from_page, to_page);
+		vm_page_sunbusy(from_page);
 	}
 	vm_object_pip_wakeup(obj);
-	VM_OBJECT_WUNLOCK(obj);
 
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
 		vm_object_deallocate(obj);
@@ -312,7 +312,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 
 err_ret:
 	vm_object_pip_wakeup(obj);
-	VM_OBJECT_WUNLOCK(obj);
 	return (ret);
 }
 
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index 4a07591ed51d..3c8f8de90e16 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -1060,9 +1060,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 	vm_object_pip_add(sc->object, 1);
 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
-		VM_OBJECT_WLOCK(sc->object);
-		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
-		VM_OBJECT_WUNLOCK(sc->object);
+		m = vm_page_grab_unlocked(sc->object, i, VM_ALLOC_SYSTEM);
 		if (bp->bio_cmd == BIO_READ) {
 			if (vm_page_all_valid(m))
 				rv = VM_PAGER_OK;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 0dedcf45926d..0ade46649bd0 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -984,14 +984,16 @@ exec_map_first_page(struct image_params *imgp)
 	object = imgp->vp->v_object;
 	if (object == NULL)
 		return (EACCES);
-	VM_OBJECT_WLOCK(object);
 #if VM_NRESERVLEVEL > 0
-	vm_object_color(object, 0);
+	if ((object->flags & OBJ_COLORED) == 0) {
+		VM_OBJECT_WLOCK(object);
+		vm_object_color(object, 0);
+		VM_OBJECT_WUNLOCK(object);
+	}
 #endif
-	error = vm_page_grab_valid(&m, object, 0,
+	error = vm_page_grab_valid_unlocked(&m, object, 0,
 	    VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
 	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
-	VM_OBJECT_WUNLOCK(object);
 	if (error != VM_PAGER_OK)
 		return (EIO);
 
diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c
index ac981a0ec03b..cb604cc55164 100644
--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -350,6 +350,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 {
 	vm_page_t *pa = sfio->pa;
 	int grabbed;
+	bool locked;
 
 	*nios = 0;
 	flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
@@ -358,9 +359,9 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 	 * First grab all the pages and wire them.  Note that we grab
 	 * only required pages.  Readahead pages are dealt with later.
 	 */
-	VM_OBJECT_WLOCK(obj);
+	locked = false;
 
-	grabbed = vm_page_grab_pages(obj, OFF_TO_IDX(off),
+	grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off),
 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
 	if (grabbed < npages) {
 		for (int i = grabbed; i < npages; i++)
@@ -380,6 +381,10 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 			i++;
 			continue;
 		}
+		if (!locked) {
+			VM_OBJECT_WLOCK(obj);
+			locked = true;
+		}
 
 		/*
 		 * Next page is invalid.  Check if it belongs to pager.  It
@@ -480,7 +485,8 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 		(*nios)++;
 	}
 
-	VM_OBJECT_WUNLOCK(obj);
+	if (locked)
+		VM_OBJECT_WUNLOCK(obj);
 
 	if (*nios == 0 && npages != 0)
 		SFSTAT_INC(sf_noiocnt);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a4da72d208fe..aeff2b5b259f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3046,13 +3046,11 @@ vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
 		 * deadlocks once allocbuf() is called after
 		 * pages are vfs_busy_pages().
 		 */
-		VM_OBJECT_WLOCK(obj);
-		(void)vm_page_grab_pages(obj,
+		(void)vm_page_grab_pages_unlocked(obj,
 		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
 		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
 		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
 		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
-		VM_OBJECT_WUNLOCK(obj);
 		bp->b_npages = desiredpages;
 	}
 
@@ -5237,11 +5235,13 @@ next_page:;
 	}
 
 end_pages:
-	VM_OBJECT_WLOCK(object);
 	redo = false;
 	for (i = 0; i < count; i++) {
-		vm_page_sunbusy(ma[i]);
-		ma[i] = vm_page_grab(object, ma[i]->pindex, VM_ALLOC_NORMAL);
+		if (vm_page_busy_tryupgrade(ma[i]) == 0) {
+			vm_page_sunbusy(ma[i]);
+			ma[i] = vm_page_grab_unlocked(object, ma[i]->pindex,
+			    VM_ALLOC_NORMAL);
+		}
 
 		/*
 		 * Since the pages were only sbusy while neither the
@@ -5259,7 +5259,6 @@ next_page:;
 		if (!vm_page_all_valid(ma[i]))
 			redo = true;
 	}
-	VM_OBJECT_WUNLOCK(object);
 	if (redo && error == 0)
 		goto again;
 	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 8465fba5eb8d..3a649da01779 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -222,10 +222,8 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 	vm_pindex_t pindex;
 
 	pindex = OFF_TO_IDX(offset);
-	VM_OBJECT_WLOCK(object);
-	(void)vm_page_grab_valid(&m, object, pindex,
+	(void)vm_page_grab_valid_unlocked(&m, object, pindex,
 	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
-	VM_OBJECT_WUNLOCK(object);
 	return (m);
 }
 
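Every hunk above applies the same conversion, so the following is only a rough
reference sketch (obj, pindex, and m are placeholder names, not code from any
file in this patch): the VM object write lock around the grab is dropped, and
the page returned by the *_unlocked variant either comes back unbusied
(VM_ALLOC_NOBUSY) or shared-busied, in which case the caller releases it with
vm_page_sunbusy() when done, as ttm_tt_swapin() now does.

	/* Before: the grab is serialized by the VM object write lock. */
	VM_OBJECT_WLOCK(obj);
	rv = vm_page_grab_valid(&m, obj, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	VM_OBJECT_WUNLOCK(obj);

	/*
	 * After: no object lock is taken; the page is returned
	 * shared-busied and is unbusied once the caller is finished.
	 */
	rv = vm_page_grab_valid_unlocked(&m, obj, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv == VM_PAGER_OK) {
		/* ... access the page ... */
		vm_page_sunbusy(m);
	}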