From ee3d0fb8ef5def49a12c51a3030af447009a1ec5 Mon Sep 17 00:00:00 2001 From: kib Date: Tue, 20 Feb 2018 10:13:13 +0000 Subject: [PATCH] vm_wait() rework. Make vm_wait() take a vm_object argument which specifies the domain set to wait on for the min condition to pass. If there is no object associated with the wait, use curthread's policy domainset. The mechanics of the wait in vm_wait() and vm_wait_domain() are supplied by the new helper vm_wait_doms(), which directly takes the bitmask of the domains to wait on for the min condition to pass. Eliminate pagedaemon_wait(). vm_domain_clear() handles the same operations. Eliminate the VM_WAIT and VM_WAITPFAULT macros; the direct function calls are enough. Eliminate several control state variables from vm_domain, unneeded after the vm_wait() conversion. Sketched and reviewed by: jeff Tested by: pho Sponsored by: The FreeBSD Foundation, Mellanox Technologies Differential Revision: https://reviews.freebsd.org/D14384 --- sys/amd64/amd64/pmap.c | 2 +- sys/arm/arm/pmap-v4.c | 2 +- sys/arm/arm/pmap-v6.c | 2 +- sys/arm/nvidia/drm2/tegra_bo.c | 2 +- sys/arm64/arm64/pmap.c | 4 +- sys/compat/linuxkpi/common/src/linux_page.c | 2 +- sys/dev/drm2/i915/i915_gem.c | 2 +- sys/dev/drm2/i915/i915_gem_gtt.c | 2 +- sys/dev/drm2/ttm/ttm_bo_vm.c | 2 +- sys/dev/drm2/ttm/ttm_page_alloc.c | 4 +- sys/i386/i386/pmap.c | 7 +- sys/mips/mips/pmap.c | 4 +- sys/mips/mips/uma_machdep.c | 10 +- sys/powerpc/aim/mmu_oea.c | 2 +- sys/powerpc/aim/mmu_oea64.c | 2 +- sys/powerpc/booke/pmap.c | 6 +- sys/riscv/riscv/pmap.c | 4 +- sys/vm/vm_fault.c | 4 +- sys/vm/vm_page.c | 110 +++++++++++--------- sys/vm/vm_pageout.c | 59 +---------- sys/vm/vm_pageout.h | 5 +- sys/vm/vm_pagequeue.h | 2 - 22 files changed, 95 insertions(+), 144 deletions(-) diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 8c0b372eb4d3..dca5e385cf59 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -2675,7 +2675,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) RELEASE_PV_LIST_LOCK(lockp); PMAP_UNLOCK(pmap); PMAP_ASSERT_NOT_IN_DI(); - VM_WAIT; + vm_wait(NULL); PMAP_LOCK(pmap); } diff --git a/sys/arm/arm/pmap-v4.c b/sys/arm/arm/pmap-v4.c index 27c235cb403b..b04c733c4718 100644 --- a/sys/arm/arm/pmap-v4.c +++ b/sys/arm/arm/pmap-v4.c @@ -3248,7 +3248,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, if ((flags & PMAP_ENTER_NOSLEEP) == 0) { PMAP_UNLOCK(pmap); rw_wunlock(&pvh_global_lock); - VM_WAIT; + vm_wait(NULL); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); goto do_l2b_alloc; } diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c index c09c9d6898a0..e70cef19a848 100644 --- a/sys/arm/arm/pmap-v6.c +++ b/sys/arm/arm/pmap-v6.c @@ -2478,7 +2478,7 @@ _pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) if ((flags & PMAP_ENTER_NOSLEEP) == 0) { PMAP_UNLOCK(pmap); rw_wunlock(&pvh_global_lock); - VM_WAIT; + vm_wait(NULL); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); } diff --git a/sys/arm/nvidia/drm2/tegra_bo.c b/sys/arm/nvidia/drm2/tegra_bo.c index 251f683ff14e..278d9758ee2e 100644 --- a/sys/arm/nvidia/drm2/tegra_bo.c +++ b/sys/arm/nvidia/drm2/tegra_bo.c @@ -114,7 +114,7 @@ tegra_bo_alloc_contig(size_t npages, u_long alignment, vm_memattr_t memattr, if (tries < 3) { if (!vm_page_reclaim_contig(pflags, npages, low, high, alignment, boundary)) - VM_WAIT; + vm_wait(NULL); tries++; goto retry; } diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c index 21b95b063def..4994201edbbd 100644 --- a/sys/arm64/arm64/pmap.c +++
b/sys/arm64/arm64/pmap.c @@ -1409,7 +1409,7 @@ pmap_pinit(pmap_t pmap) */ while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) - VM_WAIT; + vm_wait(NULL); l0phys = VM_PAGE_TO_PHYS(l0pt); pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys); @@ -1449,7 +1449,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) if (lockp != NULL) { RELEASE_PV_LIST_LOCK(lockp); PMAP_UNLOCK(pmap); - VM_WAIT; + vm_wait(NULL); PMAP_LOCK(pmap); } diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c index dcc5be457916..c22a871279e7 100644 --- a/sys/compat/linuxkpi/common/src/linux_page.c +++ b/sys/compat/linuxkpi/common/src/linux_page.c @@ -101,7 +101,7 @@ linux_alloc_pages(gfp_t flags, unsigned int order) if (flags & M_WAITOK) { if (!vm_page_reclaim_contig(req, npages, 0, pmax, PAGE_SIZE, 0)) { - VM_WAIT; + vm_wait(NULL); } flags &= ~M_WAITOK; goto retry; diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c index 4470c921c08e..33f0d9953ecd 100644 --- a/sys/dev/drm2/i915/i915_gem.c +++ b/sys/dev/drm2/i915/i915_gem.c @@ -1561,7 +1561,7 @@ i915_gem_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, i915_gem_object_unpin(obj); DRM_UNLOCK(dev); VM_OBJECT_WUNLOCK(vm_obj); - VM_WAIT; + vm_wait(vm_obj); goto retry; } page->valid = VM_PAGE_BITS_ALL; diff --git a/sys/dev/drm2/i915/i915_gem_gtt.c b/sys/dev/drm2/i915/i915_gem_gtt.c index 35b5900ac7ec..7e81577c4ecf 100644 --- a/sys/dev/drm2/i915/i915_gem_gtt.c +++ b/sys/dev/drm2/i915/i915_gem_gtt.c @@ -589,7 +589,7 @@ static int setup_scratch_page(struct drm_device *dev) if (tries < 1) { if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff, PAGE_SIZE, 0)) - VM_WAIT; + vm_wait(NULL); tries++; goto retry; } diff --git a/sys/dev/drm2/ttm/ttm_bo_vm.c b/sys/dev/drm2/ttm/ttm_bo_vm.c index 6d8bb1129bcd..6f7184857ce6 100644 --- a/sys/dev/drm2/ttm/ttm_bo_vm.c +++ b/sys/dev/drm2/ttm/ttm_bo_vm.c @@ -246,7 +246,7 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset, if (m1 == NULL) { if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) { VM_OBJECT_WUNLOCK(vm_obj); - VM_WAIT; + vm_wait(vm_obj); VM_OBJECT_WLOCK(vm_obj); ttm_mem_io_unlock(man); ttm_bo_unreserve(bo); diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c index fb6d18c81f2b..4a5b9ed37d97 100644 --- a/sys/dev/drm2/ttm/ttm_page_alloc.c +++ b/sys/dev/drm2/ttm/ttm_page_alloc.c @@ -168,7 +168,7 @@ ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr) return (p); if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff, PAGE_SIZE, 0)) - VM_WAIT; + vm_wait(NULL); } } @@ -181,7 +181,7 @@ ttm_vm_page_alloc_any(int req, vm_memattr_t memattr) p = vm_page_alloc(NULL, 0, req); if (p != NULL) break; - VM_WAIT; + vm_wait(NULL); } pmap_page_set_memattr(p, memattr); return (p); diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index c0895cabcc2e..da63a0cd960d 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -1893,10 +1893,9 @@ pmap_pinit(pmap_t pmap) m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (m == NULL) - VM_WAIT; - else { + vm_wait(NULL); + else ptdpg[i++] = m; - } } pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); @@ -1945,7 +1944,7 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags) if ((flags & PMAP_ENTER_NOSLEEP) == 0) { PMAP_UNLOCK(pmap); rw_wunlock(&pvh_global_lock); - VM_WAIT; + vm_wait(NULL); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); } diff --git 
a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c index 4df99fdd66fb..387661d429a6 100644 --- a/sys/mips/mips/pmap.c +++ b/sys/mips/mips/pmap.c @@ -1050,11 +1050,11 @@ pmap_grow_direct_page(int req) { #ifdef __mips_n64 - VM_WAIT; + vm_wait(NULL); #else if (!vm_page_reclaim_contig(req, 1, 0, MIPS_KSEG0_LARGEST_PHYS, PAGE_SIZE, 0)) - VM_WAIT; + vm_wait(NULL); #endif } diff --git a/sys/mips/mips/uma_machdep.c b/sys/mips/mips/uma_machdep.c index eb97ea1887fe..917e5a085ebd 100644 --- a/sys/mips/mips/uma_machdep.c +++ b/sys/mips/mips/uma_machdep.c @@ -67,13 +67,11 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags, 0, MIPS_KSEG0_LARGEST_PHYS, PAGE_SIZE, 0)) continue; #endif - if (m == NULL) { - if (wait & M_NOWAIT) - return (NULL); - else - VM_WAIT; - } else + if (m != NULL) break; + if ((wait & M_NOWAIT) != 0) + return (NULL); + vm_wait(NULL); } pa = VM_PAGE_TO_PHYS(m); diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c index 2f990479ae2e..d4c88eb8941f 100644 --- a/sys/powerpc/aim/mmu_oea.c +++ b/sys/powerpc/aim/mmu_oea.c @@ -1124,7 +1124,7 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, if ((flags & PMAP_ENTER_NOSLEEP) != 0) return (KERN_RESOURCE_SHORTAGE); VM_OBJECT_ASSERT_UNLOCKED(m->object); - VM_WAIT; + vm_wait(NULL); } } diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c index b544a3468518..8e3e2034d84d 100644 --- a/sys/powerpc/aim/mmu_oea64.c +++ b/sys/powerpc/aim/mmu_oea64.c @@ -1384,7 +1384,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, if ((flags & PMAP_ENTER_NOSLEEP) != 0) return (KERN_RESOURCE_SHORTAGE); VM_OBJECT_ASSERT_UNLOCKED(m->object); - VM_WAIT; + vm_wait(NULL); } /* diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c index e001c4e5ad29..ff31ec9544b6 100644 --- a/sys/powerpc/booke/pmap.c +++ b/sys/powerpc/booke/pmap.c @@ -789,7 +789,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx, vm_wire_sub(i); return (NULL); } - VM_WAIT; + vm_wait(NULL); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); } @@ -1033,7 +1033,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep) vm_wire_sub(i); return (NULL); } - VM_WAIT; + vm_wait(NULL); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); } @@ -1346,7 +1346,7 @@ pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep) req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED; while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) { PMAP_UNLOCK(pmap); - VM_WAIT; + vm_wait(NULL); PMAP_LOCK(pmap); } mtbl[i] = m; diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c index 141eb2da59e6..5987480253b4 100644 --- a/sys/riscv/riscv/pmap.c +++ b/sys/riscv/riscv/pmap.c @@ -1203,7 +1203,7 @@ pmap_pinit(pmap_t pmap) */ while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) - VM_WAIT; + vm_wait(NULL); l1phys = VM_PAGE_TO_PHYS(l1pt); pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys); @@ -1252,7 +1252,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) RELEASE_PV_LIST_LOCK(lockp); PMAP_UNLOCK(pmap); rw_runlock(&pvh_global_lock); - VM_WAIT; + vm_wait(NULL); rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); } diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index e97c93b930dd..fecd14ba622a 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -787,7 +787,7 @@ RetryFault:; } if (fs.m == NULL) { unlock_and_deallocate(&fs); - VM_WAITPFAULT; + vm_waitpfault(); goto RetryFault; } } @@ -1685,7 
+1685,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, if (dst_m == NULL) { VM_OBJECT_WUNLOCK(dst_object); VM_OBJECT_RUNLOCK(object); - VM_WAIT; + vm_wait(dst_object); VM_OBJECT_WLOCK(dst_object); goto again; } diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index c09d3dd0ddd0..dcf57dc7ddce 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -2567,7 +2567,7 @@ CTASSERT(powerof2(NRUNS)); * Returns true if reclamation is successful and false otherwise. Since * relocation requires the allocation of physical pages, reclamation may * fail due to a shortage of free pages. When reclamation fails, callers - * are expected to perform VM_WAIT before retrying a failed allocation + * are expected to perform vm_wait() before retrying a failed allocation * operation, e.g., vm_page_alloc_contig(). * * The caller must always specify an allocation class through "req". @@ -2767,50 +2767,12 @@ vm_wait_severe(void) u_int vm_wait_count(void) { - u_int cnt; - int i; - cnt = 0; - for (i = 0; i < vm_ndomains; i++) - cnt += VM_DOMAIN(i)->vmd_waiters; - cnt += vm_severe_waiters + vm_min_waiters; - - return (cnt); + return (vm_severe_waiters + vm_min_waiters); } -/* - * vm_wait_domain: - * - * Sleep until free pages are available for allocation. - * - Called in various places after failed memory allocations. - */ -void -vm_wait_domain(int domain) -{ - struct vm_domain *vmd; - - vmd = VM_DOMAIN(domain); - vm_domain_free_assert_locked(vmd); - - if (curproc == pageproc) { - vmd->vmd_pageout_pages_needed = 1; - msleep(&vmd->vmd_pageout_pages_needed, - vm_domain_free_lockptr(vmd), PDROP | PSWP, "VMWait", 0); - } else { - if (pageproc == NULL) - panic("vm_wait in early boot"); - pagedaemon_wait(domain, PVM, "vmwait"); - } -} - -/* - * vm_wait: (also see VM_WAIT macro) - * - * Sleep until free pages are available for allocation. - * - Called in various places after failed memory allocations. - */ -void -vm_wait(void) +static void +vm_wait_doms(const domainset_t *wdoms) { /* @@ -2834,7 +2796,7 @@ vm_wait(void) * consume all freed pages while old allocators wait. */ mtx_lock(&vm_domainset_lock); - if (vm_page_count_min()) { + if (DOMAINSET_SUBSET(&vm_min_domains, wdoms)) { vm_min_waiters++; msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); @@ -2843,6 +2805,62 @@ vm_wait(void) } } +/* + * vm_wait_domain: + * + * Sleep until free pages are available for allocation. + * - Called in various places after failed memory allocations. + */ +void +vm_wait_domain(int domain) +{ + struct vm_domain *vmd; + domainset_t wdom; + + vmd = VM_DOMAIN(domain); + vm_domain_free_assert_locked(vmd); + + if (curproc == pageproc) { + vmd->vmd_pageout_pages_needed = 1; + msleep(&vmd->vmd_pageout_pages_needed, + vm_domain_free_lockptr(vmd), PDROP | PSWP, "VMWait", 0); + } else { + vm_domain_free_unlock(vmd); + if (pageproc == NULL) + panic("vm_wait in early boot"); + DOMAINSET_ZERO(&wdom); + DOMAINSET_SET(vmd->vmd_domain, &wdom); + vm_wait_doms(&wdom); + } +} + +/* + * vm_wait: + * + * Sleep until free pages are available for allocation in the + * affinity domains of the obj. If obj is NULL, the domain set + * for the calling thread is used. + * Called in various places after failed memory allocations. + */ +void +vm_wait(vm_object_t obj) +{ + struct domainset *d; + + d = NULL; + + /* + * Carefully fetch pointers only once: the struct domainset + * itself is immutable but the pointer might change.
+ */ + if (obj != NULL) + d = obj->domain.dr_policy; + if (d == NULL) + d = curthread->td_domain.dr_policy; + + vm_wait_doms(&d->ds_mask); +} + /* * vm_domain_alloc_fail: * @@ -2877,7 +2895,7 @@ vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) } /* - * vm_waitpfault: (also see VM_WAITPFAULT macro) + * vm_waitpfault: * * Sleep until free pages are available for allocation. * - Called only in vm_fault so that processes page faulting @@ -3071,10 +3089,6 @@ vm_domain_free_wakeup(struct vm_domain *vmd) * high water mark. And wakeup scheduler process if we have * lots of memory. this process will swapin processes. */ - if (vmd->vmd_pages_needed && !vm_paging_min(vmd)) { - vmd->vmd_pages_needed = false; - wakeup(&vmd->vmd_free_count); - } if ((vmd->vmd_minset && !vm_paging_min(vmd)) || (vmd->vmd_severeset && !vm_paging_severe(vmd))) vm_domain_clear(vmd); diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index 3993a5641707..a2c6cb7f7bf2 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -1750,8 +1750,6 @@ vm_pageout_oom(int shortage) } sx_sunlock(&allproc_lock); if (bigproc != NULL) { - int i; - if (vm_panic_on_oom != 0) panic("out of swap space"); PROC_LOCK(bigproc); @@ -1759,8 +1757,6 @@ vm_pageout_oom(int shortage) sched_nice(bigproc, PRIO_MIN); _PRELE(bigproc); PROC_UNLOCK(bigproc); - for (i = 0; i < vm_ndomains; i++) - wakeup(&VM_DOMAIN(i)->vmd_free_count); } } @@ -1795,23 +1791,6 @@ vm_pageout_worker(void *arg) while (TRUE) { vm_domain_free_lock(vmd); - /* - * Generally, after a level >= 1 scan, if there are enough - * free pages to wakeup the waiters, then they are already - * awake. A call to vm_page_free() during the scan awakened - * them. However, in the following case, this wakeup serves - * to bound the amount of time that a thread might wait. - * Suppose a thread's call to vm_page_alloc() fails, but - * before that thread calls VM_WAIT, enough pages are freed by - * other threads to alleviate the free page shortage. The - * thread will, nonetheless, wait until another page is freed - * or this wakeup is performed. - */ - if (vmd->vmd_pages_needed && !vm_paging_min(vmd)) { - vmd->vmd_pages_needed = false; - wakeup(&vmd->vmd_free_count); - } - /* * Do not clear vmd_pageout_wanted until we reach our free page * target. Otherwise, we may be awakened over and over again, @@ -1840,16 +1819,12 @@ vm_pageout_worker(void *arg) pass++; } else { /* - * Yes. If threads are still sleeping in VM_WAIT + * Yes. If threads are still sleeping in vm_wait() * then we immediately start a new scan. Otherwise, * sleep until the next wakeup or until pages need to * have their reference stats updated. */ - if (vmd->vmd_pages_needed) { - vm_domain_free_unlock(vmd); - if (pass == 0) - pass++; - } else if (mtx_sleep(&vmd->vmd_pageout_wanted, + if (mtx_sleep(&vmd->vmd_pageout_wanted, vm_domain_free_lockptr(vmd), PDROP | PVM, "psleep", hz) == 0) { VM_CNT_INC(v_pdwakeups); @@ -2000,33 +1975,3 @@ pagedaemon_wakeup(int domain) wakeup(&vmd->vmd_pageout_wanted); } } - -/* - * Wake up the page daemon and wait for it to reclaim free pages. - * - * This function returns with the free queues mutex unlocked. - */ -void -pagedaemon_wait(int domain, int pri, const char *wmesg) -{ - struct vm_domain *vmd; - - vmd = VM_DOMAIN(domain); - vm_domain_free_assert_locked(vmd); - - /* - * vmd_pageout_wanted may have been set by an advisory wakeup, but if - * the page daemon is running on a CPU, the wakeup will have been lost. 
- * Thus, deliver a potentially spurious wakeup to ensure that the page - * daemon has been notified of the shortage. - */ - if (!vmd->vmd_pageout_wanted || !vmd->vmd_pages_needed) { - vmd->vmd_pageout_wanted = true; - wakeup(&vmd->vmd_pageout_wanted); - } - vmd->vmd_pages_needed = true; - vmd->vmd_waiters++; - msleep(&vmd->vmd_free_count, vm_domain_free_lockptr(vmd), PDROP | pri, - wmesg, 0); - vmd->vmd_waiters--; -} diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h index 0894cd1541aa..644287872a28 100644 --- a/sys/vm/vm_pageout.h +++ b/sys/vm/vm_pageout.h @@ -93,11 +93,8 @@ extern int vm_pageout_page_count; * Signal pageout-daemon and wait for it. */ -void pagedaemon_wait(int domain, int pri, const char *wmesg); void pagedaemon_wakeup(int domain); -#define VM_WAIT vm_wait() -#define VM_WAITPFAULT vm_waitpfault() -void vm_wait(void); +void vm_wait(vm_object_t obj); void vm_waitpfault(void); void vm_wait_domain(int domain); void vm_wait_min(void); diff --git a/sys/vm/vm_pagequeue.h b/sys/vm/vm_pagequeue.h index d7bff9bc5eff..3e3654cb0cfc 100644 --- a/sys/vm/vm_pagequeue.h +++ b/sys/vm/vm_pagequeue.h @@ -93,8 +93,6 @@ struct vm_domain { int vmd_pageout_pages_needed; /* page daemon waiting for pages? */ int vmd_pageout_deficit; /* Estimated number of pages deficit */ - int vmd_waiters; /* Pageout waiters. */ - bool vmd_pages_needed; /* Are threads waiting for free pages? */ bool vmd_pageout_wanted; /* pageout daemon wait channel */ bool vmd_minset; /* Are we in vm_min_domains? */ bool vmd_severeset; /* Are we in vm_severe_domains? */
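Usage note: after this change, callers follow one of two idioms, both visible in the hunks above. The sketch below is a minimal illustration, not patch content; the locals m, obj, and pindex and the exact flag combinations are assumed for the example, while vm_page_alloc(), vm_wait(), and the VM_OBJECT_* lock macros are the interfaces the diff actually converts.

	/*
	 * Unmanaged allocation with no object: vm_wait(NULL) sleeps
	 * according to curthread's domainset policy (this replaces the
	 * old global VM_WAIT macro).
	 */
	while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
		vm_wait(NULL);

	/*
	 * Object-backed allocation: drop the object lock before sleeping,
	 * wait on the domains selected by obj->domain.dr_policy, then
	 * relock and retry (compare the i915_gem_pager_populate() and
	 * vm_fault_copy_entry() hunks).
	 */
	VM_OBJECT_WLOCK(obj);
	while ((m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL)) == NULL) {
		VM_OBJECT_WUNLOCK(obj);
		vm_wait(obj);
		VM_OBJECT_WLOCK(obj);
	}

The design point is that a waiter goes to sleep only when its whole wait set is depleted, i.e. the DOMAINSET_SUBSET() check in vm_wait_doms() sees every domain the thread may allocate from in vm_min_domains; a thread with at least one non-depleted domain in its policy never blocks, and a wakeup from vm_domain_clear() lets the sleepers retry their allocations.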