diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 11007135392b..455a8bcf49f9 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -3611,7 +3611,7 @@ keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
 			break;
 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
 			if ((flags & M_WAITOK) != 0) {
-				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
+				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
 				goto restart;
 			}
 			break;
@@ -4755,7 +4755,7 @@ uma_prealloc(uma_zone_t zone, int items)
 				break;
 			}
 			if (vm_domainset_iter_policy(&di, &domain) != 0)
-				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
+				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
 		}
 	}
 }
diff --git a/sys/vm/vm_domainset.c b/sys/vm/vm_domainset.c
index b26e9697dc6a..748770c83449 100644
--- a/sys/vm/vm_domainset.c
+++ b/sys/vm/vm_domainset.c
@@ -245,7 +245,7 @@ vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
 	/* Wait for one of the domains to accumulate some free pages. */
 	if (obj != NULL)
 		VM_OBJECT_WUNLOCK(obj);
-	vm_wait_doms(&di->di_domain->ds_mask);
+	vm_wait_doms(&di->di_domain->ds_mask, 0);
 	if (obj != NULL)
 		VM_OBJECT_WLOCK(obj);
 	if ((di->di_flags & VM_ALLOC_WAITFAIL) != 0)
@@ -310,7 +310,7 @@ vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
 		return (ENOMEM);
 
 	/* Wait for one of the domains to accumulate some free pages. */
-	vm_wait_doms(&di->di_domain->ds_mask);
+	vm_wait_doms(&di->di_domain->ds_mask, 0);
 
 	/* Restart the search. */
 	vm_domainset_iter_first(di, domain);
diff --git a/sys/vm/vm_domainset.h b/sys/vm/vm_domainset.h
index b70a027fb0cd..6dceb6fe59c8 100644
--- a/sys/vm/vm_domainset.h
+++ b/sys/vm/vm_domainset.h
@@ -50,6 +50,6 @@ void	vm_domainset_iter_policy_init(struct vm_domainset_iter *,
 void	vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *,
 	    struct domainset_ref *, int *, int *);
 
-void	vm_wait_doms(const domainset_t *);
+int	vm_wait_doms(const domainset_t *, int mflags);
 
 #endif /* __VM_DOMAINSET_H__ */
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 8947348dbc93..27bac7682f06 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -565,7 +565,7 @@ vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
 	}
 	dset = td2->td_domain.dr_policy;
 	while (vm_page_count_severe_set(&dset->ds_mask)) {
-		vm_wait_doms(&dset->ds_mask);
+		vm_wait_doms(&dset->ds_mask, 0);
 	}
 
 	if ((flags & RFMEM) == 0) {
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 951bd19549d6..e120c0d29099 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3147,9 +3147,12 @@ vm_wait_count(void)
 	return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
 }
 
-void
-vm_wait_doms(const domainset_t *wdoms)
+int
+vm_wait_doms(const domainset_t *wdoms, int mflags)
 {
+	int error;
+
+	error = 0;
 
 	/*
 	 * We use racey wakeup synchronization to avoid expensive global
@@ -3162,8 +3165,8 @@ vm_wait_doms(const domainset_t *wdoms)
 	if (curproc == pageproc) {
 		mtx_lock(&vm_domainset_lock);
 		vm_pageproc_waiters++;
-		msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP,
-		    "pageprocwait", 1);
+		error = msleep(&vm_pageproc_waiters, &vm_domainset_lock,
+		    PVM | PDROP | mflags, "pageprocwait", 1);
 	} else {
 		/*
 		 * XXX Ideally we would wait only until the allocation could
@@ -3173,11 +3176,12 @@ vm_wait_doms(const domainset_t *wdoms)
 		mtx_lock(&vm_domainset_lock);
 		if (vm_page_count_min_set(wdoms)) {
 			vm_min_waiters++;
-			msleep(&vm_min_domains, &vm_domainset_lock,
-			    PVM | PDROP, "vmwait", 0);
+			error = msleep(&vm_min_domains, &vm_domainset_lock,
+			    PVM | PDROP | mflags, "vmwait", 0);
 		} else
 			mtx_unlock(&vm_domainset_lock);
 	}
+	return (error);
 }
 
 /*
@@ -3208,20 +3212,12 @@ vm_wait_domain(int domain)
 			panic("vm_wait in early boot");
 		DOMAINSET_ZERO(&wdom);
 		DOMAINSET_SET(vmd->vmd_domain, &wdom);
-		vm_wait_doms(&wdom);
+		vm_wait_doms(&wdom, 0);
 	}
 }
 
-/*
- * vm_wait:
- *
- *	Sleep until free pages are available for allocation in the
- *	affinity domains of the obj.  If obj is NULL, the domain set
- *	for the calling thread is used.
- *	Called in various places after failed memory allocations.
- */
-void
-vm_wait(vm_object_t obj)
+static int
+vm_wait_flags(vm_object_t obj, int mflags)
 {
 	struct domainset *d;
 
@@ -3236,7 +3232,27 @@ vm_wait(vm_object_t obj)
 	if (d == NULL)
 		d = curthread->td_domain.dr_policy;
 
-	vm_wait_doms(&d->ds_mask);
+	return (vm_wait_doms(&d->ds_mask, mflags));
+}
+
+/*
+ * vm_wait:
+ *
+ *	Sleep until free pages are available for allocation in the
+ *	affinity domains of the obj.  If obj is NULL, the domain set
+ *	for the calling thread is used.
+ *	Called in various places after failed memory allocations.
+ */
+void
+vm_wait(vm_object_t obj)
+{
+	(void)vm_wait_flags(obj, 0);
+}
+
+int
+vm_wait_intr(vm_object_t obj)
+{
+	return (vm_wait_flags(obj, PCATCH));
 }
 
 /*
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index ab7d6a945fa1..82ba3c81ef1b 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -97,6 +97,7 @@ extern int vm_pageout_page_count;
  */
 
 void vm_wait(vm_object_t obj);
+int vm_wait_intr(vm_object_t obj);
 void vm_waitpfault(struct domainset *, int timo);
 void vm_wait_domain(int domain);
 void vm_wait_min(void);