Add an interruptible variant of vm_wait(9), vm_wait_intr(9).

Also add an msleep(9) flags argument to vm_wait_doms(9).

Reviewed by:	markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D24652
commit 89d2fb14d5 (parent 3e91d8268f)
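
For illustration only (not part of this commit): a kernel consumer that wants its wait to be abortable by a signal might use the new vm_wait_intr(9) as in the hedged sketch below. example_alloc_retry() and try_allocate_resource() are hypothetical names; kernel context is assumed.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>

/* Hypothetical allocation attempt; returns 0 on success, ENOMEM on failure. */
static int try_allocate_resource(vm_object_t obj);

/*
 * Hedged sketch, not from this commit: retry a failing allocation,
 * sleeping via vm_wait_intr() so that a pending signal can abort
 * the wait instead of leaving the thread stuck.
 */
static int
example_alloc_retry(vm_object_t obj)
{
	int error;

	while (try_allocate_resource(obj) != 0) {
		/*
		 * Sleep until one of the object's affinity domains has
		 * free pages.  Because vm_wait_intr() sleeps with PCATCH,
		 * a signal ends the sleep with EINTR or ERESTART.
		 */
		error = vm_wait_intr(obj);
		if (error != 0)
			return (error);
	}
	return (0);
}

vm_wait(9) itself keeps its void return and remains uninterruptible, so existing callers are unaffected.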
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -3611,7 +3611,7 @@ keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
 			break;
 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
 			if ((flags & M_WAITOK) != 0) {
-				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
+				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
 				goto restart;
 			}
 			break;
@@ -4755,7 +4755,7 @@ uma_prealloc(uma_zone_t zone, int items)
 				break;
 			}
 			if (vm_domainset_iter_policy(&di, &domain) != 0)
-				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
+				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
 		}
 	}
 }
--- a/sys/vm/vm_domainset.c
+++ b/sys/vm/vm_domainset.c
@@ -245,7 +245,7 @@ vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
 	/* Wait for one of the domains to accumulate some free pages. */
 	if (obj != NULL)
 		VM_OBJECT_WUNLOCK(obj);
-	vm_wait_doms(&di->di_domain->ds_mask);
+	vm_wait_doms(&di->di_domain->ds_mask, 0);
 	if (obj != NULL)
 		VM_OBJECT_WLOCK(obj);
 	if ((di->di_flags & VM_ALLOC_WAITFAIL) != 0)
@@ -310,7 +310,7 @@ vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
 		return (ENOMEM);

 	/* Wait for one of the domains to accumulate some free pages. */
-	vm_wait_doms(&di->di_domain->ds_mask);
+	vm_wait_doms(&di->di_domain->ds_mask, 0);

 	/* Restart the search. */
 	vm_domainset_iter_first(di, domain);
--- a/sys/vm/vm_domainset.h
+++ b/sys/vm/vm_domainset.h
@@ -50,6 +50,6 @@ void	vm_domainset_iter_policy_init(struct vm_domainset_iter *,
 void	vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *,
 	    struct domainset_ref *, int *, int *);

-void	vm_wait_doms(const domainset_t *);
+int	vm_wait_doms(const domainset_t *, int mflags);

 #endif /* __VM_DOMAINSET_H__ */
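
The new prototype above returns the msleep(9) error and exposes the sleep flags, so a caller can opt in to interruptible waits on an arbitrary domain set; existing callers simply pass 0, as the hunks above show. A hedged sketch of such an opt-in caller follows (example_wait_thread_policy() is hypothetical; it mirrors the curthread->td_domain.dr_policy pattern used elsewhere in this diff):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/domainset.h>
#include <vm/vm_domainset.h>

/*
 * Hedged sketch (not from the tree): wait for free pages in the
 * calling thread's domain-set policy, allowing a signal to interrupt
 * the sleep.  Returns 0 on wakeup, or EINTR/ERESTART propagated from
 * msleep(9); passing 0 instead of PCATCH keeps the old behavior.
 */
static int
example_wait_thread_policy(void)
{
	struct domainset *dset;

	dset = curthread->td_domain.dr_policy;
	return (vm_wait_doms(&dset->ds_mask, PCATCH));
}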
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -565,7 +565,7 @@ vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
 	}
 	dset = td2->td_domain.dr_policy;
 	while (vm_page_count_severe_set(&dset->ds_mask)) {
-		vm_wait_doms(&dset->ds_mask);
+		vm_wait_doms(&dset->ds_mask, 0);
 	}

 	if ((flags & RFMEM) == 0) {
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3147,9 +3147,12 @@ vm_wait_count(void)
 	return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
 }

-void
-vm_wait_doms(const domainset_t *wdoms)
+int
+vm_wait_doms(const domainset_t *wdoms, int mflags)
 {
+	int error;
+
+	error = 0;

 	/*
 	 * We use racey wakeup synchronization to avoid expensive global
@@ -3162,8 +3165,8 @@ vm_wait_doms(const domainset_t *wdoms)
 	if (curproc == pageproc) {
 		mtx_lock(&vm_domainset_lock);
 		vm_pageproc_waiters++;
-		msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP,
-		    "pageprocwait", 1);
+		error = msleep(&vm_pageproc_waiters, &vm_domainset_lock,
+		    PVM | PDROP | mflags, "pageprocwait", 1);
 	} else {
 		/*
 		 * XXX Ideally we would wait only until the allocation could
@@ -3173,11 +3176,12 @@ vm_wait_doms(const domainset_t *wdoms)
 		mtx_lock(&vm_domainset_lock);
 		if (vm_page_count_min_set(wdoms)) {
 			vm_min_waiters++;
-			msleep(&vm_min_domains, &vm_domainset_lock,
-			    PVM | PDROP, "vmwait", 0);
+			error = msleep(&vm_min_domains, &vm_domainset_lock,
+			    PVM | PDROP | mflags, "vmwait", 0);
 		} else
 			mtx_unlock(&vm_domainset_lock);
 	}
+	return (error);
 }

 /*
@@ -3208,20 +3212,12 @@ vm_wait_domain(int domain)
 			panic("vm_wait in early boot");
 		DOMAINSET_ZERO(&wdom);
 		DOMAINSET_SET(vmd->vmd_domain, &wdom);
-		vm_wait_doms(&wdom);
+		vm_wait_doms(&wdom, 0);
 	}
 }

-/*
- * vm_wait:
- *
- *	Sleep until free pages are available for allocation in the
- *	affinity domains of the obj.  If obj is NULL, the domain set
- *	for the calling thread is used.
- *	Called in various places after failed memory allocations.
- */
-void
-vm_wait(vm_object_t obj)
+static int
+vm_wait_flags(vm_object_t obj, int mflags)
 {
 	struct domainset *d;

@@ -3236,7 +3232,27 @@ vm_wait(vm_object_t obj)
 	if (d == NULL)
 		d = curthread->td_domain.dr_policy;

-	vm_wait_doms(&d->ds_mask);
+	return (vm_wait_doms(&d->ds_mask, mflags));
+}
+
+/*
+ * vm_wait:
+ *
+ *	Sleep until free pages are available for allocation in the
+ *	affinity domains of the obj.  If obj is NULL, the domain set
+ *	for the calling thread is used.
+ *	Called in various places after failed memory allocations.
+ */
+void
+vm_wait(vm_object_t obj)
+{
+	(void)vm_wait_flags(obj, 0);
+}
+
+int
+vm_wait_intr(vm_object_t obj)
+{
+	return (vm_wait_flags(obj, PCATCH));
 }

 /*
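
A single mflags argument is enough here because msleep(9) carries flag bits such as PDROP and PCATCH in the same argument as the scheduling priority (PVM above), so vm_wait_doms() can OR the caller's flags straight in. A minimal, hypothetical illustration of that composition (the example_* identifiers are not from the tree, and example_lock is assumed to be initialized elsewhere with mtx_init(9)):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_lock;		/* assumed initialized elsewhere */
static int example_waiters;

/*
 * Hedged sketch of the msleep(9) pattern used above: PVM is the
 * scheduling priority, PDROP tells msleep() to release example_lock
 * before returning, and PCATCH (when present in mflags) lets a
 * pending signal end the sleep with EINTR or ERESTART.
 */
static int
example_sleep(int mflags)
{
	int error;

	mtx_lock(&example_lock);
	example_waiters++;
	error = msleep(&example_waiters, &example_lock,
	    PVM | PDROP | mflags, "exwait", 0);
	/* No mtx_unlock() here: PDROP already released example_lock. */
	return (error);
}

With mflags == 0 the sleep behaves exactly as before; with PCATCH, msleep(9) returns EINTR or ERESTART when a signal is pending, and vm_wait_doms() now propagates that value to its caller.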
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -97,6 +97,7 @@ extern int vm_pageout_page_count;
  */

 void vm_wait(vm_object_t obj);
+int vm_wait_intr(vm_object_t obj);
 void vm_waitpfault(struct domainset *, int timo);
 void vm_wait_domain(int domain);
 void vm_wait_min(void);