Add more NUMA-specific low memory predicates.

Use these predicates instead of inline references to vm_min_domains.
Also add a global all_domains set, akin to all_cpus.

Reviewed by:	alc, jeff, kib
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D17278
Mark Johnston 2018-09-24 19:24:17 +00:00
parent 7362ac9ca9
commit 463406ac4a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=338919
5 changed files with 38 additions and 14 deletions
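
Reading aid, not part of the commit: a minimal sketch of how the new predicates are meant to be called, based on the replacements visible in the vm_domainset.c and vm_page.c hunks below. The caller, its arguments, and the helper name are hypothetical.

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/vmmeter.h>

/*
 * Hypothetical caller: decide whether a single domain, or an entire
 * candidate set of domains, is too low on free pages to allocate from
 * without risking a low-memory deadlock.
 */
static int
example_low_memory(int domain, const domainset_t *wdoms)
{

        /* Per-domain form; replaces DOMAINSET_ISSET(domain, &vm_min_domains). */
        if (vm_page_count_min_domain(domain))
                return (1);

        /* Set form; replaces DOMAINSET_SUBSET(&vm_min_domains, wdoms). */
        return (vm_page_count_min_set(wdoms));
}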

sys/sys/vmmeter.h

@@ -145,6 +145,7 @@ struct vmmeter {
#include <sys/domainset.h>
extern struct vmmeter vm_cnt;
extern domainset_t all_domains;
extern domainset_t vm_min_domains;
extern domainset_t vm_severe_domains;
@@ -177,7 +178,7 @@ vm_wire_count(void)
/*
* Return TRUE if we are under our severe low-free-pages threshold
*
* This routine is typically used at the user<->system interface to determine
* These routines are typically used at the user<->system interface to determine
* whether we need to block in order to avoid a low memory deadlock.
*/
static inline int
@@ -188,7 +189,14 @@ vm_page_count_severe(void)
}
static inline int
vm_page_count_severe_set(domainset_t *mask)
vm_page_count_severe_domain(int domain)
{
return (DOMAINSET_ISSET(domain, &vm_severe_domains));
}
static inline int
vm_page_count_severe_set(const domainset_t *mask)
{
return (DOMAINSET_SUBSET(&vm_severe_domains, mask));
@@ -197,7 +205,7 @@ vm_page_count_severe_set(domainset_t *mask)
/*
* Return TRUE if we are under our minimum low-free-pages threshold.
*
* This routine is typically used within the system to determine whether
* These routines are typically used within the system to determine whether
* we can execute potentially very expensive code in terms of memory. It
* is also used by the pageout daemon to calculate when to sleep, when
* to wake waiters up, and when (after making a pass) to become more
@@ -210,5 +218,19 @@ vm_page_count_min(void)
return (!DOMAINSET_EMPTY(&vm_min_domains));
}
static inline int
vm_page_count_min_domain(int domain)
{
return (DOMAINSET_ISSET(domain, &vm_min_domains));
}
static inline int
vm_page_count_min_set(const domainset_t *mask)
{
return (DOMAINSET_SUBSET(&vm_min_domains, mask));
}
#endif /* _KERNEL */
#endif /* _SYS_VMMETER_H_ */
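
An inference from the DOMAINSET_SUBSET() expressions above, not wording from the commit: the set-valued forms vm_page_count_min_set(mask) and vm_page_count_severe_set(mask) report whether every domain in mask is currently flagged, which is why vm_wait_doms() in the vm_page.c hunk below sleeps only when all of the domains it may allocate from are under the minimum threshold. A hypothetical two-domain illustration:

/*
 * Sketch: on a two-domain system where only domain 0 has dropped below
 * its minimum free-page target, i.e. vm_min_domains == {0}:
 *
 *   vm_page_count_min_domain(0)    -> 1  (domain 0 is flagged)
 *   vm_page_count_min_domain(1)    -> 0
 *   vm_page_count_min_set({0})     -> 1  ({0} is a subset of {0})
 *   vm_page_count_min_set({0, 1})  -> 0  (domain 1 is still above its minimum)
 */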

sys/vm/vm_domainset.c

@@ -66,6 +66,7 @@ vm_domainset_iter_init(struct vm_domainset_iter *di, struct vm_object *obj,
vm_pindex_t pindex)
{
struct domainset *domain;
struct thread *td;
/*
* object policy takes precedence over thread policy. The policies
@@ -76,8 +77,9 @@ vm_domainset_iter_init(struct vm_domainset_iter *di, struct vm_object *obj,
di->di_domain = domain;
di->di_iter = &obj->domain.dr_iterator;
} else {
di->di_domain = curthread->td_domain.dr_policy;
di->di_iter = &curthread->td_domain.dr_iterator;
td = curthread;
di->di_domain = td->td_domain.dr_policy;
di->di_iter = &td->td_domain.dr_iterator;
}
di->di_policy = di->di_domain->ds_policy;
if (di->di_policy == DOMAINSET_POLICY_INTERLEAVE) {
@@ -215,7 +217,7 @@ vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
*req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
VM_ALLOC_NOWAIT;
vm_domainset_iter_first(di, domain);
if (DOMAINSET_ISSET(*domain, &vm_min_domains))
if (vm_page_count_min_domain(*domain))
vm_domainset_iter_page(di, domain, req);
}
@@ -233,8 +235,7 @@ vm_domainset_iter_page(struct vm_domainset_iter *di, int *domain, int *req)
/* If there are more domains to visit we run the iterator. */
while (--di->di_n != 0) {
vm_domainset_iter_next(di, domain);
if (!di->di_minskip ||
!DOMAINSET_ISSET(*domain, &vm_min_domains))
if (!di->di_minskip || !vm_page_count_min_domain(*domain))
return (0);
}
if (di->di_minskip) {
@@ -269,7 +270,7 @@ vm_domainset_iter_malloc_init(struct vm_domainset_iter *di,
di->di_flags = *flags;
*flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT;
vm_domainset_iter_first(di, domain);
if (DOMAINSET_ISSET(*domain, &vm_min_domains))
if (vm_page_count_min_domain(*domain))
vm_domainset_iter_malloc(di, domain, flags);
}
@@ -280,8 +281,7 @@ vm_domainset_iter_malloc(struct vm_domainset_iter *di, int *domain, int *flags)
/* If there are more domains to visit we run the iterator. */
while (--di->di_n != 0) {
vm_domainset_iter_next(di, domain);
if (!di->di_minskip ||
!DOMAINSET_ISSET(*domain, &vm_min_domains))
if (!di->di_minskip || !vm_page_count_min_domain(*domain))
return (0);
}

sys/vm/vm_page.c

@@ -2959,7 +2959,7 @@ vm_wait_doms(const domainset_t *wdoms)
* consume all freed pages while old allocators wait.
*/
mtx_lock(&vm_domainset_lock);
if (DOMAINSET_SUBSET(&vm_min_domains, wdoms)) {
if (vm_page_count_min_set(wdoms)) {
vm_min_waiters++;
msleep(&vm_min_domains, &vm_domainset_lock,
PVM | PDROP, "vmwait", 0);
@@ -3078,7 +3078,7 @@ vm_waitpfault(struct domainset *dset)
* consume all freed pages while old allocators wait.
*/
mtx_lock(&vm_domainset_lock);
if (DOMAINSET_SUBSET(&vm_min_domains, &dset->ds_mask)) {
if (vm_page_count_min_set(&dset->ds_mask)) {
vm_min_waiters++;
msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
"pfault", 0);

sys/vm/vm_phys.c

@@ -78,6 +78,7 @@ int __read_mostly *mem_locality;
#endif
int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);
struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;

sys/x86/acpica/srat.c

@@ -470,8 +470,9 @@ parse_srat(void)
}
#ifdef NUMA
/* Point vm_phys at our memory affinity table. */
vm_ndomains = ndomain;
for (int i = 0; i < vm_ndomains; i++)
DOMAINSET_SET(i, &all_domains);
mem_affinity = mem_info;
#endif
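
The last hunk populates the new all_domains set from the SRAT-derived domain count. As a usage sketch, not part of this commit and with a hypothetical helper name, all_domains can then be consumed much like all_cpus is with the CPU-set macros:

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/vmmeter.h>

extern int vm_ndomains;         /* number of attached memory domains */

/*
 * Hypothetical helper: count how many existing memory domains appear in
 * a caller-supplied mask, clipping the mask against all_domains.
 */
static int
example_count_domains(const domainset_t *mask)
{
        int count, i;

        count = 0;
        for (i = 0; i < vm_ndomains; i++)
                if (DOMAINSET_ISSET(i, &all_domains) &&
                    DOMAINSET_ISSET(i, mask))
                        count++;
        return (count);
}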