Migrate the feature of excluding RAM pages to use "excludelist"
as its nomenclature.

MFC after:	1 week
Scott Long 2020-07-07 20:33:11 +00:00
parent 3c67996ca9
commit b302c2e5c9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=362998
5 changed files with 37 additions and 36 deletions

View File

@@ -49,12 +49,12 @@ entropy_cache_type="boot_entropy_cache" # Required for the kernel to find
# must not change value even if the
# _name above does change!
### RAM Blacklist configuration ############################
ram_blacklist_load="NO" # Set this to YES to load a file
### RAM Excludelist configuration ############################
ram_excludelist_load="NO" # Set this to YES to load a file
# containing a list of addresses to
# exclude from the running system.
ram_blacklist_name="/boot/blacklist.txt" # Set this to the name of the file
ram_blacklist_type="ram_blacklist" # Required for the kernel to find
ram_excludelist_name="/boot/excludelist.txt" # Set this to the name of the file
ram_excludelist_type="ram_excludelist" # Required for the kernel to find
# the blacklist module
### Microcode loading configuration ########################
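
For reference, a minimal sketch of how the renamed knobs would be exercised, assuming the excludelist file keeps the format the kernel parser accepts (physical addresses separated by space, comma, or newline); the addresses shown are hypothetical:

    # /boot/loader.conf -- override the defaults above
    ram_excludelist_load="YES"
    ram_excludelist_name="/boot/excludelist.txt"

    # /boot/excludelist.txt -- hypothetical pages to withhold from the allocator
    0x7d4f2000, 0x7d4f3000
    0x1b8a0000

At boot the loader preloads the file as a module of type "ram_excludelist"; vm_page_excludelist_load() and vm_page_excludelist_check() then pull each listed page out of the free lists during vm_page_startup().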

View File

@@ -2060,7 +2060,7 @@ pmap_init(void)
int error, i, ret, skz63;
/* L1TF, reserve page @0 unconditionally */
vm_page_blacklist_add(0, bootverbose);
vm_page_excludelist_add(0, bootverbose);
/* Detect bare-metal Skylake Server and Skylake-X. */
if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_INTEL &&
@@ -2081,7 +2081,7 @@ pmap_init(void)
printf("SKZ63: skipping 4M RAM starting "
"at physical 1G\n");
for (i = 0; i < atop(0x400000); i++) {
ret = vm_page_blacklist_add(0x40000000 +
ret = vm_page_excludelist_add(0x40000000 +
ptoa(i), FALSE);
if (!ret && bootverbose)
printf("page at %#lx already used\n",

View File

@@ -3557,7 +3557,7 @@ mmu_radix_init()
int error, i, pv_npg;
/* L1TF, reserve page @0 unconditionally */
vm_page_blacklist_add(0, bootverbose);
vm_page_excludelist_add(0, bootverbose);
zone_radix_pgd = uma_zcache_create("radix_pgd_cache",
RADIX_PGD_SIZE, NULL, NULL,

View File

@@ -155,10 +155,11 @@ vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;
static TAILQ_HEAD(, vm_page) blacklist_head;
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
static TAILQ_HEAD(, vm_page) excludelist_head;
static int sysctl_vm_page_excludelist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_excludelist, CTLTYPE_STRING | CTLFLAG_RD |
CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_excludelist, "A",
"Blacklist pages");
static uma_zone_t fakepg_zone;
@@ -258,16 +259,16 @@ vm_set_page_size(void)
}
/*
* vm_page_blacklist_next:
* vm_page_excludelist_next:
*
* Find the next entry in the provided string of blacklist
* Find the next entry in the provided string of excludelist
* addresses. Entries are separated by space, comma, or newline.
* If an invalid integer is encountered then the rest of the
* string is skipped. Updates the list pointer to the next
* character, or NULL if the string is exhausted or invalid.
*/
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
vm_page_excludelist_next(char **list, char *end)
{
vm_paddr_t bad;
char *cp, *pos;
@@ -314,13 +315,13 @@ vm_page_blacklist_next(char **list, char *end)
*list = cp;
return (trunc_page(bad));
}
printf("Garbage in RAM blacklist, skipping\n");
printf("Garbage in RAM excludelist, skipping\n");
*list = NULL;
return (0);
}
bool
vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
vm_page_excludelist_add(vm_paddr_t pa, bool verbose)
{
struct vm_domain *vmd;
vm_page_t m;
@@ -336,7 +337,7 @@ vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
vm_domain_free_unlock(vmd);
if (ret != 0) {
vm_domain_freecnt_inc(vmd, -1);
TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
TAILQ_INSERT_TAIL(&excludelist_head, m, listq);
if (verbose)
printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
}
@@ -344,35 +345,35 @@ vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
}
/*
* vm_page_blacklist_check:
* vm_page_excludelist_check:
*
* Iterate through the provided string of blacklist addresses, pulling
* Iterate through the provided string of excludelist addresses, pulling
* each entry out of the physical allocator free list and putting it
* onto a list for reporting via the vm.page_blacklist sysctl.
* onto a list for reporting via the vm.page_excludelist sysctl.
*/
static void
vm_page_blacklist_check(char *list, char *end)
vm_page_excludelist_check(char *list, char *end)
{
vm_paddr_t pa;
char *next;
next = list;
while (next != NULL) {
if ((pa = vm_page_blacklist_next(&next, end)) == 0)
if ((pa = vm_page_excludelist_next(&next, end)) == 0)
continue;
vm_page_blacklist_add(pa, bootverbose);
vm_page_excludelist_add(pa, bootverbose);
}
}
/*
* vm_page_blacklist_load:
* vm_page_excludelist_load:
*
* Search for a special module named "ram_blacklist". It'll be a
* Search for a special module named "ram_excludelist". It'll be a
* plain text file provided by the user via the loader directive
* of the same name.
*/
static void
vm_page_blacklist_load(char **list, char **end)
vm_page_excludelist_load(char **list, char **end)
{
void *mod;
u_char *ptr;
@@ -381,7 +382,7 @@ vm_page_blacklist_load(char **list, char **end)
mod = NULL;
ptr = NULL;
mod = preload_search_by_type("ram_blacklist");
mod = preload_search_by_type("ram_excludelist");
if (mod != NULL) {
ptr = preload_fetch_addr(mod);
len = preload_fetch_size(mod);
@@ -395,7 +396,7 @@ vm_page_blacklist_load(char **list, char **end)
}
static int
sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
sysctl_vm_page_excludelist(SYSCTL_HANDLER_ARGS)
{
vm_page_t m;
struct sbuf sbuf;
@@ -406,7 +407,7 @@ sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
TAILQ_FOREACH(m, &blacklist_head, listq) {
TAILQ_FOREACH(m, &excludelist_head, listq) {
sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
(uintmax_t)m->phys_addr);
first = 0;
@@ -793,14 +794,14 @@ vm_page_startup(vm_offset_t vaddr)
}
/*
* Remove blacklisted pages from the physical memory allocator.
* Remove excludelisted pages from the physical memory allocator.
*/
TAILQ_INIT(&blacklist_head);
vm_page_blacklist_load(&list, &listend);
vm_page_blacklist_check(list, listend);
TAILQ_INIT(&excludelist_head);
vm_page_excludelist_load(&list, &listend);
vm_page_excludelist_check(list, listend);
list = kern_getenv("vm.blacklist");
vm_page_blacklist_check(list, NULL);
list = kern_getenv("vm.excludelist");
vm_page_excludelist_check(list, NULL);
freeenv(list);
#if VM_NRESERVLEVEL > 0
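
The comment on vm_page_excludelist_next() above spells out the accepted input: addresses separated by space, comma, or newline, with the rest of the string skipped once an invalid integer is seen. A simplified userland sketch of that parsing contract (not the kernel code; the end-of-buffer bound is omitted and trunc_page() is approximated with a 4 KiB mask):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_MASK 0xfffUL          /* assume 4 KiB pages */

    /*
     * Pull the next page-aligned address from a space/comma/newline
     * separated list; return 0 and clear *list when exhausted or invalid.
     */
    static unsigned long long
    excludelist_next(char **list)
    {
            unsigned long long pa;
            char *cp, *tail;

            cp = *list;
            while (*cp == ' ' || *cp == ',' || *cp == '\n')
                    cp++;
            if (*cp == '\0') {
                    *list = NULL;
                    return (0);
            }
            pa = strtoull(cp, &tail, 0);
            if (tail == cp || (*tail != '\0' && *tail != ' ' &&
                *tail != ',' && *tail != '\n')) {
                    printf("Garbage in RAM excludelist, skipping\n");
                    *list = NULL;
                    return (0);
            }
            *list = tail;
            return (pa & ~PAGE_MASK);  /* rough trunc_page() */
    }

    int
    main(void)
    {
            char buf[] = "0x7d4f2000, 0x7d4f3000\n0x1b8a0000";
            char *next = buf;
            unsigned long long pa;

            while (next != NULL)
                    if ((pa = excludelist_next(&next)) != 0)
                            printf("exclude page at %#llx\n", pa);
            return (0);
    }

The caller loop mirrors vm_page_excludelist_check(): keep asking for the next address until the list pointer is cleared, skipping entries that come back as 0.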

View File

@@ -613,7 +613,7 @@ vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
bool vm_page_excludelist_add(vm_paddr_t pa, bool verbose);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
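
Finally, a hedged sketch of reading the renamed sysctl from userland. The handler above emits a comma-separated list of hexadecimal page addresses under vm.page_excludelist, which a small program could fetch with sysctlbyname(3):

    #include <sys/types.h>
    #include <sys/sysctl.h>

    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
            char *buf;
            size_t len;

            /* First call sizes the buffer, second call fetches the string. */
            if (sysctlbyname("vm.page_excludelist", NULL, &len, NULL, 0) != 0)
                    err(1, "sysctlbyname");
            if ((buf = malloc(len + 1)) == NULL)
                    err(1, "malloc");
            if (sysctlbyname("vm.page_excludelist", buf, &len, NULL, 0) != 0)
                    err(1, "sysctlbyname");
            buf[len] = '\0';
            printf("excluded pages: %s\n", *buf != '\0' ? buf : "(none)");
            free(buf);
            return (0);
    }

The same string is available from the shell with sysctl vm.page_excludelist.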