Have preload_delete_name() free pages backing preloaded data.

On i386 and amd64, add a vm_phys segment for physical memory used to
store the kernel binary and other preloaded data.  This makes it
possible to free such memory back to the system once it is no longer
needed, e.g., when a preloaded kernel module is unloaded.  Previously,
it would have remained unused.

Reviewed by:	kib, royger
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D16330
Author:	Mark Johnston
Date:	2018-07-19 20:00:28 +00:00
Commit:	483f692ea6 (parent 73624a804a)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=336505
5 files changed, 72 insertions(+), 9 deletions(-)
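
To summarize the mechanism, here is a rough sketch (not part of the commit) of the flow this change enables. example_unload_preloaded_file() is a hypothetical caller; getmemsize(), vm_phys_add_seg(), preload_delete_name(), and kmem_bootstrap_free() are the functions touched by the diffs below.

/*
 * Sketch only; not part of this commit.  Illustrates the intended
 * call path: getmemsize() registers the preload range with vm_phys at
 * boot (vm_phys_add_seg()), so deleting a preloaded file's metadata
 * lets preload_delete_name() pass the file's backing pages to
 * kmem_bootstrap_free(), which unmaps them and frees them to the
 * physical memory allocator (i386 and amd64 only for now).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker.h>

static void
example_unload_preloaded_file(const char *filename)
{

	preload_delete_name(filename);
}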

@@ -101,6 +101,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_param.h>
+#include <vm/vm_phys.h>
 
 #ifdef DDB
 #ifndef KDB
@@ -1227,6 +1228,12 @@ getmemsize(caddr_t kmdp, u_int64_t first)
 	quad_t dcons_addr, dcons_size;
 	int page_counter;
 
+	/*
+	 * Tell the physical memory allocator about pages used to store
+	 * the kernel and preloaded data.  See kmem_bootstrap_free().
+	 */
+	vm_phys_add_seg((vm_paddr_t)kernphys, trunc_page(first));
+
 	bzero(physmap, sizeof(physmap));
 	physmap_idx = 0;

@@ -101,6 +101,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_param.h>
+#include <vm/vm_phys.h>
 
 #ifdef DDB
 #ifndef KDB
@@ -1833,6 +1834,12 @@ getmemsize(int first)
 	bzero(physmap, sizeof(physmap));
 	basemem = 0;
 
+	/*
+	 * Tell the physical memory allocator about pages used to store
+	 * the kernel and preloaded data.  See kmem_bootstrap_free().
+	 */
+	vm_phys_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first));
+
 	/*
 	 * Check if the loader supplied an SMAP memory map.  If so,
 	 * use that and do not make any VM86 calls.

@@ -33,6 +33,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/systm.h>
 #include <sys/linker.h>
 
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
 /*
  * Preloaded module support
  */
@@ -204,29 +207,42 @@ preload_search_info(caddr_t mod, int inf)
 void
 preload_delete_name(const char *name)
 {
-	caddr_t curp;
-	uint32_t *hdr;
+	caddr_t addr, curp;
+	uint32_t *hdr, sz;
 	int next;
 	int clearing;
 
+	addr = 0;
+	sz = 0;
 	if (preload_metadata != NULL) {
 		clearing = 0;
 		curp = preload_metadata;
 		for (;;) {
 			hdr = (uint32_t *)curp;
-			if (hdr[0] == 0 && hdr[1] == 0)
-				break;
+			if (hdr[0] == MODINFO_NAME || (hdr[0] == 0 && hdr[1] == 0)) {
+				/* Free memory used to store the file. */
+				if (addr != 0 && sz != 0)
+					kmem_bootstrap_free((vm_offset_t)addr, sz);
+				addr = 0;
+				sz = 0;
-			/* Search for a MODINFO_NAME field */
-			if (hdr[0] == MODINFO_NAME) {
+				if (hdr[0] == 0)
+					break;
 				if (!strcmp(name, curp + sizeof(uint32_t) * 2))
 					clearing = 1;	/* got it, start clearing */
-				else if (clearing)
+				else if (clearing) {
 					clearing = 0;	/* at next one now.. better stop */
+				}
 			}
-			if (clearing)
+			if (clearing) {
+				if (hdr[0] == MODINFO_ADDR)
+					addr = *(caddr_t *)(curp + sizeof(uint32_t) * 2);
+				else if (hdr[0] == MODINFO_SIZE)
+					sz = *(uint32_t *)(curp + sizeof(uint32_t) * 2);
 				hdr[0] = MODINFO_EMPTY;
+			}
 
 			/* skip to next field */
 			next = sizeof(uint32_t) * 2 + hdr[1];

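For reference, a minimal sketch of the metadata traversal used above, assuming the record layout described in the comment; example_walk_preload_metadata() is hypothetical and not part of the commit.

/*
 * Sketch only; not part of this commit.  Shows how the preload
 * metadata consulted above is laid out: a chain of records, each two
 * 32-bit words (type, payload length) followed by the payload, padded
 * to u_long alignment, and terminated by a zero record.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker.h>

static void
example_walk_preload_metadata(caddr_t md)
{
	uint32_t *hdr;
	caddr_t curp;
	int next;

	for (curp = md; ; curp += next) {
		hdr = (uint32_t *)curp;
		if (hdr[0] == 0 && hdr[1] == 0)
			break;		/* zero record ends the chain */
		if (hdr[0] == MODINFO_ADDR)
			printf("file loaded at %p\n",
			    *(void **)(curp + sizeof(uint32_t) * 2));
		else if (hdr[0] == MODINFO_SIZE)
			printf("file size %u\n",
			    *(uint32_t *)(curp + sizeof(uint32_t) * 2));
		/* Step over the header words and the padded payload. */
		next = sizeof(uint32_t) * 2 + hdr[1];
		next = roundup(next, sizeof(u_long));
	}
}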

@@ -75,6 +75,7 @@ int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
 void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
 
 /* Bootstrapping. */
+void kmem_bootstrap_free(vm_offset_t, vm_size_t);
 vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
     boolean_t);
 void kmem_init(vm_offset_t, vm_offset_t);

@@ -688,6 +688,38 @@ kmem_init(vm_offset_t start, vm_offset_t end)
 	vm_map_unlock(m);
 }
 
+/*
+ * kmem_bootstrap_free:
+ *
+ *	Free pages backing preloaded data (e.g., kernel modules) to the
+ *	system.  Currently only supported on platforms that create a
+ *	vm_phys segment for preloaded data.
+ */
+void
+kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
+{
+#if defined(__i386__) || defined(__amd64__)
+	struct vm_domain *vmd;
+	vm_offset_t end;
+	vm_paddr_t pa;
+	vm_page_t m;
+
+	end = trunc_page(start + size);
+	start = round_page(start);
+	(void)vm_map_remove(kernel_map, start, end);
+
+	for (; start < end; start += PAGE_SIZE) {
+		pa = pmap_kextract(start);
+		m = PHYS_TO_VM_PAGE(pa);
+		vmd = vm_pagequeue_domain(m);
+		vm_domain_free_lock(vmd);
+		vm_phys_free_pages(m, 0);
+		vm_domain_free_unlock(vmd);
+	}
+#endif
+}
+
 #ifdef DIAGNOSTIC
 /*
  * Allow userspace to directly trigger the VM drain routine for testing