Rework pmap so that it separates the PTE structure from the pv_entry structure. This makes it possible to pre-allocate PTEs for the kernel, which is necessary for a reliable implementation of pmap_kenter(). This also avoids wasting space (about 48 bytes per page) for kernel mappings and user mappings of memory-mapped devices.

This also fixes a bug with the previous version where the implementation required the pv_entry structure to be physically contiguous but did not enforce this (the structure size was not a power of two). This meant that the pv_entry free list was quickly corrupted as soon as the system was even mildly loaded.
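The contiguity bug reduces to a simple invariant: when fixed-size entries are carved out of individual pages and chained on a free list, no entry may straddle a page boundary unless the backing pages are physically contiguous, and that is only guaranteed when the entry size divides the page size (in practice, a power of two). The stand-alone C sketch below only illustrates the arithmetic; the sizes (56 and 32 bytes) and the helper straddles_page() are assumptions for illustration, not values from this commit.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical sizes for illustration: a 4 KB page, an old pv_entry whose
 * embedded PTE pushes it to 56 bytes (not a power of two), and a new
 * pv_entry of 32 bytes once the PTE has been split out. */
#define ILL_PAGE_SIZE	4096
#define OLD_PV_SIZE	56
#define NEW_PV_SIZE	32

/* Return 1 if packing fixed-size entries back to back across a run of
 * pages ever produces an entry that crosses a page boundary. */
static int
straddles_page(size_t entry_size)
{
	size_t off;

	for (off = 0; off < 16 * ILL_PAGE_SIZE; off += entry_size)
		if (off / ILL_PAGE_SIZE != (off + entry_size - 1) / ILL_PAGE_SIZE)
			return (1);
	return (0);
}

int
main(void)
{
	/* A 56-byte entry eventually crosses a page boundary; if the two
	 * pages involved are not physically contiguous, that entry (and the
	 * free list threaded through it) is corrupted. */
	printf("old pv_entry straddles a page: %d\n", straddles_page(OLD_PV_SIZE));
	printf("new pv_entry straddles a page: %d\n", straddles_page(NEW_PV_SIZE));
	return (0);
}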
parent 0a622d8291
commit 5577bd2bca
@@ -54,6 +54,12 @@
 #ifdef _KERNEL
 
+#ifndef NKPT
+#define NKPT	30		/* initial number of kernel page tables */
+#endif
+#define MAXKPT	(PAGE_SIZE/sizeof(vm_offset_t))
+
+
 /*
  * Routine:	pmap_kextract
  * Function:
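The hunk above introduces NKPT, the number of kernel page-table pages set aside up front, and MAXKPT, the number of page-table pointers that fit in one page of the directory. Per the commit message, pre-allocating these pages is what lets pmap_kenter() install a kernel mapping without ever needing fresh memory. The stand-alone sketch below models that reservation pattern under assumed names (kpt_dir, kpt_alloc_boot, kpt_enter) and an assumed 8 KB page size; it is not the commit's code.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define KPT_PAGE_SIZE	8192				/* assumed page size */
#define NKPT		30				/* initial number of kernel page tables */
#define MAXKPT		(KPT_PAGE_SIZE / sizeof(uintptr_t))

static void	*kpt_dir[MAXKPT];	/* directory of pre-allocated PT pages */
static size_t	 kpt_count;		/* how many slots are populated */

/* Boot time: reserve NKPT page-table pages while failing is still an
 * option.  Every later pmap_kenter()-like call draws from this pool. */
static void
kpt_alloc_boot(void)
{
	size_t i;

	for (i = 0; i < NKPT; i++) {
		kpt_dir[i] = calloc(1, KPT_PAGE_SIZE);
		assert(kpt_dir[i] != NULL);
	}
	kpt_count = NKPT;
}

/* Mapping path: consume only pre-allocated pages, so this can be called
 * from contexts where allocating memory would be unreliable. */
static void *
kpt_enter(size_t pt_index)
{
	assert(pt_index < kpt_count);	/* a real kernel would panic here */
	return (kpt_dir[pt_index]);
}

int
main(void)
{
	kpt_alloc_boot();
	return (kpt_enter(0) != NULL ? 0 : 1);
}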
@@ -106,7 +112,6 @@ extern pmap_t kernel_pmap;
  * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
  */
 typedef struct pv_entry {
-	struct ia64_lpte pv_pte;	/* pte for collision walker */
 	pmap_t	pv_pmap;		/* pmap where mapping lies */
 	vm_offset_t pv_va;		/* virtual address for mapping */
 	TAILQ_ENTRY(pv_entry) pv_list;
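After this change a pv_entry records only which pmap maps the page and at what virtual address, linked through pv_list; the hardware PTE itself now lives outside the structure. A typical use of such a list is to find the entry for a given (pmap, va) pair. The sketch below is self-contained and hypothetical: pv_find() and the stand-in typedefs are not part of the commit.

#include <stddef.h>
#include <sys/queue.h>

typedef void *pmap_t;			/* stand-in for the kernel type */
typedef unsigned long vm_offset_t;	/* stand-in for the kernel type */

typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry) pv_list;
} *pv_entry_t;

TAILQ_HEAD(pv_head, pv_entry);

/* Walk one physical page's pv list and return the entry describing its
 * mapping at (pmap, va), or NULL if the page is not mapped there. */
static pv_entry_t
pv_find(struct pv_head *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	TAILQ_FOREACH(pv, pvh, pv_list)
		if (pv->pv_pmap == pmap && pv->pv_va == va)
			return (pv);
	return (NULL);
}

The list walk itself is unchanged by the commit; what changes is that the per-mapping PTE data no longer travels with every entry, which, per the commit message, is where the space savings for kernel and device mappings come from.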