Port the new PV entry allocator from amd64/i386.  This allocator has two
advantages.  First, PV entries are roughly half the size.  Second, this
allocator doesn't access the paging queues, and thus it allows for the
removal of the page queues lock from this pmap.
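The size reduction follows from dropping the per-entry pmap pointer and the per-pmap linkage: because each pv_entry now lives inside a page-aligned pv_chunk (see the pmap.h hunk below), the owning pmap can be recovered from the entry's address. A minimal sketch of that lookup in the style of the amd64/i386 allocator; the names pv_to_chunk and PV_PMAP are borrowed from the amd64 pmap and may differ in the ia64 port:

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{
	/*
	 * pv_chunks are page-sized and page-aligned, so masking off the
	 * page offset of any contained pv_entry yields its chunk header.
	 */
	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

/* The owning pmap is stored once per chunk rather than once per entry. */
#define	PV_PMAP(pv)	(pv_to_chunk(pv)->pc_pmap)

Together with dropping the per-pmap pv_plist linkage, this shrinks each entry to a virtual address plus a single list linkage, roughly half its former size.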

Replace all uses of the page queues lock with an R/W lock that is private
to this pmap.
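A rough sketch of the locking change, assuming the pmap-private lock is an rwlock; the name pvh_global_lock and the helper names here are assumptions carried over from the amd64 code, not necessarily what the ia64 pmap uses:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/*
 * Private to the pmap module; takes over the role the global page
 * queues lock played for PV list manipulation (name is an assumption).
 */
static struct rwlock pvh_global_lock;

static void
pmap_pv_lock_init(void)
{
	/* Done once at bootstrap time. */
	rw_init(&pvh_global_lock, "pmap pv global");
}

static void
pmap_pv_list_update(void)
{
	rw_wlock(&pvh_global_lock);	/* was: vm_page_lock_queues() */
	/* ... add or remove entries on a page's pv_list ... */
	rw_wunlock(&pvh_global_lock);	/* was: vm_page_unlock_queues() */
}

Read-mostly paths can take rw_rlock() instead of the exclusive lock, which the old global mutex could not offer.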

Tested by:	marcel
Alan Cox 2012-10-26 03:02:39 +00:00
parent b28f446886
commit cdd7357cc5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=242121
3 changed files with 447 additions and 233 deletions


@@ -11,6 +11,8 @@ UWX_TRACE_ENABLE opt_global.h
COMPAT_FREEBSD32 opt_compat.h
PV_STATS opt_pmap.h
EXCEPTION_TRACING opt_xtrace.h
VGA_ALT_SEQACCESS opt_vga.h
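Among the options in this hunk, PV_STATS (already declared by amd64/i386) gates optional PV allocator statistics. A hedged sketch of how such counters are typically guarded; the counter and sysctl names below are illustrative assumptions, not necessarily what the ia64 pmap defines:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0,
    "VM/pmap parameters");

#ifdef PV_STATS
static long pv_entry_allocs, pv_entry_frees, pc_chunk_count;
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0, "Number of PV entry allocations");
/* Statements compiled in only when PV_STATS is configured. */
#define	PV_STAT(x)	do { x ; } while (0)
#else
#define	PV_STAT(x)	do { } while (0)
#endif

/* A call site such as PV_STAT(pv_entry_allocs++); costs nothing otherwise. */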

File diff suppressed because it is too large


@@ -64,16 +64,16 @@
* Pmap stuff
*/
struct pv_entry;
struct pv_chunk;
struct md_page {
int pv_list_count;
TAILQ_HEAD(,pv_entry) pv_list;
vm_memattr_t memattr;
};
struct pmap {
struct mtx pm_mtx;
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
uint32_t pm_rid[IA64_VM_MINKERN_REGION];
struct pmap_statistics pm_stats; /* pmap statistics */
};
@@ -101,12 +101,25 @@ extern struct pmap kernel_pmap_store;
* mappings of that page. An entry is a pv_entry_t, the list is pv_list.
*/
typedef struct pv_entry {
pmap_t pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
TAILQ_ENTRY(pv_entry) pv_list;
TAILQ_ENTRY(pv_entry) pv_plist;
} *pv_entry_t;
/*
* pv_entries are allocated in chunks per-process. This avoids the
* need to track per-pmap assignments.
*/
#define _NPCM 6
#define _NPCPV 337
struct pv_chunk {
pmap_t pc_pmap;
TAILQ_ENTRY(pv_chunk) pc_list;
u_long pc_map[_NPCM]; /* bitmap; 1 = free */
TAILQ_ENTRY(pv_chunk) pc_lru;
u_long pc_spare[2];
struct pv_entry pc_pventry[_NPCPV];
};
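The pc_map bitmap is what makes handing out an entry cheap: each set bit marks a free slot in pc_pventry, so allocation is a find-first-set plus a bit clear, with no paging-queue access. A simplified sketch of that fast path, modeled on the amd64 get_pv_entry(); the function name is borrowed from amd64, and chunk allocation, statistics, and the reclaim path are omitted:

static pv_entry_t
get_pv_entry(pmap_t pmap)
{
	struct pv_chunk *pc;
	pv_entry_t pv;
	int bit, field;

	/* Try the first chunk on this pmap's list of partially full chunks. */
	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
	if (pc != NULL) {
		for (field = 0; field < _NPCM; field++) {
			if (pc->pc_map[field] != 0) {
				bit = ffsl(pc->pc_map[field]) - 1;
				pc->pc_map[field] &= ~(1UL << bit);
				pv = &pc->pc_pventry[field *
				    (NBBY * sizeof(u_long)) + bit];
				/*
				 * A chunk with no free bits left would be
				 * moved to the tail of pm_pvchunk here.
				 */
				return (pv);
			}
		}
	}
	/*
	 * No free slot: a new page-sized pv_chunk would be allocated,
	 * initialized with all pc_map bits set, and linked onto both
	 * pm_pvchunk and the global pc_lru reclaim list.
	 */
	return (NULL);
}

Freeing reverses the operation: the entry's bit is set again, and a chunk that becomes entirely free can be unlinked and returned to the VM system.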
#ifdef _KERNEL
extern vm_paddr_t phys_avail[];