MFamd64 with few changes:
1. Add support for automatic promotion of 4KB page mappings to 2MB page mappings. Automatic promotion can be enabled by setting the tunable "vm.pmap.pg_ps_enabled" to a non-zero value. By default, automatic promotion is disabled. (A small sketch of querying this tunable follows the list.)

   Tested by: kris

2. To date, we have assumed that the TLB will only set the PG_M bit in a PTE if that PTE has the PG_RW bit set. However, this assumption does not hold on recent processors from Intel. For example, consider a PTE that has the PG_RW bit set but the PG_M bit clear. Suppose this PTE is cached in the TLB and later the PG_RW bit is cleared in the PTE, but the corresponding TLB entry is not (yet) invalidated. Historically, upon a write access using this (stale) TLB entry, the TLB would observe that the PG_RW bit had been cleared and initiate a page fault, aborting the setting of the PG_M bit in the PTE. Now, however, P4- and Core2-family processors will set the PG_M bit before observing that the PG_RW bit is clear and initiating a page fault. In other words, the write does not occur, but the PG_M bit is still set. The real impact of this difference is not that great. Specifically, we should no longer assert that any PTE with the PG_M bit set must also have the PG_RW bit set, and we should ignore the state of the PG_M bit unless the PG_RW bit is set. (See the second sketch following this list.)
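As a rough illustration of item 1, the sketch below reads the current value of vm.pmap.pg_ps_enabled from userland with sysctlbyname(3). It is an assumption-laden example, not part of the commit; since the commit describes pg_ps_enabled as a tunable, the program only queries it, and enabling promotion would typically be done at boot (for example, vm.pmap.pg_ps_enabled=1 in loader.conf).

/*
 * Userland sketch (assumes a FreeBSD host): read the current value of
 * the vm.pmap.pg_ps_enabled tunable with sysctlbyname(3).
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	int pg_ps_enabled;
	size_t len = sizeof(pg_ps_enabled);

	if (sysctlbyname("vm.pmap.pg_ps_enabled", &pg_ps_enabled, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("vm.pmap.pg_ps_enabled = %d\n", pg_ps_enabled);
	return (0);
}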
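For item 2, the following minimal sketch expresses the resulting rule for interpreting the dirty bit: PG_M is only meaningful when PG_RW is also set. The helper pte_is_dirty() is hypothetical and not code from this commit; the bit values are the standard i386 PTE bit assignments.

/*
 * Hypothetical helper (not from this commit): treat a PTE as dirty only
 * when PG_M and PG_RW are both set, because newer Intel CPUs may leave
 * PG_M set in a read-only PTE after a write that faulted through a
 * stale TLB entry.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;	/* non-PAE i386 PTE for illustration */

#define	PG_RW	0x002u		/* writable (x86 PTE bit 1) */
#define	PG_M	0x040u		/* modified/dirty (x86 PTE bit 6) */

static bool
pte_is_dirty(pt_entry_t pte)
{
	/* Ignore the state of PG_M unless PG_RW is also set. */
	return ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW));
}

int
main(void)
{
	pt_entry_t stale = PG_M;		/* PG_M set, PG_RW clear */
	pt_entry_t dirty = PG_M | PG_RW;	/* both set */

	printf("stale: %d, dirty: %d\n", pte_is_dirty(stale),
	    pte_is_dirty(dirty));
	return (0);
}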
parent 669aee5e44
commit 2a244be094

sys/i386/i386/pmap.c: 1183 changed lines (file diff suppressed because it is too large)
@@ -81,6 +81,13 @@
 #define PG_PROT		(PG_RW|PG_U)	/* all protection bits . */
 #define PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
 
+/*
+ * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
+ * 4KB (PTE) page mappings have identical settings for the following fields:
+ */
+#define PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
+	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
+
 /*
  * Page Protection Exception bits
  */
@@ -213,6 +220,9 @@ pmap_kextract(vm_offset_t va)
 
 #ifdef PAE
 
+#define	pde_cmpset(pdep, old, new) \
+	atomic_cmpset_64((pdep), (old), (new))
+
 static __inline pt_entry_t
 pte_load(pt_entry_t *ptep)
 {
@@ -269,6 +279,9 @@ extern pt_entry_t pg_nx;
 
 #else /* PAE */
 
+#define	pde_cmpset(pdep, old, new) \
+	atomic_cmpset_int((pdep), (old), (new))
+
 static __inline pt_entry_t
 pte_load(pt_entry_t *ptep)
 {
@@ -330,6 +343,7 @@ struct pmap {
 	pdpt_entry_t	*pm_pdpt;	/* KVA of page director pointer
 					   table */
 #endif
+	vm_page_t	pm_root;	/* spare page table pages */
 };
 
 typedef struct pmap *pmap_t;
@@ -393,7 +407,6 @@ extern char *ptvmmap;		/* poor name! */
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
 
 void	pmap_bootstrap(vm_paddr_t);
@@ -406,6 +419,7 @@ void	pmap_kremove(vm_offset_t);
 void	*pmap_mapbios(vm_paddr_t, vm_size_t);
 void	*pmap_mapdev(vm_paddr_t, vm_size_t);
 void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
+boolean_t pmap_page_is_mapped(vm_page_t m);
 void	pmap_unmapdev(vm_offset_t, vm_size_t);
 pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
 void	pmap_set_pg(void);
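The PG_PTE_PROMOTE macro added above lists the PTE fields that must be identical before a run of 4KB mappings can be promoted to a single 2 or 4MB mapping. The sketch below is a simplified, assumption-laden illustration of that check, not the commit's actual promotion code: it verifies that every PTE in the run agrees with the first one in the promotable attribute bits and that the backing frames are physically contiguous. The constants use non-PAE i386 values, and the demo mask omits the software-defined PG_MANAGED and PG_W bits that the real macro includes.

/*
 * Simplified illustration (not the commit's promotion code): check whether
 * a page table's 4KB PTEs could be merged into one large mapping.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;		/* non-PAE i386 PTE for illustration */

#define	PAGE_SIZE	4096u
#define	NPTEPG		1024u		/* 4KB PTEs per non-PAE page table */
#define	PG_FRAME	0xfffff000u	/* physical frame bits, non-PAE */

/* Standard i386 PTE attribute bits (hardware-defined). */
#define	PG_V		0x001u
#define	PG_RW		0x002u
#define	PG_U		0x004u
#define	PG_NC_PWT	0x008u
#define	PG_NC_PCD	0x010u
#define	PG_A		0x020u
#define	PG_M		0x040u
#define	PG_PTE_PAT	0x080u
#define	PG_G		0x100u

/*
 * Demo mask only: the real PG_PTE_PROMOTE in the diff above also includes
 * the software-defined PG_MANAGED and PG_W bits.
 */
#define	PG_PROMOTE_DEMO	(PG_G | PG_PTE_PAT | PG_M | PG_A | PG_NC_PCD | \
			    PG_NC_PWT | PG_U | PG_RW | PG_V)

static bool
can_promote(const pt_entry_t *firstpte)
{
	pt_entry_t pa, pte;
	unsigned int i;

	pa = firstpte[0] & PG_FRAME;
	for (i = 0; i < NPTEPG; i++) {
		pte = firstpte[i];
		/* Every PTE must agree with the first one in these fields. */
		if ((pte & PG_PROMOTE_DEMO) != (firstpte[0] & PG_PROMOTE_DEMO))
			return (false);
		/* The 4KB frames must form one contiguous physical run. */
		if ((pte & PG_FRAME) != pa)
			return (false);
		pa += PAGE_SIZE;
	}
	return (true);
}

int
main(void)
{
	static pt_entry_t pt[NPTEPG];
	unsigned int i;

	/* Build a contiguous, uniformly-mapped fake page table. */
	for (i = 0; i < NPTEPG; i++)
		pt[i] = (i * PAGE_SIZE) | PG_V | PG_RW | PG_A | PG_M;
	printf("promotable: %d\n", can_promote(pt));

	pt[5] &= ~PG_RW;	/* one differing PTE blocks promotion */
	printf("promotable: %d\n", can_promote(pt));
	return (0);
}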