Handle a race between pmap_kextract() and pmap_promote_pde(). This race is
known to cause a kernel crash in ZFS on i386 when superpage promotion is
enabled.

Tested by:	netchild
MFC after:	1 week
This commit is contained in:
Alan Cox 2010-01-23 18:42:28 +00:00
parent 9d98195dc3
commit cf3508519c
2 changed files with 44 additions and 4 deletions

View File

@@ -243,8 +243,9 @@ struct sysmaps {
caddr_t CADDR2;
};
static struct sysmaps sysmaps_pcpu[MAXCPU];
pt_entry_t *CMAP1 = 0;
pt_entry_t *CMAP1 = 0, *KPTmap;
static pt_entry_t *CMAP3;
static pd_entry_t *KPTD;
caddr_t CADDR1 = 0, ptvmmap = 0;
static caddr_t CADDR3;
struct msgbuf *msgbufp = 0;
@@ -420,6 +421,21 @@ pmap_bootstrap(vm_paddr_t firstaddr)
*/
SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
/*
* KPTmap is used by pmap_kextract().
*/
SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)
for (i = 0; i < NKPT; i++)
KPTD[i] = (KPTphys + (i << PAGE_SHIFT)) | PG_RW | PG_V;
/*
* Adjust the start of the KPTD and KPTmap so that the implementation
* of pmap_kextract() and pmap_growkernel() can be made simpler.
*/
KPTD -= KPTDI;
KPTmap -= i386_btop(KPTDI << PDRSHIFT);
/*
* ptemap is used for pmap_pte_quick
*/
@@ -1839,6 +1855,7 @@ pmap_growkernel(vm_offset_t addr)
vm_page_t nkpg;
pd_entry_t newpdir;
pt_entry_t *pde;
boolean_t updated_PTD;
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
if (kernel_vm_end == 0) {
@@ -1878,14 +1895,20 @@ pmap_growkernel(vm_offset_t addr)
pmap_zero_page(nkpg);
ptppaddr = VM_PAGE_TO_PHYS(nkpg);
newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
pdir_pde(PTD, kernel_vm_end) = newpdir;
pdir_pde(KPTD, kernel_vm_end) = newpdir;
updated_PTD = FALSE;
mtx_lock_spin(&allpmaps_lock);
LIST_FOREACH(pmap, &allpmaps, pm_list) {
if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
PG_FRAME))
updated_PTD = TRUE;
pde = pmap_pde(pmap, kernel_vm_end);
pde_store(pde, newpdir);
}
mtx_unlock_spin(&allpmaps_lock);
KASSERT(updated_PTD,
("pmap_growkernel: current page table is not in allpmaps"));
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
if (kernel_vm_end - 1 >= kernel_map->max_offset) {
kernel_vm_end = kernel_map->max_offset;

View File

@@ -265,6 +265,16 @@ pte_load_store_ma(pt_entry_t *ptep, pt_entry_t v)
#define pde_store_ma(ptep, pte) pte_load_store_ma((ptep), (pt_entry_t)pte)
#elif !defined(XEN)
/*
* KPTmap is a linear mapping of the kernel page table. It differs from the
* recursive mapping in two ways: (1) it only provides access to kernel page
* table pages, and not user page table pages, and (2) it provides access to
* a kernel page table page after the corresponding virtual addresses have
* been promoted to a 2/4MB page mapping.
*/
extern pt_entry_t *KPTmap;
/*
* Routine: pmap_kextract
* Function:
@@ -279,10 +289,17 @@ pmap_kextract(vm_offset_t va)
if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
} else {
pa = *vtopte(va);
/*
* Beware of a concurrent promotion that changes the PDE at
* this point! For example, vtopte() must not be used to
* access the PTE because it would use the new PDE. It is,
* however, safe to use the old PDE because the page table
* page is preserved by the promotion.
*/
pa = KPTmap[i386_btop(va)];
pa = (pa & PG_FRAME) | (va & PAGE_MASK);
}
return pa;
return (pa);
}
#define PT_UPDATES_FLUSH()