Close a race involving the OEA64 scratchpage. When the scratch page's
physical address is changed, there is a brief window during which its PTE
is invalid. Since moea64_set_scratchpage_pa() does not and cannot hold
the page table lock, it was possible for another CPU to insert a new PTE
into the scratch page's PTEG slot during this interval, corrupting both
mappings.

Solve this by creating a new flag, LPTE_LOCKED, such that
moea64_pte_insert will avoid claiming locked PTEG slots even if they
are invalid. This change also incorporates some additional paranoia
added to solve things I thought might be this bug.

Reported by:	linimon
nwhitehorn 2010-02-24 00:54:37 +00:00
parent 80b158ba27
commit 853eaef79a
2 changed files with 15 additions and 10 deletions
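
In outline, the fix makes a PTEG slot claimable only when its PTE is both
invalid and unlocked. A minimal standalone sketch of that predicate
(illustration only, not the committed kernel code; the constants match the
pte.h hunk below):

	#include <stdint.h>
	#include <stdio.h>

	#define LPTE_LOCKED	0x0000000000000008ULL
	#define LPTE_VALID	0x0000000000000001ULL

	/* A slot may be claimed only when it is neither valid nor locked. */
	static int
	slot_is_free(uint64_t pte_hi)
	{
		return ((pte_hi & LPTE_VALID) == 0 &&
		    (pte_hi & LPTE_LOCKED) == 0);
	}

	int
	main(void)
	{
		/* The scratchpage PTE mid-update is invalid but locked; it
		 * must not look free, or another CPU could steal its slot. */
		printf("%d\n", slot_is_free(0));		/* 1: free */
		printf("%d\n", slot_is_free(LPTE_LOCKED));	/* 0: skip */
		printf("%d\n", slot_is_free(LPTE_VALID));	/* 0: skip */
		return (0);
	}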

sys/powerpc/aim/mmu_oea64.c

@@ -227,6 +227,7 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
 #define VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
 #define VSID_TO_SR(vsid)	((vsid) & 0xf)
 #define VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
+#define VSID_HASH_MASK		0x0000007fffffffffULL
 
 #define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
 #define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
@@ -458,9 +459,9 @@ MMU_DEF(oea64_bridge_mmu);
 static __inline u_int
 va_to_pteg(uint64_t vsid, vm_offset_t addr)
 {
-	u_int	hash;
+	uint64_t hash;
 
-	hash = vsid ^ (((uint64_t)addr & ADDR_PIDX) >>
+	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
 	    ADDR_PIDX_SHFT);
 	return (hash & moea64_pteg_mask);
 }
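
The va_to_pteg() change above fixes two related problems: the hash was
computed in a 32-bit variable, discarding the upper VSID bits before
moea64_pteg_mask was applied, and the VSID was not masked to its hash
width. A standalone illustration with made-up example values (not kernel
code; VSID_HASH_MASK as in the hunk above):

	#include <stdint.h>
	#include <stdio.h>

	#define VSID_HASH_MASK	0x0000007fffffffffULL

	int
	main(void)
	{
		uint64_t vsid = 0x123456789aULL; /* example VSID wider than 32 bits */
		uint64_t pidx = 0x5aULL;  /* example (addr & ADDR_PIDX) >> ADDR_PIDX_SHFT */

		uint32_t hash32 = (uint32_t)(vsid ^ pidx);	  /* old u_int hash: bits 32+ lost */
		uint64_t hash64 = (vsid & VSID_HASH_MASK) ^ pidx; /* new uint64_t hash */

		/* The results differ above bit 31, so a large PTEG mask
		 * would select the wrong PTEG with the old code. */
		printf("32-bit hash: %#lx\n", (unsigned long)hash32);
		printf("64-bit hash: %#llx\n", (unsigned long long)hash64);
		return (0);
	}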
@@ -979,6 +980,7 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 		    moea64_scratchpage_va[i],&j);
 		moea64_scratchpage_pte[i] = moea64_pvo_to_pte(
 		    moea64_scratchpage_pvo[i],j);
+		moea64_scratchpage_pte[i]->pte_hi |= LPTE_LOCKED;
 		UNLOCK_TABLE();
 	}
@@ -1090,8 +1092,10 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)
 static __inline
 void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {
+	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
+
 	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
-	    (~LPTE_WIMG & ~LPTE_RPGN);
+	    ~(LPTE_WIMG | LPTE_RPGN);
 	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
 	    moea64_calc_wimg(pa) | (uint64_t)pa;
@@ -2151,18 +2155,16 @@ moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
 static __inline int
 moea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
 {
-	int	pteidx;
-
 	/*
 	 * We can find the actual pte entry without searching by grabbing
-	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
+	 * the PTEG index from 3 unused bits in pvo_vaddr and by
 	 * noticing the HID bit.
 	 */
-	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
 	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
-		pteidx ^= moea64_pteg_mask * 8;
+		ptegidx ^= moea64_pteg_mask;
 
-	return (pteidx);
+	return ((ptegidx << 3) | PVO_PTEGIDX_GET(pvo));
 }
 
 static struct pvo_entry *
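
The rewritten moea64_pvo_pte_index() above finds a PTE without searching:
the slot number saved in pvo_vaddr picks one of the 8 PTEs in a PTEG, and
the HID bit says whether the secondary hash (the XOR-mirrored PTEG) was
used. A self-contained sketch of the arithmetic, with a made-up mask value
standing in for moea64_pteg_mask:

	#include <stdio.h>

	#define PTEG_MASK	0x3ff	/* hypothetical stand-in for moea64_pteg_mask */

	/* Recover a PTE's table index from its PTEG index, its slot
	 * within the PTEG (0-7), and the HID (secondary hash) bit. */
	static int
	pte_index(int ptegidx, int slot, int hid)
	{
		if (hid)
			ptegidx ^= PTEG_MASK;	/* secondary hash mirrors the PTEG */
		return ((ptegidx << 3) | slot);	/* 8 PTEs per PTEG */
	}

	int
	main(void)
	{
		printf("%d\n", pte_index(5, 3, 0));	/* 43 = 5*8 + 3 */
		printf("%d\n", pte_index(5, 3, 1));	/* 8147 = (5 ^ 0x3ff)*8 + 3 */
		return (0);
	}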
@@ -2259,7 +2261,8 @@ moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
 	 * First try primary hash.
 	 */
 	for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
-		if ((pt->pte_hi & LPTE_VALID) == 0) {
+		if ((pt->pte_hi & LPTE_VALID) == 0 &&
+		    (pt->pte_hi & LPTE_LOCKED) == 0) {
 			pvo_pt->pte_hi &= ~LPTE_HID;
 			moea64_pte_set(pt, pvo_pt);
 			return (i);
@@ -2272,7 +2275,8 @@ moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
 	ptegidx ^= moea64_pteg_mask;
 	for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
-		if ((pt->pte_hi & LPTE_VALID) == 0) {
+		if ((pt->pte_hi & LPTE_VALID) == 0 &&
+		    (pt->pte_hi & LPTE_LOCKED) == 0) {
 			pvo_pt->pte_hi |= LPTE_HID;
 			moea64_pte_set(pt, pvo_pt);
 			return (i);

sys/powerpc/include/pte.h

@@ -95,6 +95,7 @@ struct lpteg {
 /* High quadword: */
 #define LPTE_VSID_SHIFT		12
 #define LPTE_API		0x0000000000000F80ULL
+#define LPTE_LOCKED		0x0000000000000008ULL
 #define LPTE_BIG		0x0000000000000004ULL	/* 4kb/16Mb page */
 #define LPTE_HID		0x0000000000000002ULL
 #define LPTE_VALID		0x0000000000000001ULL