Make pte_load_store() an atomic operation in all cases, not just i386 PAE.
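(Illustration, not part of the commit: the snippet below is a user-space sketch of what "atomic" buys for this primitive. The name pte_load_store_sketch and the compiler builtin are invented for this note; the actual change uses an xchgq/xchgl instruction via inline assembly, as the diffs below show.)

#include <stdint.h>

typedef uint64_t pt_entry_t;		/* a 64-bit PTE, as on amd64 */

/*
 * Store a new PTE value and return the old one in a single indivisible
 * step, so no other processor can update the entry in between.  On x86,
 * GCC/Clang lower this builtin to the same xchg instruction the commit
 * adds by hand.
 */
static inline pt_entry_t
pte_load_store_sketch(pt_entry_t *ptep, pt_entry_t pte)
{
	return (__atomic_exchange_n(ptep, pte, __ATOMIC_SEQ_CST));
}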

Restructure pmap_enter() to prevent the loss of a page modified (PG_M) bit
in a race between processors.  (This restructuring assumes the newly atomic
pte_load_store() for correct operation.)
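(Again an illustration only, with hypothetical names: the toy program below shows the window the restructuring closes. If the old PTE value is read and the new one stored as two separate steps, another CPU can set PG_M between them and the dirty indication is overwritten before it ever reaches the vm_page; reading the old bits back from an atomic exchange leaves no such window.)

#include <stdint.h>
#include <stdio.h>

#define	PG_M	0x040			/* hardware-maintained "modified" bit */
typedef uint64_t pt_entry_t;

static pt_entry_t the_pte;		/* one page-table entry */
static int page_marked_dirty;		/* stands in for vm_page_dirty() */

/* Shape of the old code: the race window is between the two statements. */
static void
replace_pte_racy(pt_entry_t newpte)
{
	pt_entry_t origpte = the_pte;	/* another CPU may set PG_M here... */
	the_pte = newpte;		/* ...and the bit is silently lost */
	if (origpte & PG_M)
		page_marked_dirty = 1;
}

/* Shape of the new code: the old bits come back from the exchange itself. */
static void
replace_pte_atomic(pt_entry_t newpte)
{
	pt_entry_t origpte = __atomic_exchange_n(&the_pte, newpte,
	    __ATOMIC_SEQ_CST);
	if (origpte & PG_M)		/* sees any PG_M set before the swap */
		page_marked_dirty = 1;
}

int
main(void)
{
	the_pte = 0x1000 | PG_M;	/* pretend another CPU just dirtied it */
	replace_pte_racy(0x2000);	/* single-threaded, so no race here... */
	the_pte = 0x1000 | PG_M;
	replace_pte_atomic(0x2000);	/* ...but only this form is SMP-safe */
	printf("dirty recorded: %d\n", page_marked_dirty);
	return (0);
}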

Reviewed by: tegge@
PR: i386/61852
commit aced26ce6e (parent e3240372a7)
Author: Alan Cox
Date:   2004-10-08 08:23:43 +00:00
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=136252
4 changed files with 70 additions and 22 deletions


@@ -1839,7 +1839,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	vm_paddr_t opa;
 	pd_entry_t ptepde;
 	pt_entry_t origpte, newpte;
-	vm_page_t mpte;
+	vm_page_t mpte, om;
 
 	va = trunc_page(va);
 #ifdef PMAP_DIAGNOSTIC
@@ -1881,6 +1881,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		panic("pmap_enter: invalid page directory va=%#lx\n", va);
 
 	pa = VM_PAGE_TO_PHYS(m);
+	om = NULL;
 	origpte = *pte;
 	opa = origpte & PG_FRAME;
 
@@ -1921,8 +1922,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		 * so we go ahead and sense modify status.
 		 */
 		if (origpte & PG_MANAGED) {
-			if ((origpte & PG_M) && pmap_track_modified(va))
-				vm_page_dirty(m);
+			om = m;
 			pa |= PG_MANAGED;
 		}
 		goto validate;
@@ -1933,10 +1933,17 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 */
 	if (opa) {
 		int err;
-		err = pmap_remove_pte(pmap, pte, va, ptepde);
+		if (origpte & PG_W)
+			pmap->pm_stats.wired_count--;
+		if (origpte & PG_MANAGED) {
+			om = PHYS_TO_VM_PAGE(opa);
+			err = pmap_remove_entry(pmap, om, va, ptepde);
+		} else
+			err = pmap_unuse_pt(pmap, va, ptepde);
 		if (err)
 			panic("pmap_enter: pte vanished, va: 0x%lx", va);
-	}
+	} else
+		pmap->pm_stats.resident_count++;
 
 	/*
 	 * Enter on the PV list if part of our managed memory. Note that we
@@ -1952,7 +1959,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	/*
 	 * Increment counters
 	 */
-	pmap->pm_stats.resident_count++;
 	if (wired)
 		pmap->pm_stats.wired_count++;
 
@@ -1977,7 +1983,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * to update the pte.
 	 */
 	if ((origpte & ~(PG_M|PG_A)) != newpte) {
-		pte_store(pte, newpte | PG_A);
+		if (origpte & PG_MANAGED) {
+			origpte = pte_load_store(pte, newpte | PG_A);
+			if ((origpte & PG_M) && pmap_track_modified(va))
+				vm_page_dirty(om);
+			if (origpte & PG_A)
+				vm_page_flag_set(om, PG_REFERENCED);
+		} else
+			pte_store(pte, newpte | PG_A);
 		if (origpte) {
 			pmap_invalidate_page(pmap, va);
 		}


@@ -194,15 +194,25 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
 {
 	pt_entry_t r;
 
-	r = *ptep;
-	*ptep = pte;
+	__asm __volatile(
+	    "xchgq %0,%1"
+	    : "=m" (*ptep),
+	      "=r" (r)
+	    : "1" (pte),
+	      "m" (*ptep));
 	return (r);
 }
 
 #define	pte_load_clear(pte)	atomic_readandclear_long(pte)
 
-#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)
-#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
+static __inline void
+pte_store(pt_entry_t *ptep, pt_entry_t pte)
+{
+
+	*ptep = pte;
+}
+
+#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)
 
 #define	pde_store(pdep, pde)	pte_store((pdep), (pde))


@@ -1894,7 +1894,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	register pt_entry_t *pte;
 	vm_paddr_t opa;
 	pt_entry_t origpte, newpte;
-	vm_page_t mpte;
+	vm_page_t mpte, om;
 
 	va &= PG_FRAME;
 #ifdef PMAP_DIAGNOSTIC
@@ -1939,6 +1939,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	}
 
 	pa = VM_PAGE_TO_PHYS(m);
+	om = NULL;
 	origpte = *pte;
 	opa = origpte & PG_FRAME;
 
@@ -1986,8 +1987,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		 * so we go ahead and sense modify status.
 		 */
 		if (origpte & PG_MANAGED) {
-			if ((origpte & PG_M) && pmap_track_modified(va))
-				vm_page_dirty(m);
+			om = m;
 			pa |= PG_MANAGED;
 		}
 		goto validate;
@@ -1998,10 +1998,17 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 */
 	if (opa) {
 		int err;
-		err = pmap_remove_pte(pmap, pte, va);
+		if (origpte & PG_W)
+			pmap->pm_stats.wired_count--;
+		if (origpte & PG_MANAGED) {
+			om = PHYS_TO_VM_PAGE(opa);
+			err = pmap_remove_entry(pmap, om, va);
+		} else
+			err = pmap_unuse_pt(pmap, va);
 		if (err)
 			panic("pmap_enter: pte vanished, va: 0x%x", va);
-	}
+	} else
+		pmap->pm_stats.resident_count++;
 
 	/*
 	 * Enter on the PV list if part of our managed memory. Note that we
@@ -2017,7 +2024,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	/*
 	 * Increment counters
 	 */
-	pmap->pm_stats.resident_count++;
 	if (wired)
 		pmap->pm_stats.wired_count++;
 
@@ -2040,7 +2046,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * to update the pte.
 	 */
 	if ((origpte & ~(PG_M|PG_A)) != newpte) {
-		pte_store(pte, newpte | PG_A);
+		if (origpte & PG_MANAGED) {
+			origpte = pte_load_store(pte, newpte | PG_A);
+			if ((origpte & PG_M) && pmap_track_modified(va))
+				vm_page_dirty(om);
+			if (origpte & PG_A)
+				vm_page_flag_set(om, PG_REFERENCED);
+		} else
+			pte_store(pte, newpte | PG_A);
 		if (origpte) {
 			pmap_invalidate_page(pmap, va);
 		}


@@ -236,6 +236,8 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t v)
 #define	pte_load_clear(ptep)	pte_load_store((ptep), (pt_entry_t)0ULL)
 
+#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
+
 #else /* PAE */
 
 static __inline pt_entry_t
@@ -252,17 +254,27 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
 {
 	pt_entry_t r;
 
-	r = *ptep;
-	*ptep = pte;
+	__asm __volatile(
+	    "xchgl %0,%1"
+	    : "=m" (*ptep),
+	      "=r" (r)
+	    : "1" (pte),
+	      "m" (*ptep));
 	return (r);
 }
 
 #define	pte_load_clear(pte)	atomic_readandclear_int(pte)
 
+static __inline void
+pte_store(pt_entry_t *ptep, pt_entry_t pte)
+{
+
+	*ptep = pte;
+}
+
 #endif /* PAE */
 
-#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)
-#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
+#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)
 
 #define	pde_store(pdep, pde)	pte_store((pdep), (pde))