Use atomic ops in pmap_clear_ptes() to prevent SMP races that could
result in the loss of an accessed or modified bit from the pte.

In collaboration with: tegge@

MT5 candidate
Alan Cox 2004-09-08 18:58:29 +00:00
parent 51eb0765c6
commit e232eb8288
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=134960
2 changed files with 20 additions and 7 deletions
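What the fix addresses, in miniature: pmap_clear_ptes() used to read the pte, mask bits off a local copy, and write the result back with pte_store(). On SMP, the MMU on another CPU can set PG_A or PG_M between that read and that write, and the write-back then destroys the update. The sketch below is a user-space illustration only, using C11 atomics rather than the kernel's atomic(9) primitives; the FAKE_PG_* constants and function names are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

#define	FAKE_PG_RW	0x002UL		/* invented stand-ins for PG_RW/PG_M */
#define	FAKE_PG_M	0x040UL

static _Atomic unsigned long fake_pte = FAKE_PG_RW;

/*
 * Racy version (what the old pte_store() sequence amounted to): if the
 * "MMU" sets FAKE_PG_M between the load and the store, the bit is lost.
 */
static void
clear_rw_racy(void)
{
	unsigned long pbits = atomic_load(&fake_pte);

	atomic_store(&fake_pte, pbits & ~(FAKE_PG_RW | FAKE_PG_M));
}

/*
 * Atomic version, mirroring the commit: retry the compare-and-set until
 * the snapshot in pbits is known to be the value actually replaced, so a
 * concurrently set modified bit is either still present or harvested.
 */
static void
clear_rw_atomic(void)
{
	unsigned long pbits, nbits;

	do {
		pbits = atomic_load(&fake_pte);
		nbits = pbits & ~(FAKE_PG_RW | FAKE_PG_M);
	} while (!atomic_compare_exchange_strong(&fake_pte, &pbits, nbits));
	if (pbits & FAKE_PG_M)
		printf("page was dirty; propagate to the VM layer here\n");
}

int
main(void)
{
	clear_rw_atomic();
	(void)clear_rw_racy;	/* shown for contrast only */
	return (0);
}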

sys/amd64/amd64/pmap.c

@@ -203,7 +203,7 @@ static caddr_t crashdumpmap;
 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(void);
-static void pmap_clear_ptes(vm_page_t m, int bit);
+static void pmap_clear_ptes(vm_page_t m, long bit);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
 		vm_offset_t sva, pd_entry_t ptepde);
@@ -2591,7 +2591,7 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
  * Clear the given bit in each of the given page's ptes.
  */
 static __inline void
-pmap_clear_ptes(vm_page_t m, int bit)
+pmap_clear_ptes(vm_page_t m, long bit)
 {
 	register pv_entry_t pv;
 	pt_entry_t pbits, *pte;
@@ -2623,15 +2623,18 @@ pmap_clear_ptes(vm_page_t m, int bit)
 		PMAP_LOCK(pv->pv_pmap);
 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+retry:
 		pbits = *pte;
 		if (pbits & bit) {
 			if (bit == PG_RW) {
+				if (!atomic_cmpset_long(pte, pbits,
+				    pbits & ~(PG_RW | PG_M)))
+					goto retry;
 				if (pbits & PG_M) {
 					vm_page_dirty(m);
 				}
-				pte_store(pte, pbits & ~(PG_M|PG_RW));
 			} else {
-				pte_store(pte, pbits & ~bit);
+				atomic_clear_long(pte, bit);
 			}
 			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 		}
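For the amd64 hunk above: atomic_cmpset_long() stores the new value only if the pte still holds the snapshot in pbits, and returns nonzero exactly when it does, so a failed cmpset sends control back to retry with a fresh snapshot. A rough user-space model of that contract (a sketch built on C11, not the kernel's cmpxchg-based implementation):

#include <stdatomic.h>

/*
 * Sketch of the atomic_cmpset_long() contract the retry loop relies on:
 * succeed, returning nonzero, only if *p still equals "expect", in which
 * case "src" is installed.  Illustrative stand-in only.
 */
static int
cmpset_long_sketch(_Atomic unsigned long *p, unsigned long expect,
    unsigned long src)
{
	return (atomic_compare_exchange_strong(p, &expect, src));
}

Because vm_page_dirty() runs only after a successful cmpset, the PG_M value it acts on comes from the snapshot that was provably the pte's last value, which is what closes the lost-dirty-bit race.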

sys/i386/i386/pmap.c

@@ -2694,7 +2694,9 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 }
 
 /*
- * Clear the given bit in each of the given page's ptes.
+ * Clear the given bit in each of the given page's ptes.  The bit is
+ * expressed as a 32-bit mask.  Consequently, if the pte is 64 bits in
+ * size, only a bit within the least significant 32 can be cleared.
  */
 static __inline void
 pmap_clear_ptes(vm_page_t m, int bit)
@@ -2730,15 +2732,23 @@ pmap_clear_ptes(vm_page_t m, int bit)
 		PMAP_LOCK(pv->pv_pmap);
 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
+retry:
 		pbits = *pte;
 		if (pbits & bit) {
 			if (bit == PG_RW) {
+				/*
+				 * Regardless of whether a pte is 32 or 64 bits
+				 * in size, PG_RW and PG_M are among the least
+				 * significant 32 bits.
+				 */
+				if (!atomic_cmpset_int((u_int *)pte, pbits,
+				    pbits & ~(PG_RW | PG_M)))
+					goto retry;
 				if (pbits & PG_M) {
 					vm_page_dirty(m);
 				}
-				pte_store(pte, pbits & ~(PG_M|PG_RW));
 			} else {
-				pte_store(pte, pbits & ~bit);
+				atomic_clear_int((u_int *)pte, bit);
 			}
 			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 		}
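The new comment is the PAE caveat: with PAE enabled, an i386 pte is 64 bits wide, yet the code casts pte to (u_int *) and uses 32-bit atomics. That works because PG_RW and PG_M sit below bit 32 and x86 is little-endian, so the least significant 32 bits are the first word in memory. A small, hedged illustration of the layout argument (constants invented; union punning stands in for the kernel's pointer cast):

#include <assert.h>
#include <stdint.h>

#define	FAKE_PG_RW	0x002u		/* invented stand-ins for PG_RW/PG_M */
#define	FAKE_PG_M	0x040u

int
main(void)
{
	/* a PAE-style 64-bit pte with a high bit (e.g. NX, bit 63) set */
	union {
		uint64_t	pte;
		uint32_t	word[2];	/* word[0] = low half, little-endian */
	} u;

	u.pte = (1ULL << 63) | 0x067;

	/* clearing PG_RW|PG_M only needs to touch the low 32-bit word */
	u.word[0] &= ~(FAKE_PG_RW | FAKE_PG_M);

	assert(u.pte == ((1ULL << 63) | 0x025));
	return (0);
}

A bit above bit 31, such as PG_NX under PAE, could not be cleared through this 32-bit view, which is exactly the restriction the updated comment documents.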