Fix issue reported by alc:

MIPS doesn't really need to use atomic_cmpset_int() in situations like
 this because the software dirty bit emulation in trap.c acquires
 the pmap lock.  Atomics like this appear to be a carryover from i386
 where the hardware-managed TLB might concurrently set the modified bit.

Reviewed by:	alc
Author:	Jayachandran C.
Date:	2010-08-06 09:25:42 +00:00
Commit:	b1f19c11b6
Parent:	9968a42675
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=210922
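
For context: the pattern being removed is the compare-and-swap retry loop that i386 needs because the hardware-managed TLB can set the modified bit behind the kernel's back. On MIPS every writer of the PTE, including the software dirty-bit emulation in trap.c, runs with the pmap lock held, so a plain read-modify-write under that lock cannot lose an update. A minimal sketch of the two patterns, using hypothetical names and C11 atomics/pthreads in place of atomic_cmpset_int() and PMAP_LOCK():

/*
 * Illustration only -- hypothetical names; C11 atomics and pthreads
 * stand in for atomic_cmpset_int() and PMAP_LOCK().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

typedef uint32_t pte_t;

/* CAS retry loop: needed when another agent can update *pte concurrently. */
static void
set_readonly_cas(_Atomic pte_t *pte, pte_t ro_bit)
{
	pte_t oldbits, newbits;

	do {
		oldbits = atomic_load(pte);
		newbits = oldbits | ro_bit;
	} while (!atomic_compare_exchange_weak(pte, &oldbits, newbits));
}

/* Plain read-modify-write: safe when every writer takes the same lock. */
static void
set_readonly_locked(pthread_mutex_t *lock, pte_t *pte, pte_t ro_bit)
{
	pthread_mutex_lock(lock);
	*pte |= ro_bit;		/* no lost update: writers are serialized */
	pthread_mutex_unlock(lock);
}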

@@ -1716,7 +1716,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
-		pt_entry_t pbits, obits;
+		pt_entry_t pbits;
 		vm_page_t m;
 		vm_paddr_t pa;
@@ -1745,8 +1745,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 			/* Skip invalid PTEs */
 			if (!pte_test(pte, PTE_V))
 				continue;
-retry:
-			obits = pbits = *pte;
+			pbits = *pte;
 			pa = TLBLO_PTE_TO_PA(pbits);
 			if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
 				m = PHYS_TO_VM_PAGE(pa);
@@ -1757,8 +1756,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 			pte_set(&pbits, PTE_RO);
 			if (pbits != *pte) {
-				if (!atomic_cmpset_int((u_int *)pte, obits, pbits))
-					goto retry;
+				*pte = pbits;
 				pmap_update_page(pmap, sva, pbits);
 			}
 		}
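
Pieced together from the hunks above, the resulting per-PTE step reduces to a plain read-modify-write followed by a TLB update. The fragment below is a compilable sketch, not the verbatim pmap.c: the PTE bit values and the pte_test()/pte_set() stubs are hypothetical stand-ins, and the dirty-page bookkeeping between the hunks is elided.

/*
 * Compilable sketch of the simplified pmap_protect() inner step.
 * Identifiers follow the diff; bit values and helpers are hypothetical.
 */
#include <stdint.h>

typedef uint32_t pt_entry_t;

#define	PTE_V	0x01	/* hypothetical bit values */
#define	PTE_RO	0x04

static int  pte_test(pt_entry_t *pte, pt_entry_t bit) { return ((*pte & bit) != 0); }
static void pte_set(pt_entry_t *pte, pt_entry_t bit)  { *pte |= bit; }

static void
protect_one_pte(pt_entry_t *pte)
{
	pt_entry_t pbits;

	if (!pte_test(pte, PTE_V))	/* skip invalid PTEs */
		return;
	pbits = *pte;
	/* ... dirty-page bookkeeping from the lines between the hunks ... */
	pte_set(&pbits, PTE_RO);
	if (pbits != *pte) {
		/*
		 * Plain store instead of atomic_cmpset_int(): both this
		 * path and the software dirty-bit emulation in trap.c run
		 * with the pmap lock held, so no other writer can race.
		 */
		*pte = pbits;
		/* pmap_update_page() would flush the stale TLB entry here. */
	}
}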