On a failed fcmpset don't pointlessly repeat tests

In a few places, on a failed compare-and-set, both the amd64 pmap and
the arm64 pmap repeat tests on bits that won't change state while the
pmap is locked.  Eliminate some of these unnecessary tests.

Reviewed by:	andrew, kib, markj
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D31014
Alan Cox 2021-07-04 00:20:42 -05:00
parent 348c41d181
commit e41fde3ed7
2 changed files with 13 additions and 13 deletions
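The change is mechanical in both files: a compare-and-set that was retried via a goto to a label which repeated earlier tests becomes an fcmpset loop that retries only the swap, because the tested bits cannot change state while the pmap lock is held. The sketch below is not from the commit; it is a hypothetical user-space illustration, with C11 atomics standing in for the kernel's atomic_cmpset_long()/atomic_fcmpset_long() and cpu_spinwait(), and with made-up PG_RW/PG_M values.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Made-up bit values, for illustration only. */
#define	PG_RW	0x0000000000000002UL	/* writable */
#define	PG_M	0x0000000000000040UL	/* modified */

/*
 * Old pattern: the compare-and-swap is used like the kernel's cmpset.
 * A failure jumps back to a label that re-reads the entry and repeats
 * the PG_RW test, even though PG_RW cannot be set concurrently while
 * the (simulated) pmap lock is held.
 */
static bool
clear_rw_cmpset(_Atomic uint64_t *pte)
{
	uint64_t oldpte;
retry:
	oldpte = atomic_load(pte);
	if ((oldpte & PG_RW) == 0)
		return (false);
	if (!atomic_compare_exchange_strong(pte, &oldpte,
	    oldpte & ~(PG_RW | PG_M)))
		goto retry;		/* repeats the PG_RW test */
	return ((oldpte & PG_M) != 0);
}

/*
 * New pattern: a failed compare-and-exchange refreshes "oldpte" in
 * place (the fcmpset behaviour), so only the swap is retried and the
 * PG_RW test is not repeated.
 */
static bool
clear_rw_fcmpset(_Atomic uint64_t *pte)
{
	uint64_t oldpte;

	oldpte = atomic_load(pte);
	if ((oldpte & PG_RW) == 0)
		return (false);
	while (!atomic_compare_exchange_weak(pte, &oldpte,
	    oldpte & ~(PG_RW | PG_M)))
		;			/* cpu_spinwait() in the kernel */
	return ((oldpte & PG_M) != 0);
}

int
main(void)
{
	_Atomic uint64_t a = PG_RW | PG_M, b = PG_RW | PG_M;

	/* Both clear PG_RW and PG_M and report whether PG_M was set. */
	return (clear_rw_cmpset(&a) && clear_rw_fcmpset(&b) ? 0 : 1);
}

The property being relied on is the same in both APIs: C11 atomic_compare_exchange_*() and the kernel's atomic_fcmpset_long() write the observed value back through the "expected" pointer on failure, whereas plain cmpset does not, which is why the old code had to loop back to a label that re-read the entry and repeated its tests.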

amd64 pmap:

@@ -8459,7 +8459,7 @@ pmap_remove_write(vm_page_t m)
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	rw_wlock(lock);
-retry_pv_loop:
+retry:
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		pmap = PV_PMAP(pv);
 		if (!PMAP_TRYLOCK(pmap)) {
@@ -8469,7 +8469,7 @@ pmap_remove_write(vm_page_t m)
 			rw_wlock(lock);
 			if (pvh_gen != pvh->pv_gen) {
 				PMAP_UNLOCK(pmap);
-				goto retry_pv_loop;
+				goto retry;
 			}
 		}
 		PG_RW = pmap_rw_bit(pmap);
@@ -8493,7 +8493,7 @@ pmap_remove_write(vm_page_t m)
 			if (pvh_gen != pvh->pv_gen ||
 			    md_gen != m->md.pv_gen) {
 				PMAP_UNLOCK(pmap);
-				goto retry_pv_loop;
+				goto retry;
 			}
 		}
 		PG_M = pmap_modified_bit(pmap);
@@ -8503,12 +8503,11 @@ pmap_remove_write(vm_page_t m)
 		    ("pmap_remove_write: found a 2mpage in page %p's pv list",
 		    m));
 		pte = pmap_pde_to_pte(pde, pv->pv_va);
-retry:
 		oldpte = *pte;
 		if (oldpte & PG_RW) {
-			if (!atomic_cmpset_long(pte, oldpte, oldpte &
+			while (!atomic_fcmpset_long(pte, &oldpte, oldpte &
 			    ~(PG_RW | PG_M)))
-				goto retry;
+				cpu_spinwait();
 			if ((oldpte & PG_M) != 0)
 				vm_page_dirty(m);
 			pmap_invalidate_page(pmap, pv->pv_va);

arm64 pmap:

@@ -3223,10 +3223,12 @@ pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
 	 * Return if the L2 entry already has the desired access restrictions
 	 * in place.
 	 */
-retry:
 	if ((old_l2 & mask) == nbits)
 		return;
 
+	while (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
+		cpu_spinwait();
+
 	/*
 	 * When a dirty read/write superpage mapping is write protected,
 	 * update the dirty field of each of the superpage's constituent 4KB
@@ -3240,9 +3242,6 @@ pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
 			vm_page_dirty(mt);
 	}
 
-	if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
-		goto retry;
-
 	/*
 	 * Since a promotion must break the 4KB page mappings before making
 	 * the 2MB page mapping, a pmap_invalidate_page() suffices.
@@ -3334,7 +3333,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
 		    sva += L3_SIZE) {
 			l3 = pmap_load(l3p);
-retry:
+
 			/*
 			 * Go to the next L3 entry if the current one is
 			 * invalid or already has the desired access
@@ -3351,6 +3350,10 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 				continue;
 			}
 
+			while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) |
+			    nbits))
+				cpu_spinwait();
+
 			/*
 			 * When a dirty read/write mapping is write protected,
 			 * update the page's dirty field.
@@ -3360,8 +3363,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 			    pmap_pte_dirty(pmap, l3))
 				vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
 
-			if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
-				goto retry;
 			if (va == va_next)
 				va = sva;
 		}