Tidy up pmap_copy().  Notably, deindent the innermost loop by making a
simple change to the control flow.  Replace an unnecessary test by a
KASSERT.  Add a comment explaining an obscure test.

Reviewed by:	kib, markj
MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D20812
Author:	Alan Cox
Date:	2019-07-01 22:00:42 +00:00
parent	a4e0b5a471
commit	b6ce9ba9c3
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=349585
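The "simple change to the control flow" is the familiar invert-and-continue
transformation: test for the uninteresting case and skip to the next
iteration, so the common path loses one level of indentation. A minimal
sketch in C, with hypothetical helpers (is_managed(), copy_mapping())
standing in for the pmap internals:

#include <stdbool.h>

bool	is_managed(unsigned long pte);		/* hypothetical predicate */
void	copy_mapping(unsigned long pte);	/* hypothetical action */

/* Before: the interesting work sits one level deeper than the loop. */
void
copy_range_nested(const unsigned long *pte, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (is_managed(pte[i])) {
			copy_mapping(pte[i]);
			/* ...many more statements at this depth... */
		}
	}
}

/* After: inverting the test and continuing deindents the body. */
void
copy_range_flat(const unsigned long *pte, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!is_managed(pte[i]))
			continue;
		copy_mapping(pte[i]);
		/* ...the same statements, one level shallower... */
	}
}

In pmap_copy() the same trick turns "if ((ptetemp & PG_MANAGED) != 0) { ... }"
into "if ((ptetemp & PG_MANAGED) == 0) continue;", as the final hunk below
shows.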


@@ -6345,18 +6345,18 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  *
  *	This routine is only advisory and need not do anything.
  */
 void
 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
     vm_offset_t src_addr)
 {
 	struct rwlock *lock;
 	struct spglist free;
-	vm_offset_t addr;
-	vm_offset_t end_addr = src_addr + len;
-	vm_offset_t va_next;
+	pml4_entry_t *pml4e;
+	pdp_entry_t *pdpe;
+	pd_entry_t *pde, srcptepaddr;
+	pt_entry_t *dst_pte, PG_A, PG_M, PG_V, ptetemp, *src_pte;
+	vm_offset_t addr, end_addr, va_next;
 	vm_page_t dst_pdpg, dstmpte, srcmpte;
-	pt_entry_t PG_A, PG_M, PG_V;
 
 	if (dst_addr != src_addr)
 		return;
@@ -6375,6 +6375,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 	if (pmap_emulate_ad_bits(dst_pmap))
 		return;
 
+	end_addr = src_addr + len;
 	lock = NULL;
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
@@ -6389,11 +6390,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 	PG_V = pmap_valid_bit(dst_pmap);
 
 	for (addr = src_addr; addr < end_addr; addr = va_next) {
-		pt_entry_t *src_pte, *dst_pte;
-		pml4_entry_t *pml4e;
-		pdp_entry_t *pdpe;
-		pd_entry_t srcptepaddr, *pde;
-
 		KASSERT(addr < UPT_MIN_ADDRESS,
 		    ("pmap_copy: invalid to pmap_copy page tables"));
 
@@ -6435,7 +6431,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
 		    PMAP_ENTER_NORECLAIM, &lock))) {
 			*pde = srcptepaddr & ~PG_W;
-			pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE);
+			pmap_resident_count_inc(dst_pmap, NBPDR /
+			    PAGE_SIZE);
 			atomic_add_long(&pmap_pde_mappings, 1);
 		} else
 			dst_pdpg->wire_count--;
@@ -6453,58 +6450,54 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
 		src_pte = &src_pte[pmap_pte_index(addr)];
 		dstmpte = NULL;
-		while (addr < va_next) {
-			pt_entry_t ptetemp;
-
+		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
 			ptetemp = *src_pte;
+
 			/*
-			 * we only virtual copy managed pages
+			 * We only virtual copy managed pages.
 			 */
-			if ((ptetemp & PG_MANAGED) != 0) {
-				if (dstmpte != NULL &&
-				    dstmpte->pindex == pmap_pde_pindex(addr))
-					dstmpte->wire_count++;
-				else if ((dstmpte = pmap_allocpte(dst_pmap,
-				    addr, NULL)) == NULL)
-					goto out;
-				dst_pte = (pt_entry_t *)
-				    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
-				dst_pte = &dst_pte[pmap_pte_index(addr)];
-				if (*dst_pte == 0 &&
-				    pmap_try_insert_pv_entry(dst_pmap, addr,
-				    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
-				    &lock)) {
-					/*
-					 * Clear the wired, modified, and
-					 * accessed (referenced) bits
-					 * during the copy.
-					 */
-					*dst_pte = ptetemp & ~(PG_W | PG_M |
-					    PG_A);
-					pmap_resident_count_inc(dst_pmap, 1);
-				} else {
-					SLIST_INIT(&free);
-					if (pmap_unwire_ptp(dst_pmap, addr,
-					    dstmpte, &free)) {
-						/*
-						 * Although "addr" is not
-						 * mapped, paging-structure
-						 * caches could nonetheless
-						 * have entries that refer to
-						 * the freed page table pages.
-						 * Invalidate those entries.
-						 */
-						pmap_invalidate_page(dst_pmap,
-						    addr);
-						vm_page_free_pages_toq(&free,
-						    true);
-					}
-					goto out;
-				}
-				if (dstmpte->wire_count >= srcmpte->wire_count)
-					break;
-			}
-			addr += PAGE_SIZE;
-			src_pte++;
+			if ((ptetemp & PG_MANAGED) == 0)
+				continue;
+
+			if (dstmpte != NULL) {
+				KASSERT(dstmpte->pindex ==
+				    pmap_pde_pindex(addr),
+				    ("dstmpte pindex/addr mismatch"));
+				dstmpte->wire_count++;
+			} else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
+			    NULL)) == NULL)
+				goto out;
+			dst_pte = (pt_entry_t *)
+			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
+			dst_pte = &dst_pte[pmap_pte_index(addr)];
+			if (*dst_pte == 0 &&
+			    pmap_try_insert_pv_entry(dst_pmap, addr,
+			    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), &lock)) {
+				/*
+				 * Clear the wired, modified, and accessed
+				 * (referenced) bits during the copy.
+				 */
+				*dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
+				pmap_resident_count_inc(dst_pmap, 1);
+			} else {
+				SLIST_INIT(&free);
+				if (pmap_unwire_ptp(dst_pmap, addr, dstmpte,
+				    &free)) {
+					/*
+					 * Although "addr" is not mapped,
+					 * paging-structure caches could
+					 * nonetheless have entries that refer
+					 * to the freed page table pages.
+					 * Invalidate those entries.
+					 */
+					pmap_invalidate_page(dst_pmap, addr);
+					vm_page_free_pages_toq(&free, true);
+				}
+				goto out;
+			}
+			/* Have we copied all of the valid mappings? */
+			if (dstmpte->wire_count >= srcmpte->wire_count)
+				break;
 		}
 	}
 out:
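
The message's other two points are visible in that last hunk. The unnecessary
test: the inner loop never walks past va_next, so it never leaves the 2MB
region covered by one page table page; once dstmpte is set, the old
per-iteration check "dstmpte->pindex == pmap_pde_pindex(addr)" can only ever
be true, so the new code asserts it instead of branching on it. The obscure
test that gained a comment relies on the fact that a page table page's
wire_count tracks its number of valid mappings: once the destination page is
as heavily wired as the source page, every valid source PTE has been copied
and the loop can stop early. A hedged sketch of the test-to-KASSERT idiom
itself, with a hypothetical invariant_holds() predicate (only KASSERT() is
real kernel API here):

#include <sys/param.h>
#include <sys/systm.h>			/* KASSERT() */

static bool	invariant_holds(void);	/* hypothetical predicate */
static void	do_work(void);		/* hypothetical action */

static void
example(void)
{
	/*
	 * Before: a runtime branch on a condition that a correct
	 * kernel can never make false.
	 */
	if (invariant_holds())
		do_work();

	/*
	 * After: KASSERT() documents the invariant, panics if it is
	 * violated in kernels built with INVARIANTS, and compiles
	 * away entirely otherwise.
	 */
	KASSERT(invariant_holds(), ("example: invariant violated"));
	do_work();
}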