Implement pmap_copy().

(This includes the changes applied to the amd64 pmap_copy() in r349585.)

Reviewed by:    kib, markj
Differential Revision:    https://reviews.freebsd.org/D20790
parent 54afd9642b
commit b233a7ed74
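For context, pmap_copy() is the machine-dependent hook that the MI VM layer invokes during fork to pre-populate the child's page tables. The fragment below is a paraphrase of the typical call site in vm_map_copy_entry() in sys/vm/vm_map.c; it is illustrative only, not part of this diff, and the surrounding logic is elided:

/*
 * Paraphrased call site (vm_map_copy_entry() in sys/vm/vm_map.c).
 * After copy-on-write has been arranged for the map entry, the MI
 * layer lets the pmap module eagerly copy any valid mappings; since
 * pmap_copy() is advisory, anything it skips will simply fault in.
 */
pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
    dst_entry->end - dst_entry->start, src_entry->start);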
@@ -3875,12 +3875,153 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 *
 * Because the executable mappings created by this routine are copied,
 * it should not have to flush the instruction cache.
 */

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{
        struct rwlock *lock;
        struct spglist free;
        pd_entry_t *l0, *l1, *l2, srcptepaddr;
        pt_entry_t *dst_pte, ptetemp, *src_pte;
        vm_offset_t addr, end_addr, va_next;
        vm_page_t dst_l2pg, dstmpte, srcmpte;

        if (dst_addr != src_addr)
                return;
        end_addr = src_addr + len;
        lock = NULL;
        if (dst_pmap < src_pmap) {
                PMAP_LOCK(dst_pmap);
                PMAP_LOCK(src_pmap);
        } else {
                PMAP_LOCK(src_pmap);
                PMAP_LOCK(dst_pmap);
        }
        for (addr = src_addr; addr < end_addr; addr = va_next) {
                l0 = pmap_l0(src_pmap, addr);
                if (pmap_load(l0) == 0) {
                        va_next = (addr + L0_SIZE) & ~L0_OFFSET;
                        if (va_next < addr)
                                va_next = end_addr;
                        continue;
                }
                l1 = pmap_l0_to_l1(l0, addr);
                if (pmap_load(l1) == 0) {
                        va_next = (addr + L1_SIZE) & ~L1_OFFSET;
                        if (va_next < addr)
                                va_next = end_addr;
                        continue;
                }
                va_next = (addr + L2_SIZE) & ~L2_OFFSET;
                if (va_next < addr)
                        va_next = end_addr;
                l2 = pmap_l1_to_l2(l1, addr);
                srcptepaddr = pmap_load(l2);
                if (srcptepaddr == 0)
                        continue;
                if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
                        if ((addr & L2_OFFSET) != 0 ||
                            addr + L2_SIZE > end_addr)
                                continue;
                        dst_l2pg = pmap_alloc_l2(dst_pmap, addr, NULL);
                        if (dst_l2pg == NULL)
                                break;
                        l2 = (pd_entry_t *)
                            PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_l2pg));
                        l2 = &l2[pmap_l2_index(addr)];
                        if (pmap_load(l2) == 0 &&
                            ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
                            pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
                            PMAP_ENTER_NORECLAIM, &lock))) {
                                (void)pmap_load_store(l2, srcptepaddr &
                                    ~ATTR_SW_WIRED);
                                pmap_resident_count_inc(dst_pmap, L2_SIZE /
                                    PAGE_SIZE);
                                atomic_add_long(&pmap_l2_mappings, 1);
                        } else
                                dst_l2pg->wire_count--;
                        continue;
                }
                KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
                    ("pmap_copy: invalid L2 entry"));
                srcptepaddr &= ~ATTR_MASK;
                srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
                KASSERT(srcmpte->wire_count > 0,
                    ("pmap_copy: source page table page is unused"));
                if (va_next > end_addr)
                        va_next = end_addr;
                src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
                src_pte = &src_pte[pmap_l3_index(addr)];
                dstmpte = NULL;
                for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
                        ptetemp = pmap_load(src_pte);

                        /*
                         * We only virtual copy managed pages.
                         */
                        if ((ptetemp & ATTR_SW_MANAGED) == 0)
                                continue;

                        if (dstmpte != NULL) {
                                KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
                                    ("dstmpte pindex/addr mismatch"));
                                dstmpte->wire_count++;
                        } else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
                            NULL)) == NULL)
                                goto out;
                        dst_pte = (pt_entry_t *)
                            PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
                        dst_pte = &dst_pte[pmap_l3_index(addr)];
                        if (pmap_load(dst_pte) == 0 &&
                            pmap_try_insert_pv_entry(dst_pmap, addr,
                            PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
                                /*
                                 * Clear the wired, modified, and accessed
                                 * (referenced) bits during the copy.
                                 *
                                 * XXX not yet
                                 */
                                (void)pmap_load_store(dst_pte, ptetemp &
                                    ~ATTR_SW_WIRED);
                                pmap_resident_count_inc(dst_pmap, 1);
                        } else {
                                SLIST_INIT(&free);
                                if (pmap_unwire_l3(dst_pmap, addr, dstmpte,
                                    &free)) {
                                        /*
                                         * Although "addr" is not mapped,
                                         * paging-structure caches could
                                         * nonetheless have entries that refer
                                         * to the freed page table pages.
                                         * Invalidate those entries.
                                         *
                                         * XXX redundant invalidation
                                         */
                                        pmap_invalidate_page(dst_pmap, addr);
                                        vm_page_free_pages_toq(&free, true);
                                }
                                goto out;
                        }
                        /* Have we copied all of the valid mappings? */
                        if (dstmpte->wire_count >= srcmpte->wire_count)
                                break;
                }
        }
out:
        /*
         * XXX This barrier may not be needed because the destination pmap is
         * not active.
         */
        dsb(ishst);

        if (lock != NULL)
                rw_wunlock(lock);
        PMAP_UNLOCK(src_pmap);
        PMAP_UNLOCK(dst_pmap);
}

/*
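A note on the locking at the top of pmap_copy(): the two pmap locks are taken in ascending address order, so two threads copying between the same pair of pmaps in opposite directions cannot deadlock. The standalone program below illustrates the same idiom with POSIX mutexes; it is a hypothetical example for exposition, not FreeBSD code:

#include <pthread.h>
#include <stdio.h>

struct obj {
        pthread_mutex_t lock;
};

/*
 * Acquire two locks in a single global order (here, ascending object
 * address), mirroring the PMAP_LOCK ordering in pmap_copy().  Callers
 * passing the same pair in either order take the locks identically,
 * so no lock-order reversal is possible.
 */
static void
lock_pair(struct obj *a, struct obj *b)
{
        if (a < b) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

static void
unlock_pair(struct obj *a, struct obj *b)
{
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
}

int
main(void)
{
        struct obj x = { PTHREAD_MUTEX_INITIALIZER };
        struct obj y = { PTHREAD_MUTEX_INITIALIZER };

        /* lock_pair(&x, &y) and lock_pair(&y, &x) lock in the same order. */
        lock_pair(&x, &y);
        printf("both locked; no deadlock possible\n");
        unlock_pair(&x, &y);
        return (0);
}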