Implement pmap_mincore for arm. Now programs using it don't cause a
flood of console messages.

Reviewed by:	alc@
Author:	Warner Losh
Date:	2012-05-21 06:56:26 +00:00
parent 50c9ba2c76
commit c69a673406


@@ -4557,9 +4557,59 @@ pmap_remove_write(vm_page_t m)
 int
 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
-        printf("pmap_mincore()\n");
-        return (0);
+        struct l2_bucket *l2b;
+        pt_entry_t *ptep, pte;
+        vm_paddr_t pa;
+        vm_page_t m;
+        int val;
+        boolean_t managed;
+
+        PMAP_LOCK(pmap);
+retry:
+        l2b = pmap_get_l2_bucket(pmap, addr);
+        if (l2b == NULL) {
+                val = 0;
+                goto out;
+        }
+        ptep = &l2b->l2b_kva[l2pte_index(addr)];
+        pte = *ptep;
+        if (!l2pte_valid(pte)) {
+                val = 0;
+                goto out;
+        }
+        val = MINCORE_INCORE;
+        if (pte & L2_S_PROT_W)
+                val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+        managed = false;
+        pa = l2pte_pa(pte);
+        m = PHYS_TO_VM_PAGE(pa);
+        if (m != NULL && !(m->oflags & VPO_UNMANAGED))
+                managed = true;
+        if (managed) {
+                /*
+                 * the ARM pmap tries to maintain a per-mapping
+                 * reference bit.  The trouble is that it's kept in
+                 * the PV entry, not the PTE, so it's costly to access
+                 * here.  You would need to acquire the page queues
+                 * lock, call pmap_find_pv(), and introduce a custom
+                 * version of vm_page_pa_tryrelock() that releases and
+                 * reacquires the page queues lock.  In the end, I
+                 * doubt it's worthwhile.  This may falsely report
+                 * the given address as referenced.
+                 */
+                if ((m->md.pvh_attrs & PVF_REF) != 0)
+                        val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+        }
+        if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+            (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
+                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+                        goto retry;
+        } else
+out:
+                PA_UNLOCK_COND(*locked_pa);
+        PMAP_UNLOCK(pmap);
+        return (val);
 }
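
For context, a minimal userland sketch (not part of the commit) of the kind of program that exercises this path: mincore(2) fills a vector with per-page residency flags, and before this change every such call on arm only hit the printf() stub removed above. The MINCORE_* flags are the standard ones from <sys/mman.h>; the file name and output format here are illustrative.

/*
 * mincore_demo.c -- illustrative only: map one anonymous page, dirty
 * it, and query its state with mincore(2).  On arm this now reaches
 * the pmap_mincore() implementation added above.
 */
#include <sys/mman.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        size_t pagesz = (size_t)getpagesize();
        char *p, vec;

        p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
                err(1, "mmap");
        p[0] = 1;       /* Touch the page so it is resident and dirty. */
        if (mincore(p, pagesz, &vec) == -1)
                err(1, "mincore");
        printf("incore=%d referenced=%d modified=%d\n",
            (vec & MINCORE_INCORE) != 0,
            (vec & (MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER)) != 0,
            (vec & (MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER)) != 0);
        return (0);
}

Note that the arm PTE carries no hardware modified or referenced bits, so the implementation above derives MINCORE_MODIFIED from write permission (L2_S_PROT_W) and may over-report the referenced bits, as its in-code comment explains; callers should treat both flags as conservative hints.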