Remove page locking from pmap_mincore().
After r352110 the page lock no longer protects a page's identity, so there
is no purpose in locking the page in pmap_mincore().  Instead, if
vm.mincore_mapped is set to the non-default value of 0, re-lookup the page
after acquiring its object lock, which holds the page's identity stable.

The change removes the last callers of vm_page_pa_tryrelock(), so remove it.

Reviewed by:	kib
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21823
commit 01cef4caa7
parent 30738a349d
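For context, pmap_mincore() supplies the per-page flags that mincore(2) ultimately reports, and the vm.mincore_mapped sysctl selects whether only the calling process's own mapping is examined (the default, 1) or other mappings of the page are consulted under the object lock (0). The following is a hypothetical userspace demo, not part of this commit, using only the standard <sys/mman.h> interfaces to observe those flags:

#include <sys/mman.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        size_t i, npages, pgsz;
        char *p, *vec;

        pgsz = (size_t)getpagesize();
        npages = 4;
        p = mmap(NULL, npages * pgsz, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
                err(1, "mmap");
        memset(p, 1, 2 * pgsz);         /* touch only the first two pages */

        if ((vec = malloc(npages)) == NULL)
                err(1, "malloc");
        if (mincore(p, npages * pgsz, vec) != 0)
                err(1, "mincore");
        for (i = 0; i < npages; i++)
                printf("page %zu: incore %d modified %d referenced %d\n", i,
                    (vec[i] & MINCORE_INCORE) != 0,
                    (vec[i] & (MINCORE_MODIFIED |
                    MINCORE_MODIFIED_OTHER)) != 0,
                    (vec[i] & (MINCORE_REFERENCED |
                    MINCORE_REFERENCED_OTHER)) != 0);
        return (0);
}

With the default vm.mincore_mapped=1, only the first two pages should report incore/modified/referenced; the untouched pages report nothing.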
@@ -8433,10 +8433,12 @@ pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
 }
 
 /*
- * perform the pmap work for mincore
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
         pd_entry_t *pdep;
         pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
@@ -8449,7 +8451,6 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         PG_RW = pmap_rw_bit(pmap);
 
         PMAP_LOCK(pmap);
-retry:
         pdep = pmap_pde(pmap, addr);
         if (pdep != NULL && (*pdep & PG_V)) {
                 if (*pdep & PG_PS) {
@@ -8478,11 +8479,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
             (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
-                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-                        goto retry;
-        } else
-                PA_UNLOCK_COND(*locked_pa);
+                *pap = pa;
+        }
         PMAP_UNLOCK(pmap);
         return (val);
 }
@@ -4142,10 +4142,12 @@ pmap_remove_write(vm_page_t m)
 
 
 /*
- * perform the pmap work for mincore
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
         struct l2_bucket *l2b;
         pt_entry_t *ptep, pte;
@@ -4155,17 +4157,16 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         boolean_t managed;
 
         PMAP_LOCK(pmap);
-retry:
         l2b = pmap_get_l2_bucket(pmap, addr);
         if (l2b == NULL) {
-                val = 0;
-                goto out;
+                PMAP_UNLOCK(pmap);
+                return (0);
         }
         ptep = &l2b->l2b_kva[l2pte_index(addr)];
         pte = *ptep;
         if (!l2pte_valid(pte)) {
-                val = 0;
-                goto out;
+                PMAP_UNLOCK(pmap);
+                return (0);
         }
         val = MINCORE_INCORE;
         if (pte & L2_S_PROT_W)
@@ -4192,12 +4193,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         }
         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
-                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-                        goto retry;
-        } else
-out:
-                PA_UNLOCK_COND(*locked_pa);
+                *pap = pa;
+        }
         PMAP_UNLOCK(pmap);
         return (val);
 }
@@ -6217,10 +6217,12 @@ pmap_activate(struct thread *td)
 }
 
 /*
- * Perform the pmap work for mincore.
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
         pt1_entry_t *pte1p, pte1;
         pt2_entry_t *pte2p, pte2;
@@ -6229,7 +6231,6 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         int val;
 
         PMAP_LOCK(pmap);
-retry:
         pte1p = pmap_pte1(pmap, addr);
         pte1 = pte1_load(pte1p);
         if (pte1_is_section(pte1)) {
@@ -6257,11 +6258,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         }
         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
-                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-                        goto retry;
-        } else
-                PA_UNLOCK_COND(*locked_pa);
+                *pap = pa;
+        }
         PMAP_UNLOCK(pmap);
         return (val);
 }
@@ -5607,10 +5607,12 @@ pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
 }
 
 /*
- * perform the pmap work for mincore
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
         pt_entry_t *pte, tpte;
         vm_paddr_t mask, pa;
@@ -5618,8 +5620,6 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         bool managed;
 
         PMAP_LOCK(pmap);
-retry:
-        val = 0;
         pte = pmap_pte(pmap, addr, &lvl);
         if (pte != NULL) {
                 tpte = pmap_load(pte);
@@ -5649,18 +5649,16 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 
                 pa = (tpte & ~ATTR_MASK) | (addr & mask);
-        } else
+        } else {
                 managed = false;
+                val = 0;
+        }
 
         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
-                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-                        goto retry;
-        } else
-                PA_UNLOCK_COND(*locked_pa);
+                *pap = pa;
+        }
         PMAP_UNLOCK(pmap);
 
         return (val);
 }
 
@@ -5700,10 +5700,12 @@ __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode)
 }
 
 /*
- * perform the pmap work for mincore
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 static int
-__CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+__CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
         pd_entry_t pde;
         pt_entry_t pte;
@@ -5711,7 +5713,6 @@ __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         int val;
 
         PMAP_LOCK(pmap);
-retry:
         pde = *pmap_pde(pmap, addr);
         if (pde != 0) {
                 if ((pde & PG_PS) != 0) {
@@ -5740,11 +5741,8 @@ __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
             (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
-                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-                        goto retry;
-        } else
-                PA_UNLOCK_COND(*locked_pa);
+                *pap = pa;
+        }
         PMAP_UNLOCK(pmap);
         return (val);
 }
@@ -632,10 +632,10 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 }
 
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
 
-        return (pmap_methods_ptr->pm_mincore(pmap, addr, locked_pa));
+        return (pmap_methods_ptr->pm_mincore(pmap, addr, pap));
 }
 
 void
@@ -2362,7 +2362,7 @@ kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry,
         vm_object_t obj, tobj;
         vm_page_t m, m_adv;
         vm_offset_t addr;
-        vm_paddr_t locked_pa;
+        vm_paddr_t pa;
         vm_pindex_t pi, pi_adv, pindex;
 
         *super = false;
@@ -2370,7 +2370,7 @@ kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry,
         if (vmmap_skip_res_cnt)
                 return;
 
-        locked_pa = 0;
+        pa = 0;
         obj = entry->object.vm_object;
         addr = entry->start;
         m_adv = NULL;
@@ -2400,8 +2400,7 @@ kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry,
                 m_adv = NULL;
                 if (m->psind != 0 && addr + pagesizes[1] <= entry->end &&
                     (addr & (pagesizes[1] - 1)) == 0 &&
-                    (pmap_mincore(map->pmap, addr, &locked_pa) &
-                    MINCORE_SUPER) != 0) {
+                    (pmap_mincore(map->pmap, addr, &pa) & MINCORE_SUPER) != 0) {
                         *super = true;
                         pi_adv = atop(pagesizes[1]);
                 } else {
@@ -2417,7 +2416,6 @@ kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry,
                 *resident_count += pi_adv;
 next:;
         }
-        PA_UNLOCK_COND(locked_pa);
 }
 
 /*
@@ -3193,10 +3193,12 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
 }
 
 /*
- * perform the pmap work for mincore
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
         pt_entry_t *ptep, pte;
         vm_paddr_t pa;
@@ -3204,12 +3206,11 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         int val;
 
         PMAP_LOCK(pmap);
-retry:
         ptep = pmap_pte(pmap, addr);
         pte = (ptep != NULL) ? *ptep : 0;
         if (!pte_test(&pte, PTE_V)) {
-                val = 0;
-                goto out;
+                PMAP_UNLOCK(pmap);
+                return (0);
         }
         val = MINCORE_INCORE;
         if (pte_test(&pte, PTE_D))
@@ -3229,12 +3230,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
             pte_test(&pte, PTE_MANAGED)) {
-                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-                        goto retry;
-        } else
-out:
-                PA_UNLOCK_COND(*locked_pa);
+                *pap = pa;
+        }
         PMAP_UNLOCK(pmap);
         return (val);
 }
@@ -3565,7 +3565,7 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
  */
 static int
 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
-    vm_paddr_t *locked_pa)
+    vm_paddr_t *pap)
 {
 
         /* XXX: this should be implemented at some point */
@@ -91,7 +91,7 @@ CODE {
         }
 
         static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
-            vm_paddr_t *locked_pa)
+            vm_paddr_t *pap)
         {
                 return (0);
         }
@@ -679,7 +679,7 @@ METHOD void zero_page_area {
  *
  * @param _pmap         physical map
  * @param _addr         page virtual address
- * @param _locked_pa    page physical address
+ * @param _pa           page physical address
  *
  * @retval 0            no result
  * @retval non-zero     mincore(2) flag values
@@ -688,7 +688,7 @@ METHOD int mincore {
         mmu_t           _mmu;
         pmap_t          _pmap;
         vm_offset_t     _addr;
-        vm_paddr_t      *_locked_pa;
+        vm_paddr_t      *_pap;
 } DEFAULT mmu_null_mincore;
 
 
@@ -382,11 +382,11 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 }
 
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
 
         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
-        return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
+        return (MMU_MINCORE(mmu_obj, pmap, addr, pap));
 }
 
 void
@@ -4228,10 +4228,12 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 }
 
 /*
- * perform the pmap work for mincore
+ * Perform the pmap work for mincore(2). If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
         pt_entry_t *l2, *l3, tpte;
         vm_paddr_t pa;
@@ -4239,10 +4241,6 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
         bool managed;
 
         PMAP_LOCK(pmap);
-retry:
-        managed = false;
-        val = 0;
-
         l2 = pmap_l2(pmap, addr);
         if (l2 != NULL && ((tpte = pmap_load(l2)) & PTE_V) != 0) {
                 if ((tpte & PTE_RWX) != 0) {
@@ -4251,8 +4249,10 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
                 } else {
                         l3 = pmap_l2_to_l3(l2, addr);
                         tpte = pmap_load(l3);
-                        if ((tpte & PTE_V) == 0)
-                                goto done;
+                        if ((tpte & PTE_V) == 0) {
+                                PMAP_UNLOCK(pmap);
+                                return (0);
+                        }
                         pa = PTE_TO_PHYS(tpte) | (addr & L3_OFFSET);
                         val = MINCORE_INCORE;
                 }
@@ -4262,16 +4262,14 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
                 if ((tpte & PTE_A) != 0)
                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
                 managed = (tpte & PTE_SW_MANAGED) == PTE_SW_MANAGED;
+        } else {
+                managed = false;
+                val = 0;
         }
-
-done:
         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
-                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-                        goto retry;
-        } else
-                PA_UNLOCK_COND(*locked_pa);
+                *pap = pa;
+        }
         PMAP_UNLOCK(pmap);
         return (val);
 }
@@ -2241,7 +2241,7 @@ pmap_remove_write(vm_page_t m)
 }
 
 int
-pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *pap)
 {
 
         /* TODO; */
@@ -144,8 +144,7 @@ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
 boolean_t        pmap_is_referenced(vm_page_t m);
 boolean_t        pmap_is_valid_memattr(pmap_t, vm_memattr_t);
 vm_offset_t      pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-int              pmap_mincore(pmap_t pmap, vm_offset_t addr,
-                    vm_paddr_t *locked_pa);
+int              pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap);
 void             pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
                     vm_object_t object, vm_pindex_t pindex, vm_size_t size);
 boolean_t        pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
@@ -779,21 +779,16 @@ sys_mincore(struct thread *td, struct mincore_args *uap)
 int
 kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
 {
-        vm_offset_t addr, first_addr;
-        vm_offset_t end, cend;
         pmap_t pmap;
         vm_map_t map;
-        int error = 0;
-        int vecindex, lastvecindex;
-        vm_map_entry_t current;
-        vm_map_entry_t entry;
+        vm_map_entry_t current, entry;
         vm_object_t object;
-        vm_paddr_t locked_pa;
+        vm_offset_t addr, cend, end, first_addr;
+        vm_paddr_t pa;
         vm_page_t m;
         vm_pindex_t pindex;
-        int mincoreinfo;
+        int error, lastvecindex, mincoreinfo, vecindex;
         unsigned int timestamp;
-        boolean_t locked;
 
         /*
          * Make sure that the addresses presented are valid for user
@@ -836,7 +831,7 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
                  * ignore submaps (for now) or null objects
                  */
                 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
-                        current->object.vm_object == NULL)
+                    current->object.vm_object == NULL)
                         continue;
 
                 /*
@@ -849,50 +844,45 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
                 if (cend > end)
                         cend = end;
 
                 /*
                  * scan this entry one page at a time
                  */
-                while (addr < cend) {
+                for (; addr < cend; addr += PAGE_SIZE) {
                         /*
                          * Check pmap first, it is likely faster, also
                          * it can provide info as to whether we are the
                          * one referencing or modifying the page.
                          */
-                        object = NULL;
-                        locked_pa = 0;
-                retry:
                         m = NULL;
-                        mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
+                        object = NULL;
+retry:
+                        pa = 0;
+                        mincoreinfo = pmap_mincore(pmap, addr, &pa);
                         if (mincore_mapped) {
                                 /*
                                  * We only care about this pmap's
                                  * mapping of the page, if any.
                                  */
-                                if (locked_pa != 0) {
-                                        vm_page_unlock(PHYS_TO_VM_PAGE(
-                                            locked_pa));
-                                }
-                        } else if (locked_pa != 0) {
+                                ;
+                        } else if (pa != 0) {
                                 /*
                                  * The page is mapped by this process but not
                                  * both accessed and modified.  It is also
                                  * managed.  Acquire the object lock so that
-                                 * other mappings might be examined.
+                                 * other mappings might be examined.  The page's
+                                 * identity may change at any point before its
+                                 * object lock is acquired, so re-validate if
+                                 * necessary.
                                  */
-                                m = PHYS_TO_VM_PAGE(locked_pa);
-                                if (m->object != object) {
+                                m = PHYS_TO_VM_PAGE(pa);
+                                while (object == NULL || m->object != object) {
                                         if (object != NULL)
                                                 VM_OBJECT_WUNLOCK(object);
-                                        object = m->object;
-                                        locked = VM_OBJECT_TRYWLOCK(object);
-                                        vm_page_unlock(m);
-                                        if (!locked) {
-                                                VM_OBJECT_WLOCK(object);
-                                                vm_page_lock(m);
-                                                goto retry;
-                                        }
-                                } else
-                                        vm_page_unlock(m);
+                                        object = (vm_object_t)atomic_load_ptr(
+                                            &m->object);
+                                        if (object == NULL)
+                                                goto retry;
+                                        VM_OBJECT_WLOCK(object);
+                                }
+                                if (pa != pmap_extract(pmap, addr))
+                                        goto retry;
                                 KASSERT(vm_page_all_valid(m),
                                     ("mincore: page %p is mapped but invalid",
                                     m));
@@ -922,11 +912,14 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
                                 }
                         }
                         if (m != NULL) {
-                                /* Examine other mappings to the page. */
+                                VM_OBJECT_ASSERT_WLOCKED(m->object);
+
+                                /* Examine other mappings of the page. */
                                 if (m->dirty == 0 && pmap_is_modified(m))
                                         vm_page_dirty(m);
                                 if (m->dirty != 0)
                                         mincoreinfo |= MINCORE_MODIFIED_OTHER;
+
                                 /*
                                  * The first test for PGA_REFERENCED is an
                                  * optimization.  The second test is
@@ -985,7 +978,6 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
                                 goto RestartScan;
 
                         lastvecindex = vecindex;
-                        addr += PAGE_SIZE;
                 }
         }
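The loop added to kern_mincore() above is the commit message's re-lookup: m->object is only a stable identity once the object lock is held, so the code snapshots the pointer, locks it, and repeats until the snapshot still matches, then re-checks the translation with pmap_extract(). A minimal userspace analogue of that revalidation pattern, under hypothetical types (the kernel code above uses atomic_load_ptr() and VM_OBJECT_WLOCK()):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct object {
        pthread_mutex_t lock;
};

struct page {
        _Atomic(struct object *) object;        /* unstable until locked */
};

/*
 * Lock the object that owns "m", revalidating after each acquire.
 * Returns NULL if the page currently has no owner (caller restarts).
 */
static struct object *
lock_owner(struct page *m)
{
        struct object *obj;

        for (;;) {
                obj = atomic_load(&m->object);
                if (obj == NULL)
                        return (NULL);
                pthread_mutex_lock(&obj->lock);
                if (obj == atomic_load(&m->object))
                        return (obj);           /* identity held stable */
                pthread_mutex_unlock(&obj->lock);
        }
}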
@@ -168,10 +168,6 @@ SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &boot_pages, 0,
     "number of pages allocated for bootstrapping the VM system");
 
-static int pa_tryrelock_restart;
-SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
-    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
-
 static TAILQ_HEAD(, vm_page) blacklist_head;
 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
@@ -252,34 +248,6 @@ CTASSERT(sizeof(u_long) >= 8);
 #endif
 #endif
 
-/*
- * Try to acquire a physical address lock while a pmap is locked.  If we
- * fail to trylock we unlock and lock the pmap directly and cache the
- * locked pa in *locked.  The caller should then restart their loop in case
- * the virtual to physical mapping has changed.
- */
-int
-vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
-{
-        vm_paddr_t lockpa;
-
-        lockpa = *locked;
-        *locked = pa;
-        if (lockpa) {
-                PA_LOCK_ASSERT(lockpa, MA_OWNED);
-                if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
-                        return (0);
-                PA_UNLOCK(lockpa);
-        }
-        if (PA_TRYLOCK(pa))
-                return (0);
-        PMAP_UNLOCK(pmap);
-        atomic_add_int(&pa_tryrelock_restart, 1);
-        PA_LOCK(pa);
-        PMAP_LOCK(pmap);
-        return (EAGAIN);
-}
-
 /*
  * vm_set_page_size:
  *
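For reference, the deleted vm_page_pa_tryrelock() implemented the classic try-relock idiom: with the pmap lock held, trylock the physical-address lock; on failure, drop the pmap lock, block on the PA lock, retake the pmap lock, and return EAGAIN so the caller restarts, since the virtual-to-physical mapping may have changed meanwhile. A hypothetical pthread rendering of the same idiom (not FreeBSD code):

#include <errno.h>
#include <pthread.h>

/*
 * With "outer" held, acquire "inner" without inverting lock order.
 * Returns 0 if "inner" was taken immediately; EAGAIN if "outer" had
 * to be dropped and retaken, in which case anything computed under
 * "outer" is stale and the caller must restart its lookup.
 */
static int
tryrelock(pthread_mutex_t *outer, pthread_mutex_t *inner)
{
        if (pthread_mutex_trylock(inner) == 0)
                return (0);
        pthread_mutex_unlock(outer);
        pthread_mutex_lock(inner);
        pthread_mutex_lock(outer);
        return (EAGAIN);
}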
@@ -607,7 +607,6 @@ void vm_page_invalid(vm_page_t m);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
-int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
 void vm_page_pqbatch_drain(void);
 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
 vm_page_t vm_page_prev(vm_page_t m);