Previously, vm_radix_remove() would panic if the radix trie didn't
contain a vm_page_t at the specified index.  However, with this change,
vm_radix_remove() no longer panics.  Instead, it returns NULL if there
is no vm_page_t at the specified index.  Otherwise, it returns the
vm_page_t.  The motivation for this change is that it simplifies the
use of radix tries in the amd64, arm64, and i386 pmap implementations.
Instead of performing a lookup before every remove, the pmap can simply
perform the remove.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D8708
commit e94965d82e (parent 3c606f671e)
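To make the effect on callers concrete, the sketch below contrasts the old and new usage of vm_radix_remove() in a pmap-style caller. It is illustrative only and is not part of the diff: the helper name example_remove_pt_page() is hypothetical, while vm_radix_lookup(), vm_radix_remove(), and the pm_root trie are the real interfaces changed by this commit.

/*
 * Illustrative sketch only -- not part of this commit.  It shows how a
 * pmap-style caller is simplified once vm_radix_remove() returns the
 * removed page (or NULL) instead of panicking on a missing key.
 */
static __inline vm_page_t
example_remove_pt_page(pmap_t pmap, vm_pindex_t pindex)
{
	vm_page_t mpte;

	/*
	 * Old pattern: a lookup was required before every remove, because
	 * vm_radix_remove() panicked if the index was absent:
	 *
	 *	mpte = vm_radix_lookup(&pmap->pm_root, pindex);
	 *	if (mpte != NULL)
	 *		vm_radix_remove(&pmap->pm_root, pindex);
	 */

	/* New pattern: a single call; NULL means nothing was stored there. */
	mpte = vm_radix_remove(&pmap->pm_root, pindex);
	return (mpte);
}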
sys/amd64/amd64/pmap.c

@@ -614,7 +614,6 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
-static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
     struct rwlock **lockp);
@@ -625,7 +624,7 @@ static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
     struct spglist *free, struct rwlock **lockp);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
-static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
+static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
     struct spglist *free);
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
@@ -2209,29 +2208,17 @@ pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 }
 
 /*
- * Looks for a page table page mapping the specified virtual address in the
- * specified pmap's collection of idle page table pages. Returns NULL if there
- * is no page table page corresponding to the specified virtual address.
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it.
+ * Otherwise, returns NULL if there is no page table page corresponding to the
+ * specified virtual address.
  */
 static __inline vm_page_t
-pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	return (vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(va)));
-}
-
-/*
- * Removes the specified page table page from the specified pmap's collection
- * of idle page table pages.  The specified page table page must be a member of
- * the pmap's collection.
- */
-static __inline void
-pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
-{
-
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	vm_radix_remove(&pmap->pm_root, mpte->pindex);
+	return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
 }
 
 /*
@@ -3450,10 +3437,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 	oldpde = *pde;
 	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
-	if ((oldpde & PG_A) != 0 && (mpte = pmap_lookup_pt_page(pmap, va)) !=
-	    NULL)
-		pmap_remove_pt_page(pmap, mpte);
-	else {
+	if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
+	    NULL) {
 		KASSERT((oldpde & PG_W) == 0,
 		    ("pmap_demote_pde: page table page for a wired mapping"
 		    " is missing"));
@@ -3567,11 +3552,10 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 
 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mpte = pmap_lookup_pt_page(pmap, va);
+	mpte = pmap_remove_pt_page(pmap, va);
 	if (mpte == NULL)
 		panic("pmap_remove_kernel_pde: Missing pt page.");
 
-	pmap_remove_pt_page(pmap, mpte);
 	mptepa = VM_PAGE_TO_PHYS(mpte);
 	newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
 
@@ -3646,9 +3630,8 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 	if (pmap == kernel_pmap) {
 		pmap_remove_kernel_pde(pmap, pdq, sva);
 	} else {
-		mpte = pmap_lookup_pt_page(pmap, sva);
+		mpte = pmap_remove_pt_page(pmap, sva);
 		if (mpte != NULL) {
-			pmap_remove_pt_page(pmap, mpte);
 			pmap_resident_count_dec(pmap, 1);
 			KASSERT(mpte->wire_count == NPTEPG,
 			    ("pmap_remove_pde: pte page wire count error"));
@@ -5488,9 +5471,8 @@ pmap_remove_pages(pmap_t pmap)
 				    TAILQ_EMPTY(&mt->md.pv_list))
 					vm_page_aflag_clear(mt, PGA_WRITEABLE);
 			}
-			mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
+			mpte = pmap_remove_pt_page(pmap, pv->pv_va);
 			if (mpte != NULL) {
-				pmap_remove_pt_page(pmap, mpte);
 				pmap_resident_count_dec(pmap, 1);
 				KASSERT(mpte->wire_count == NPTEPG,
 				    ("pmap_remove_pages: pte page wire count error"));
sys/arm64/arm64/pmap.c

@@ -2509,29 +2509,17 @@ pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 }
 
 /*
- * Looks for a page table page mapping the specified virtual address in the
- * specified pmap's collection of idle page table pages. Returns NULL if there
- * is no page table page corresponding to the specified virtual address.
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it.
+ * Otherwise, returns NULL if there is no page table page corresponding to the
+ * specified virtual address.
 */
 static __inline vm_page_t
-pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	return (vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(va)));
-}
-
-/*
- * Removes the specified page table page from the specified pmap's collection
- * of idle page table pages.  The specified page table page must be a member of
- * the pmap's collection.
- */
-static __inline void
-pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
-{
-
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	vm_radix_remove(&pmap->pm_root, mpte->pindex);
+	return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
 }
 
 /*
@@ -3586,10 +3574,9 @@ pmap_remove_pages(pmap_t pmap)
 				    TAILQ_EMPTY(&mt->md.pv_list))
 					vm_page_aflag_clear(mt, PGA_WRITEABLE);
 			}
-			ml3 = pmap_lookup_pt_page(pmap,
+			ml3 = pmap_remove_pt_page(pmap,
 			    pv->pv_va);
 			if (ml3 != NULL) {
-				pmap_remove_pt_page(pmap, ml3);
 				pmap_resident_count_dec(pmap,1);
 				KASSERT(ml3->wire_count == NL3PG,
 				    ("pmap_remove_pages: l3 page wire count error"));
@@ -4374,9 +4361,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
 		return (NULL);
 	}
 
-	if ((ml3 = pmap_lookup_pt_page(pmap, va)) != NULL) {
-		pmap_remove_pt_page(pmap, ml3);
-	} else {
+	if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
 		ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
 		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
sys/i386/i386/pmap.c

@@ -318,7 +318,6 @@ static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
-static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
@@ -328,7 +327,7 @@ static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
     struct spglist *free);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
     struct spglist *free);
-static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
+static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
     struct spglist *free);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
@@ -1713,29 +1712,17 @@ pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 }
 
 /*
- * Looks for a page table page mapping the specified virtual address in the
- * specified pmap's collection of idle page table pages. Returns NULL if there
- * is no page table page corresponding to the specified virtual address.
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it.
+ * Otherwise, returns NULL if there is no page table page corresponding to the
+ * specified virtual address.
 */
 static __inline vm_page_t
-pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
 {
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	return (vm_radix_lookup(&pmap->pm_root, va >> PDRSHIFT));
-}
-
-/*
- * Removes the specified page table page from the specified pmap's collection
- * of idle page table pages.  The specified page table page must be a member of
- * the pmap's collection.
- */
-static __inline void
-pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
-{
-
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	vm_radix_remove(&pmap->pm_root, mpte->pindex);
+	return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT));
 }
 
 /*
@@ -2630,10 +2617,8 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 	oldpde = *pde;
 	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
-	if ((oldpde & PG_A) != 0 && (mpte = pmap_lookup_pt_page(pmap, va)) !=
-	    NULL)
-		pmap_remove_pt_page(pmap, mpte);
-	else {
+	if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
+	    NULL) {
 		KASSERT((oldpde & PG_W) == 0,
 		    ("pmap_demote_pde: page table page for a wired mapping"
 		    " is missing"));
@@ -2770,11 +2755,10 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 	vm_page_t mpte;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mpte = pmap_lookup_pt_page(pmap, va);
+	mpte = pmap_remove_pt_page(pmap, va);
 	if (mpte == NULL)
 		panic("pmap_remove_kernel_pde: Missing pt page.");
 
-	pmap_remove_pt_page(pmap, mpte);
 	mptepa = VM_PAGE_TO_PHYS(mpte);
 	newpde = mptepa | PG_M | PG_A | PG_RW | PG_V;
 
@@ -2841,9 +2825,8 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 	if (pmap == kernel_pmap) {
 		pmap_remove_kernel_pde(pmap, pdq, sva);
 	} else {
-		mpte = pmap_lookup_pt_page(pmap, sva);
+		mpte = pmap_remove_pt_page(pmap, sva);
 		if (mpte != NULL) {
-			pmap_remove_pt_page(pmap, mpte);
 			pmap->pm_stats.resident_count--;
 			KASSERT(mpte->wire_count == NPTEPG,
 			    ("pmap_remove_pde: pte page wire count error"));
@@ -4542,9 +4525,8 @@ pmap_remove_pages(pmap_t pmap)
 				if (TAILQ_EMPTY(&mt->md.pv_list))
 					vm_page_aflag_clear(mt, PGA_WRITEABLE);
 			}
-			mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
+			mpte = pmap_remove_pt_page(pmap, pv->pv_va);
 			if (mpte != NULL) {
-				pmap_remove_pt_page(pmap, mpte);
 				pmap->pm_stats.resident_count--;
 				KASSERT(mpte->wire_count == NPTEPG,
 				    ("pmap_remove_pages: pte page wire count error"));
sys/vm/vm_page.c

@@ -1241,9 +1241,8 @@ vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
 /*
  *	vm_page_remove:
  *
- *	Removes the given mem entry from the object/offset-page
- *	table and the object page list, but do not invalidate/terminate
- *	the backing store.
+ *	Removes the specified page from its containing object, but does not
+ *	invalidate any backing storage.
  *
  *	The object must be locked.  The page must be locked if it is managed.
  */
@@ -1251,6 +1250,7 @@ void
 vm_page_remove(vm_page_t m)
 {
 	vm_object_t object;
+	vm_page_t mrem;
 
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_assert_locked(m);
@@ -1259,11 +1259,12 @@ vm_page_remove(vm_page_t m)
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	if (vm_page_xbusied(m))
 		vm_page_xunbusy_maybelocked(m);
+	mrem = vm_radix_remove(&object->rtree, m->pindex);
+	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
 
 	/*
 	 * Now remove from the object's list of backed pages.
 	 */
-	vm_radix_remove(&object->rtree, m->pindex);
 	TAILQ_REMOVE(&object->memq, m, listq);
 
 	/*
sys/vm/vm_radix.c

@@ -660,10 +660,10 @@ vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
 }
 
 /*
- * Remove the specified index from the tree.
- * Panics if the key is not present.
+ * Remove the specified index from the trie, and return the value stored at
+ * that index.  If the index is not present, return NULL.
 */
-void
+vm_page_t
 vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
 {
 	struct vm_radix_node *rnode, *parent;
@@ -674,23 +674,23 @@ vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
 	if (vm_radix_isleaf(rnode)) {
 		m = vm_radix_topage(rnode);
 		if (m->pindex != index)
-			panic("%s: invalid key found", __func__);
+			return (NULL);
 		vm_radix_setroot(rtree, NULL);
-		return;
+		return (m);
 	}
 	parent = NULL;
 	for (;;) {
 		if (rnode == NULL)
-			panic("vm_radix_remove: impossible to locate the key");
+			return (NULL);
 		slot = vm_radix_slot(index, rnode->rn_clev);
 		if (vm_radix_isleaf(rnode->rn_child[slot])) {
 			m = vm_radix_topage(rnode->rn_child[slot]);
 			if (m->pindex != index)
-				panic("%s: invalid key found", __func__);
+				return (NULL);
 			rnode->rn_child[slot] = NULL;
 			rnode->rn_count--;
 			if (rnode->rn_count > 1)
-				break;
+				return (m);
 			for (i = 0; i < VM_RADIX_COUNT; i++)
 				if (rnode->rn_child[i] != NULL)
 					break;
@@ -707,7 +707,7 @@ vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
 			rnode->rn_count--;
 			rnode->rn_child[i] = NULL;
 			vm_radix_node_put(rnode);
-			break;
+			return (m);
 		}
 		parent = rnode;
 		rnode = rnode->rn_child[slot];
sys/vm/vm_radix.h

@@ -42,7 +42,7 @@ vm_page_t vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index);
 vm_page_t vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index);
 vm_page_t vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index);
 void vm_radix_reclaim_allnodes(struct vm_radix *rtree);
-void vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index);
+vm_page_t vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index);
 vm_page_t vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage);
 
 #endif /* _KERNEL */