arm64/pmap: Sparsify pv_table

Reviewed by:	markj, kib
Approved by:	scottl (implicit)
MFC after:	1 week
Sponsored by:	Ampere Computing, Inc.
Differential Revision:	https://reviews.freebsd.org/D26132
Author:	D Scott Phillips 2020-09-21 22:23:57 +00:00
Parent:	7988971a99
Commit:	de03184698
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=365981
2 changed files with 71 additions and 18 deletions

sys/arm64/arm64/pmap.c

@@ -178,7 +178,31 @@ __FBSDID("$FreeBSD$");
#endif
#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
#define pa_to_pvh(pa) (&pv_table[pmap_l2_pindex(pa)])
static struct md_page *
pa_to_pvh(vm_paddr_t pa)
{
    struct vm_phys_seg *seg;
    int segind;

    for (segind = 0; segind < vm_phys_nsegs; segind++) {
        seg = &vm_phys_segs[segind];
        if (pa >= seg->start && pa < seg->end)
            return ((struct md_page *)seg->md_first +
                pmap_l2_pindex(pa) - pmap_l2_pindex(seg->start));
    }
    panic("pa 0x%jx not within vm_phys_segs", (uintmax_t)pa);
}

static struct md_page *
page_to_pvh(vm_page_t m)
{
    struct vm_phys_seg *seg;

    seg = &vm_phys_segs[m->segind];
    return ((struct md_page *)seg->md_first +
        pmap_l2_pindex(VM_PAGE_TO_PHYS(m)) - pmap_l2_pindex(seg->start));
}
#define NPV_LIST_LOCKS MAXCPU
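
Before this change, pa_to_pvh() was a macro that indexed one flat pv_table by pmap_l2_pindex(pa), so the table had to cover every 2 MB superpage from physical address zero up to the end of the last vm_phys_seg, including holes in the physical memory map. Now each vm_phys_seg carries md_first, a pointer to its own densely packed slice of pv_table; pa_to_pvh() locates the owning segment by scanning vm_phys_segs, while page_to_pvh() reaches it directly through m->segind. The following user-space sketch models only the offset arithmetic; the struct layout, names, and addresses are invented for illustration and are not part of the commit.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define L2_SHIFT 21                      /* 2 MB superpages, as on arm64 */

struct md_page { int dummy; };           /* stand-in for the real pv list head */

struct seg {
    uint64_t start, end;                 /* physical range [start, end) */
    struct md_page *md_first;            /* first pv entry for this segment */
};

/* Models pa_to_pvh(): offset from the owning segment's first entry. */
static struct md_page *
seg_pa_to_pvh(const struct seg *seg, uint64_t pa)
{
    assert(pa >= seg->start && pa < seg->end);
    return (seg->md_first + (pa >> L2_SHIFT) - (seg->start >> L2_SHIFT));
}

int
main(void)
{
    struct md_page slice[4];             /* 8 MB segment = four 2 MB superpages */
    struct seg seg = { 0x40000000, 0x40800000, slice };

    /* 0x40200000 lies in the segment's second superpage, so index 1. */
    printf("pv entry index: %td\n", seg_pa_to_pvh(&seg, 0x40200000) - slice);
    return (0);
}

page_to_pvh() avoids the linear scan because every vm_page already records the index of its segment in m->segind; both paths end up doing the same subtraction against the segment's starting superpage index.
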
@@ -1049,6 +1073,8 @@ pmap_init_asids(struct asid_set *set, int bits)
void
pmap_init(void)
{
    struct vm_phys_seg *seg, *next_seg;
    struct md_page *pvh;
    vm_size_t s;
    uint64_t mmfr1;
    int i, pv_npg, vmid_bits;
@@ -1093,7 +1119,12 @@ pmap_init(void)
    /*
     * Calculate the size of the pv head table for superpages.
     */
    pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
    pv_npg = 0;
    for (i = 0; i < vm_phys_nsegs; i++) {
        seg = &vm_phys_segs[i];
        pv_npg += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
            pmap_l2_pindex(seg->start);
    }

    /*
     * Allocate memory for the pv head table for superpages.
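
Previously pv_npg was computed as howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE), i.e. one md_page for every 2 MB superpage below the end of the last segment, whether or not any RAM lives there. The replacement loop counts only the superpages that some segment actually overlaps. A rough standalone comparison, using an invented two-segment layout (2 GB of RAM at 2 GB, another 2 GB at 32 GB) rather than any real machine's memory map:

#include <inttypes.h>
#include <stdio.h>

#define L2_SIZE (1ULL << 21)    /* 2 MB */

int
main(void)
{
    /* Invented layout: 2 GB at 2 GB, then 2 GB more at 32 GB. */
    uint64_t start[2] = { 0x080000000ULL, 0x800000000ULL };
    uint64_t end[2]   = { 0x100000000ULL, 0x880000000ULL };

    /* Old sizing: every superpage up to the end of the last segment. */
    uint64_t dense = (end[1] + L2_SIZE - 1) / L2_SIZE;

    /* New sizing: only superpages overlapped by some segment. */
    uint64_t sparse = 0;
    for (int i = 0; i < 2; i++)
        sparse += (end[i] + L2_SIZE - 1) / L2_SIZE - start[i] / L2_SIZE;

    printf("dense:  %" PRIu64 " pv entries\n", dense);     /* 17408 */
    printf("sparse: %" PRIu64 " pv entries\n", sparse);    /* 2048 */
    return (0);
}

With the dense sizing, the hole between the two ranges and the region below the first one still cost pv_table entries; with the per-segment sizing, only the 2048 superpages that actually contain RAM do.
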
@@ -1105,6 +1136,31 @@ pmap_init(void)
        TAILQ_INIT(&pv_table[i].pv_list);
    TAILQ_INIT(&pv_dummy.pv_list);

    /*
     * Set pointers from vm_phys_segs to pv_table.
     */
    for (i = 0, pvh = pv_table; i < vm_phys_nsegs; i++) {
        seg = &vm_phys_segs[i];
        seg->md_first = pvh;
        pvh += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
            pmap_l2_pindex(seg->start);

        /*
         * If there is a following segment, and the final
         * superpage of this segment and the initial superpage
         * of the next segment are the same then adjust the
         * pv_table entry for that next segment down by one so
         * that the pv_table entries will be shared.
         */
        if (i + 1 < vm_phys_nsegs) {
            next_seg = &vm_phys_segs[i + 1];
            if (pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - 1 ==
                pmap_l2_pindex(next_seg->start)) {
                pvh--;
            }
        }
    }

    vm_initialized = 1;
}
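
The slices are carved out of the single pv_table allocation in order, so a segment's md_first normally begins right after the previous segment's slice. The case handled above is a segment whose last superpage is also the first superpage of the next segment: that superpage gets a single md_page shared by both, so the running pvh pointer is stepped back by one before it becomes the next segment's md_first. A small standalone sketch of the test with made-up addresses, not FreeBSD code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define L2_SHIFT 21
#define L2_SIZE  (1ULL << L2_SHIFT)

/* Round pa up to the next 2 MB boundary. */
static uint64_t
roundup_l2(uint64_t pa)
{
    return ((pa + L2_SIZE - 1) & ~(L2_SIZE - 1));
}

/* Models the sharing test used in pmap_init() above. */
static bool
shares_last_pvh(uint64_t seg_end, uint64_t next_start)
{
    return ((roundup_l2(seg_end) >> L2_SHIFT) - 1 == (next_start >> L2_SHIFT));
}

int
main(void)
{
    /* Segment ends at 0x40100000 and the next starts there: same superpage. */
    printf("%d\n", shares_last_pvh(0x40100000, 0x40100000));    /* 1: shared */
    /* Segment ends on a 2 MB boundary, next starts a superpage later. */
    printf("%d\n", shares_last_pvh(0x40200000, 0x40400000));    /* 0: separate */
    return (0);
}

Sharing the entry keeps pa_to_pvh() consistent: any physical address in that boundary superpage resolves to the same md_page no matter which of the two segments it falls in.
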
@@ -2247,7 +2303,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
m->md.pv_gen++;
if (TAILQ_EMPTY(&m->md.pv_list) &&
(m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = page_to_pvh(m);
if (TAILQ_EMPTY(&pvh->pv_list)) {
vm_page_aflag_clear(m,
PGA_WRITEABLE);
@@ -2788,7 +2844,7 @@ pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
pmap_pvh_free(&m->md, pmap, va);
if (TAILQ_EMPTY(&m->md.pv_list) &&
(m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = page_to_pvh(m);
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
@@ -2858,7 +2914,7 @@ pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
pmap_pvh_free(&m->md, pmap, sva);
if (TAILQ_EMPTY(&m->md.pv_list) &&
(m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = page_to_pvh(m);
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
@@ -2997,8 +3053,7 @@ pmap_remove_all(vm_page_t m)
("pmap_remove_all: page %p is not managed", m));
SLIST_INIT(&free);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
retry:
rw_wlock(lock);
while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
@@ -4480,7 +4535,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
break;
}
if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = page_to_pvh(m);
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
@@ -4535,7 +4590,7 @@ pmap_page_wired_mappings(vm_page_t m)
PMAP_UNLOCK(pmap);
}
if ((m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = page_to_pvh(m);
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
if (!PMAP_TRYLOCK(pmap)) {
@@ -4577,7 +4632,7 @@ pmap_page_is_mapped(vm_page_t m)
rw_rlock(lock);
rv = !TAILQ_EMPTY(&m->md.pv_list) ||
((m->flags & PG_FICTITIOUS) == 0 &&
!TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
!TAILQ_EMPTY(&page_to_pvh(m)->pv_list));
rw_runlock(lock);
return (rv);
}
@@ -4740,8 +4795,7 @@ pmap_remove_pages(pmap_t pmap)
if ((m->a.flags & PGA_WRITEABLE) != 0 &&
TAILQ_EMPTY(&m->md.pv_list) &&
(m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(
VM_PAGE_TO_PHYS(m));
pvh = page_to_pvh(m);
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_aflag_clear(m,
PGA_WRITEABLE);
@@ -4818,7 +4872,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
goto out;
}
if ((m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = page_to_pvh(m);
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
PMAP_ASSERT_STAGE1(pmap);
@@ -4938,8 +4992,7 @@ pmap_remove_write(vm_page_t m)
if (!pmap_page_is_write_mapped(m))
return;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
retry_pv_loop:
rw_wlock(lock);
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
@@ -5035,7 +5088,7 @@ pmap_ts_referenced(vm_page_t m)
cleared = 0;
pa = VM_PAGE_TO_PHYS(m);
lock = PHYS_TO_PV_LIST_LOCK(pa);
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
rw_wlock(lock);
retry:
not_cleared = 0;
@@ -5312,8 +5365,7 @@ pmap_clear_modify(vm_page_t m)
if (!pmap_page_is_write_mapped(m))
return;
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
pa_to_pvh(VM_PAGE_TO_PHYS(m));
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_wlock(lock);
restart:

sys/vm/vm_phys.h

@@ -72,6 +72,7 @@ struct vm_phys_seg {
#if VM_NRESERVLEVEL > 0
    vm_reserv_t first_reserv;
#endif
    void *md_first;
    int domain;
    struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
};