Add vm_page_any_valid()

Use it and several other vm_page_*_valid() functions in more places.

Suggested and reviewed by:	markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D37024
commit 934bfc128e
parent 5bd45b2ba3
Author: Konstantin Belousov
Date:   2022-10-18 12:16:36 +03:00

4 changed files with 24 additions and 16 deletions
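
For readers skimming the diff: the commit converts open-coded tests of
m->valid into the vm_page_*_valid() predicate family. Below is a minimal
restatement of the three predicates, taken from the sys/vm/vm_page.h hunk
at the end of this diff (the valid bits track DEV_BSIZE-sized chunks of
the page):

	static inline bool
	vm_page_all_valid(vm_page_t m)
	{
		/* Every DEV_BSIZE chunk of the page holds valid data. */
		return (m->valid == VM_PAGE_BITS_ALL);
	}

	static inline bool
	vm_page_any_valid(vm_page_t m)
	{
		/* At least one chunk is valid; the predicate added here. */
		return (m->valid != 0);
	}

	static inline bool
	vm_page_none_valid(vm_page_t m)
	{
		/* No chunk is valid, e.g. a freshly allocated page. */
		return (m->valid == 0);
	}

Each replacement below picks the predicate matching the intent of the old
bitmask comparison: == 0 becomes none_valid, != 0 becomes any_valid, and
== VM_PAGE_BITS_ALL becomes all_valid.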

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c

@@ -6009,7 +6009,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 	 * If the page table page is not leftover from an earlier promotion,
 	 * initialize it.
 	 */
-	if (mpte->valid == 0)
+	if (vm_page_none_valid(mpte))
 		pmap_fill_ptp(firstpte, newpte);
 
 	pmap_demote_pde_check(firstpte, newpte);
@@ -6085,7 +6085,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 	 * If this page table page was unmapped by a promotion, then it
 	 * contains valid mappings.  Zero it to invalidate those mappings.
 	 */
-	if (mpte->valid != 0)
+	if (vm_page_any_valid(mpte))
 		pagezero((void *)PHYS_TO_DMAP(mptepa));
 
 	/*
@@ -6151,7 +6151,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 	} else {
 		mpte = pmap_remove_pt_page(pmap, sva);
 		if (mpte != NULL) {
-			KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+			KASSERT(vm_page_all_valid(mpte),
 			    ("pmap_remove_pde: pte page not promoted"));
 			pmap_pt_page_count_adj(pmap, -1);
 			KASSERT(mpte->ref_count == NPTEPG,
@@ -7678,7 +7678,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
 			return;
 		p = vm_page_lookup(object, pindex);
-		KASSERT(p->valid == VM_PAGE_BITS_ALL,
+		KASSERT(vm_page_all_valid(p),
 		    ("pmap_object_init_pt: invalid page %p", p));
 		pat_mode = p->md.pat_mode;
 
@@ -7698,7 +7698,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 			p = TAILQ_NEXT(p, listq);
 			for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 			    pa += PAGE_SIZE) {
-				KASSERT(p->valid == VM_PAGE_BITS_ALL,
+				KASSERT(vm_page_all_valid(p),
 				    ("pmap_object_init_pt: invalid page %p", p));
 				if (pa != VM_PAGE_TO_PHYS(p) ||
 				    pat_mode != p->md.pat_mode)
@@ -8445,7 +8445,7 @@ pmap_remove_pages(pmap_t pmap)
 				}
 				mpte = pmap_remove_pt_page(pmap, pv->pv_va);
 				if (mpte != NULL) {
-					KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+					KASSERT(vm_page_all_valid(mpte),
 					    ("pmap_remove_pages: pte page not promoted"));
 					pmap_pt_page_count_adj(pmap, -1);
 					KASSERT(mpte->ref_count == NPTEPG,

diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c

@@ -2847,7 +2847,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 	 * If the page table page is not leftover from an earlier promotion,
 	 * initialize it.
 	 */
-	if (mpte->valid == 0)
+	if (vm_page_none_valid(mpte))
 		pmap_fill_ptp(firstpte, newpte);
 
 	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
@@ -2922,7 +2922,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 	 * If this page table page was unmapped by a promotion, then it
 	 * contains valid mappings.  Zero it to invalidate those mappings.
 	 */
-	if (mpte->valid != 0)
+	if (vm_page_any_valid(mpte))
 		pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]);
 
 	/*
@@ -2986,7 +2986,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 	} else {
 		mpte = pmap_remove_pt_page(pmap, sva);
 		if (mpte != NULL) {
-			KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+			KASSERT(vm_page_all_valid(mpte),
 			    ("pmap_remove_pde: pte page not promoted"));
 			pmap->pm_stats.resident_count--;
 			KASSERT(mpte->ref_count == NPTEPG,
@@ -4209,7 +4209,7 @@ __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr,
 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
 			return;
 		p = vm_page_lookup(object, pindex);
-		KASSERT(p->valid == VM_PAGE_BITS_ALL,
+		KASSERT(vm_page_all_valid(p),
 		    ("pmap_object_init_pt: invalid page %p", p));
 		pat_mode = p->md.pat_mode;
 
@@ -4229,7 +4229,7 @@ __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr,
 			p = TAILQ_NEXT(p, listq);
 			for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 			    pa += PAGE_SIZE) {
-				KASSERT(p->valid == VM_PAGE_BITS_ALL,
+				KASSERT(vm_page_all_valid(p),
 				    ("pmap_object_init_pt: invalid page %p", p));
 				if (pa != VM_PAGE_TO_PHYS(p) ||
 				    pat_mode != p->md.pat_mode)
@@ -4837,7 +4837,7 @@ __CONCAT(PMTYPE, remove_pages)(pmap_t pmap)
 			}
 			mpte = pmap_remove_pt_page(pmap, pv->pv_va);
 			if (mpte != NULL) {
-				KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+				KASSERT(vm_page_all_valid(mpte),
 				    ("pmap_remove_pages: pte page not promoted"));
 				pmap->pm_stats.resident_count--;
 				KASSERT(mpte->ref_count == NPTEPG,

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c

@@ -2548,7 +2548,7 @@ vm_page_alloc_check(vm_page_t m)
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
-	KASSERT(m->valid == 0, ("free page %p is valid", m));
+	KASSERT(vm_page_none_valid(m), ("free page %p is valid", m));
 	pmap_vm_page_alloc_check(m);
 }
 
@@ -4226,7 +4226,7 @@ vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
 	 * If we were asked to not cache the page, place it near the head of the
 	 * inactive queue so that is reclaimed sooner.
 	 */
-	if (noreuse || m->valid == 0) {
+	if (noreuse || vm_page_none_valid(m)) {
 		nqueue = PQ_INACTIVE;
 		nflag = PGA_REQUEUE_HEAD;
 	} else {
@@ -4704,7 +4704,8 @@ vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
 		ma[0] = m;
 		for (i = 1; i < after; i++) {
 			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
-				if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
+				if (vm_page_any_valid(ma[i]) ||
+				    !vm_page_tryxbusy(ma[i]))
 					break;
 			} else {
 				ma[i] = vm_page_alloc(object, m->pindex + i,
@@ -5392,7 +5393,7 @@ vm_page_is_valid(vm_page_t m, int base, int size)
 	vm_page_bits_t bits;
 
 	bits = vm_page_bits(base, size);
-	return (m->valid != 0 && (m->valid & bits) == bits);
+	return (vm_page_any_valid(m) && (m->valid & bits) == bits);
 }
 
 /*

diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h

@@ -993,6 +993,13 @@ vm_page_all_valid(vm_page_t m)
 	return (m->valid == VM_PAGE_BITS_ALL);
 }
 
+static inline bool
+vm_page_any_valid(vm_page_t m)
+{
+
+	return (m->valid != 0);
+}
+
 static inline bool
 vm_page_none_valid(vm_page_t m)
 {
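
A closing observation (mine, not part of the commit): the predicates make
the relationships among the valid states checkable at a glance. A
hypothetical assertion helper, assuming only the definitions above:

	/*
	 * Hypothetical sanity check, not in the tree: any_valid is the
	 * exact negation of none_valid, and all_valid implies any_valid.
	 */
	static inline void
	vm_page_assert_valid_predicates(vm_page_t m)
	{
		KASSERT(vm_page_any_valid(m) != vm_page_none_valid(m),
		    ("valid predicates disagree for page %p", m));
		KASSERT(!vm_page_all_valid(m) || vm_page_any_valid(m),
		    ("fully valid page %p has no valid bits", m));
	}

Centralizing the tests behind predicates also leaves room to change how
m->valid is represented later without touching every caller, which is the
usual payoff of this kind of conversion.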