Convert vm_page_alloc() callers to use vm_page_alloc_noobj().

Remove page zeroing code from consumers and stop specifying
VM_ALLOC_NOOBJ.  In a few places, also convert an allocation loop to
simply use VM_ALLOC_WAITOK.

Similarly, convert vm_page_alloc_domain() callers.

Note that callers are now responsible for assigning the pindex.
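
As an illustration of the pattern (a sketch only; "flags", "pindex", and
"req" below are placeholders rather than code lifted from any particular
file), a typical caller changes from

	m = vm_page_alloc(NULL, pindex, flags | VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

to

	m = vm_page_alloc_noobj(flags | VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);
	m->pindex = pindex;

since vm_page_alloc_noobj() returns an already-zeroed page when
VM_ALLOC_ZERO is requested, and no longer takes an object/pindex pair.
Likewise, retry loops of the form

	while ((m = vm_page_alloc(NULL, 0, req | VM_ALLOC_NOOBJ)) == NULL)
		vm_wait(NULL);

collapse to a single call:

	m = vm_page_alloc_noobj(req | VM_ALLOC_WAITOK);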

Reviewed by:	alc, hselasky, kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D31986
commit a4667e09e6 (parent b498f71bc5)
Author:	Mark Johnston
Date:	2021-10-19 20:23:39 -04:00

32 changed files with 146 additions and 280 deletions


@ -305,8 +305,7 @@ mp_realloc_pcpu(int cpuid, int domain)
oa = (vm_offset_t)&__pcpu[cpuid];
if (vm_phys_domain(pmap_kextract(oa)) == domain)
return;
m = vm_page_alloc_domain(NULL, 0, domain,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
m = vm_page_alloc_noobj_domain(domain, 0);
if (m == NULL)
return;
na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));


@ -2342,10 +2342,9 @@ pmap_init_pv_table(void)
highest = start + (s / sizeof(*pvd)) - 1;
for (j = 0; j < s; j += PAGE_SIZE) {
vm_page_t m = vm_page_alloc_domain(NULL, 0,
domain, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
vm_page_t m = vm_page_alloc_noobj_domain(domain, 0);
if (m == NULL)
panic("vm_page_alloc_domain failed for %lx\n", (vm_offset_t)pvd + j);
panic("failed to allocate PV table page");
pmap_qenter((vm_offset_t)pvd + j, &m, 1);
}
@ -4312,15 +4311,11 @@ pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags)
{
vm_page_t m;
m = vm_page_alloc(NULL, pindex, flags | VM_ALLOC_NOOBJ);
m = vm_page_alloc_noobj(flags);
if (__predict_false(m == NULL))
return (NULL);
m->pindex = pindex;
pmap_pt_page_count_adj(pmap, 1);
if ((flags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
return (m);
}
@ -4358,8 +4353,8 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
/*
* allocate the page directory page
*/
pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_WIRED | VM_ALLOC_ZERO |
VM_ALLOC_WAITOK);
pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg);
pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys);
@ -4389,8 +4384,7 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
pmap_pinit_pml4(pmltop_pg);
if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
pmltop_pgu = pmap_alloc_pt_page(NULL, 0,
VM_ALLOC_WIRED | VM_ALLOC_NORMAL |
VM_ALLOC_WAITOK);
VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
VM_PAGE_TO_PHYS(pmltop_pgu));
if (pmap_is_la57(pmap))
@ -5480,8 +5474,7 @@ get_pv_entry(pmap_t pmap, struct rwlock **lockp)
}
}
/* No free items, allocate another chunk */
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
if (lockp == NULL) {
PV_STAT(counter_u64_add(pc_chunk_tryfail, 1));
@ -5584,8 +5577,7 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
break;
}
for (reclaimed = false; avail < needed; avail += _NPCPV) {
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
m = reclaim_pv_chunk(pmap, lockp);
if (m == NULL)
@ -5957,8 +5949,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
* priority is normal.
*/
mpte = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
(in_kernel ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_WIRED);
(in_kernel ? VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED);
/*
* If the allocation of the new page table page fails,
@ -10355,8 +10346,7 @@ pmap_quick_remove_page(vm_offset_t addr)
static vm_page_t
pmap_large_map_getptp_unlocked(void)
{
return (pmap_alloc_pt_page(kernel_pmap, 0,
VM_ALLOC_NORMAL | VM_ALLOC_ZERO));
return (pmap_alloc_pt_page(kernel_pmap, 0, VM_ALLOC_ZERO));
}
static vm_page_t
@ -11411,12 +11401,10 @@ pmap_kasan_enter_alloc_4k(void)
{
vm_page_t m;
m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (m == NULL)
panic("%s: no memory to grow shadow map", __func__);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
return (m);
}
@ -11481,12 +11469,10 @@ pmap_kmsan_enter_alloc_4k(void)
{
vm_page_t m;
m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (m == NULL)
panic("%s: no memory to grow shadow map", __func__);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
return (m);
}


@ -49,16 +49,14 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
void *va;
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
pa = m->phys_addr;
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);
va = (void *)PHYS_TO_DMAP(pa);
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
pagezero(va);
return (va);
}


@ -2069,12 +2069,12 @@ pmap_growkernel(vm_offset_t addr)
/*
* Install new PT2s page into kernel PT2TAB.
*/
m = vm_page_alloc(NULL,
pte1_index(kernel_vm_end) & ~PT2PG_MASK,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (m == NULL)
panic("%s: no memory to grow kernel", __func__);
m->pindex = pte1_index(kernel_vm_end) & ~PT2PG_MASK;
/*
* QQQ: To link all new L2 page tables from L1 page
* table now and so pmap_kenter_pte1() them
@ -2488,8 +2488,7 @@ _pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags)
/*
* Install new PT2s page into pmap PT2TAB.
*/
m = vm_page_alloc(NULL, pte1_idx & ~PT2PG_MASK,
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (m == NULL) {
if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
@ -2505,6 +2504,7 @@ _pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags)
*/
return (NULL);
}
m->pindex = pte1_idx & ~PT2PG_MASK;
pmap->pm_stats.resident_count++;
pt2pg_pa = pmap_pt2pg_init(pmap, va, m);
} else {
@ -3062,8 +3062,8 @@ get_pv_entry(pmap_t pmap, boolean_t try)
* global lock. If "pv_vafree" is currently non-empty, it will
* remain non-empty until pmap_pte2list_alloc() completes.
*/
if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
if (pv_vafree == 0 ||
(m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
if (try) {
pv_entry_count--;
PV_STAT(pc_chunk_tryfail++);
@ -3711,9 +3711,8 @@ pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
* "failure" if the mapping was never accessed or the
* allocation of the new page table page fails.
*/
if ((opte1 & PTE1_A) == 0 || (m = vm_page_alloc(NULL,
pte1_index(va) & ~PT2PG_MASK, VM_ALLOC_NOOBJ |
VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) {
if ((opte1 & PTE1_A) == 0 ||
(m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free);
vm_page_free_pages_toq(&free, false);
@ -3721,6 +3720,7 @@ pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
__func__, va, pmap);
return (FALSE);
}
m->pindex = pte1_index(va) & ~PT2PG_MASK;
if (va < VM_MAXUSER_ADDRESS)
pmap->pm_stats.resident_count++;


@ -1766,16 +1766,11 @@ pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage, int levels)
/*
* allocate the l0 page
*/
while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
vm_wait(NULL);
m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
if ((m->flags & PG_ZERO) == 0)
pagezero(pmap->pm_l0);
pmap->pm_root.rt_root = 0;
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
@ -1841,8 +1836,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if (lockp != NULL) {
RELEASE_PV_LIST_LOCK(lockp);
PMAP_UNLOCK(pmap);
@ -1856,8 +1850,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
*/
return (NULL);
}
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->pindex = ptepindex;
/*
* Because of AArch64's weak memory consistency model, we must have a
@ -2191,13 +2184,11 @@ pmap_growkernel(vm_offset_t addr)
l1 = pmap_l0_to_l1(l0, kernel_vm_end);
if (pmap_load(l1) == 0) {
/* We need a new PDP entry */
nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0)
pmap_zero_page(nkpg);
nkpg->pindex = kernel_vm_end >> L1_SHIFT;
/* See the dmb() in _pmap_alloc_l3(). */
dmb(ishst);
paddr = VM_PAGE_TO_PHYS(nkpg);
@ -2214,13 +2205,11 @@ pmap_growkernel(vm_offset_t addr)
continue;
}
nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0)
pmap_zero_page(nkpg);
nkpg->pindex = kernel_vm_end >> L2_SHIFT;
/* See the dmb() in _pmap_alloc_l3(). */
dmb(ishst);
paddr = VM_PAGE_TO_PHYS(nkpg);
@ -2565,8 +2554,7 @@ get_pv_entry(pmap_t pmap, struct rwlock **lockp)
}
}
/* No free items, allocate another chunk */
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
if (lockp == NULL) {
PV_STAT(pc_chunk_tryfail++);
@ -2631,8 +2619,7 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
break;
}
for (reclaimed = false; avail < needed; avail += _NPCPV) {
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
m = reclaim_pv_chunk(pmap, lockp);
if (m == NULL)
@ -6148,8 +6135,8 @@ pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
return (NULL);
}
if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
if ((ml2 = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED)) ==
NULL) {
CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
" in pmap %p", va, pmap);
l2 = NULL;
@ -6282,9 +6269,9 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
* priority (VM_ALLOC_INTERRUPT). Otherwise, the
* priority is normal.
*/
ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
(VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
ml3 = vm_page_alloc_noobj(
(VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
VM_ALLOC_WIRED);
/*
* If the allocation of the new page table page fails,
@ -6296,6 +6283,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
" in pmap %p", va, pmap);
goto fail;
}
ml3->pindex = pmap_l2_pindex(va);
if (!ADDR_IS_KERNEL(va)) {
ml3->ref_count = NL3PG;


@ -47,16 +47,14 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
void *va;
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
pa = m->phys_addr;
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);
va = (void *)PHYS_TO_DMAP(pa);
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
pagezero(va);
return (va);
}


@ -387,16 +387,11 @@ iommu_pmap_pinit_levels(pmap_t pmap, int levels)
/*
* allocate the l0 page
*/
while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
vm_wait(NULL);
m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
if ((m->flags & PG_ZERO) == 0)
pagezero(pmap->pm_l0);
pmap->pm_root.rt_root = 0;
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
@ -446,16 +441,14 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex)
/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
/*
* Indicate the need to retry. While waiting, the page table
* page may have been allocated.
*/
return (NULL);
}
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->pindex = ptepindex;
/*
* Because of AArch64's weak memory consistency model, we must have a


@ -97,7 +97,7 @@ linux_alloc_pages(gfp_t flags, unsigned int order)
if ((flags & M_ZERO) != 0)
req |= VM_ALLOC_ZERO;
if (order == 0 && (flags & GFP_DMA32) == 0) {
page = vm_page_alloc(NULL, 0, req);
page = vm_page_alloc_noobj(req);
if (page == NULL)
return (NULL);
} else {


@ -178,12 +178,7 @@ ttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
{
vm_page_t p;
while (1) {
p = vm_page_alloc(NULL, 0, req);
if (p != NULL)
break;
vm_wait(NULL);
}
p = vm_page_alloc_noobj(req | VM_ALLOC_WAITOK);
pmap_page_set_memattr(p, memattr);
return (p);
}


@ -1609,8 +1609,7 @@ ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
"failed -- packet dropped!\n");
goto nobufs;
}
frame = vm_page_alloc(NULL, 0,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
frame = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED);
if (frame == NULL) {
device_printf(sc->ti_dev, "buffer allocation "


@ -460,8 +460,7 @@ vtballoon_alloc_page(struct vtballoon_softc *sc)
{
vm_page_t m;
m = vm_page_alloc(NULL, 0,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP);
m = vm_page_alloc_noobj(VM_ALLOC_NODUMP);
if (m != NULL)
sc->vtballoon_current_npages++;


@ -228,25 +228,18 @@ decrease_reservation(unsigned long nr_pages)
nr_pages = nitems(frame_list);
for (i = 0; i < nr_pages; i++) {
if ((page = vm_page_alloc(NULL, 0,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_ZERO)) == NULL) {
/*
* Zero the page, or else we might be leaking important data to
* other domains on the same host. Xen doesn't scrub ballooned
* out memory pages, the guest is in charge of making sure that
* no information is leaked.
*/
if ((page = vm_page_alloc_noobj(VM_ALLOC_ZERO)) == NULL) {
nr_pages = i;
need_sleep = 1;
break;
}
if ((page->flags & PG_ZERO) == 0) {
/*
* Zero the page, or else we might be leaking
* important data to other domains on the same
* host. Xen doesn't scrub ballooned out memory
* pages, the guest is in charge of making
* sure that no information is leaked.
*/
pmap_zero_page(page);
}
frame_list[i] = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
TAILQ_INSERT_HEAD(&ballooned_pages, page, plinks.q);


@ -368,20 +368,13 @@ gntdev_alloc_gref(struct ioctl_gntdev_alloc_gref *arg)
grefs[i].file_index = file_offset + i * PAGE_SIZE;
grefs[i].gref_id = GRANT_REF_INVALID;
grefs[i].notify = NULL;
grefs[i].page = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL
| VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
grefs[i].page = vm_page_alloc_noobj(VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (grefs[i].page == NULL) {
log(LOG_ERR, "Page allocation failed.");
error = ENOMEM;
break;
}
if ((grefs[i].page->flags & PG_ZERO) == 0) {
/*
* Zero the allocated page, as we don't want to
* leak our memory to other domains.
*/
pmap_zero_page(grefs[i].page);
}
grefs[i].page->valid = VM_PAGE_BITS_ALL;
error = gnttab_grant_foreign_access(arg->domid,


@ -4913,13 +4913,8 @@ nfsm_add_ext_pgs(struct mbuf *m, int maxextsiz, int *bextpg)
*bextpg = 0;
m->m_next = mp;
} else {
do {
pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
VM_ALLOC_WIRED);
if (pg == NULL)
vm_wait(NULL);
} while (pg == NULL);
pg = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_NODUMP |
VM_ALLOC_WIRED);
m->m_epg_pa[m->m_epg_npgs] = VM_PAGE_TO_PHYS(pg);
*bextpg = m->m_epg_npgs;
m->m_epg_npgs++;


@ -8920,13 +8920,8 @@ nfsm_split(struct mbuf *mp, uint64_t xfer)
* page.
*/
if (left < plen) {
do {
pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
VM_ALLOC_WIRED);
if (pg == NULL)
vm_wait(NULL);
} while (pg == NULL);
pg = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_NODUMP |
VM_ALLOC_WIRED);
m2->m_epg_pa[0] = VM_PAGE_TO_PHYS(pg);
m2->m_epg_npgs = 1;


@ -2057,7 +2057,6 @@ __CONCAT(PMTYPE, pinit0)(pmap_t pmap)
static int
__CONCAT(PMTYPE, pinit)(pmap_t pmap)
{
vm_page_t m;
int i;
/*
@ -2085,11 +2084,10 @@ __CONCAT(PMTYPE, pinit)(pmap_t pmap)
* allocate the page directory page(s)
*/
for (i = 0; i < NPGPTD; i++) {
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
pmap->pm_ptdpg[i] = m;
pmap->pm_ptdpg[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED |
VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
#ifdef PMAP_PAE_COMP
pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(m) | PG_V;
pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(pmap->pm_ptdpg[i]) | PG_V;
#endif
}
@ -2103,10 +2101,6 @@ __CONCAT(PMTYPE, pinit)(pmap_t pmap)
}
#endif
for (i = 0; i < NPGPTD; i++)
if ((pmap->pm_ptdpg[i]->flags & PG_ZERO) == 0)
pagezero(pmap->pm_pdir + (i * NPDEPG));
/* Install the trampoline mapping. */
pmap->pm_pdir[TRPTDI] = PTD[TRPTDI];
@ -2130,8 +2124,7 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
@ -2146,8 +2139,7 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
*/
return (NULL);
}
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->pindex = ptepindex;
/*
* Map the pagetable page into the process address space, if
@ -2271,16 +2263,13 @@ __CONCAT(PMTYPE, growkernel)(vm_offset_t addr)
continue;
}
nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
nkpg->pindex = kernel_vm_end >> PDRSHIFT;
nkpt++;
if ((nkpg->flags & PG_ZERO) == 0)
pmap_zero_page(nkpg);
ptppaddr = VM_PAGE_TO_PHYS(nkpg);
newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
pdir_pde(KPTD, kernel_vm_end) = newpdir;
@ -2575,8 +2564,8 @@ get_pv_entry(pmap_t pmap, boolean_t try)
* global lock. If "pv_vafree" is currently non-empty, it will
* remain non-empty until pmap_ptelist_alloc() completes.
*/
if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
if (pv_vafree == 0 ||
(m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
if (try) {
pv_entry_count--;
PV_STAT(pc_chunk_tryfail++);
@ -2808,9 +2797,8 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
* "failure" if the mapping was never accessed or the
* allocation of the new page table page fails.
*/
if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
VM_ALLOC_WIRED)) == NULL) {
if ((oldpde & PG_A) == 0 ||
(mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
sva = trunc_4mpage(va);
pmap_remove_pde(pmap, pde, sva, &free);
@ -2821,6 +2809,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
" in pmap %p", va, pmap);
return (FALSE);
}
mpte->pindex = va >> PDRSHIFT;
if (pmap != kernel_pmap) {
mpte->ref_count = NPTEPG;
pmap->pm_stats.resident_count++;
@ -5914,8 +5903,7 @@ pmap_trm_import(void *unused __unused, vmem_size_t size, int flags,
prev_addr += trm_guard;
trm_pte = PTmap + atop(prev_addr);
for (af = prev_addr; af < addr; af += PAGE_SIZE) {
m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
PG_M | PG_A | PG_RW | PG_V | pgeflag |
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE));
@ -5934,10 +5922,8 @@ pmap_init_trm(void)
trm_guard = 0;
pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO);
if ((pd_m->flags & PG_ZERO) == 0)
pmap_zero_page(pd_m);
pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK |
VM_ALLOC_ZERO);
PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
}


@ -1624,8 +1624,8 @@ mb_alloc_ext_plus_pages(int len, int how)
npgs = howmany(len, PAGE_SIZE);
for (i = 0; i < npgs; i++) {
do {
pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP | VM_ALLOC_WIRED);
pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
VM_ALLOC_WIRED);
if (pg == NULL) {
if (how == M_NOWAIT) {
m->m_epg_npgs = i;


@ -2101,12 +2101,8 @@ ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
} else {
off = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
do {
pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL);
} while (pg == NULL);
pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
len = m_epg_pagelen(m, i, off);
state->parray[i] = VM_PAGE_TO_PHYS(pg);
state->dst_iov[i].iov_base =


@ -1767,8 +1767,7 @@ m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
int error, length, i, needed;
ssize_t total;
int pflags = malloc2vm_flags(how) | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
VM_ALLOC_WIRED;
int pflags = malloc2vm_flags(how) | VM_ALLOC_NODUMP | VM_ALLOC_WIRED;
MPASS((flags & M_PKTHDR) == 0);
MPASS((how & M_ZERO) == 0);
@ -1816,7 +1815,7 @@ m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
needed = length = MIN(maxseg, total);
for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
pg_array[i] = vm_page_alloc(NULL, 0, pflags);
pg_array[i] = vm_page_alloc_noobj(pflags);
if (pg_array[i] == NULL) {
if (how & M_NOWAIT) {
goto failed;


@ -4926,9 +4926,8 @@ vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
* could interfere with paging I/O, no matter which
* process we are.
*/
p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
VM_ALLOC_WAITOK);
p = vm_page_alloc_noobj(VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) | VM_ALLOC_WAITOK);
pmap_qenter(pg, &p, 1);
bp->b_pages[index] = p;
}


@ -1115,10 +1115,6 @@ pmap_alloc_direct_page(unsigned int index, int req)
VM_ALLOC_ZERO);
if (m == NULL)
return (NULL);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->pindex = index;
return (m);
}


@ -75,8 +75,6 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);
va = (void *)MIPS_PHYS_TO_DIRECT(pa);
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
bzero(va, PAGE_SIZE);
return (va);
}


@ -1915,8 +1915,8 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
*flags = UMA_SLAB_PRIV;
needed_lock = !PMAP_LOCKED(kernel_pmap);
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
@ -1938,9 +1938,6 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
if (needed_lock)
PMAP_UNLOCK(kernel_pmap);
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
bzero((void *)va, PAGE_SIZE);
return (void *)va;
}


@ -1214,8 +1214,7 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
break;
}
for (reclaimed = false; avail < needed; avail += _NPCPV) {
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
m = reclaim_pv_chunk(pmap, lockp);
if (m == NULL)
@ -1637,8 +1636,7 @@ get_pv_entry(pmap_t pmap, struct rwlock **lockp)
}
}
/* No free items, allocate another chunk */
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
if (lockp == NULL) {
PV_STAT(pc_chunk_tryfail++);
@ -3529,13 +3527,11 @@ mmu_radix_growkernel(vm_offset_t addr)
l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
if ((be64toh(*l2e) & PG_V) == 0) {
/* We need a new PDP entry */
nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0)
mmu_radix_zero_page(nkpg);
nkpg->pindex = kernel_vm_end >> L2_PAGE_SIZE_SHIFT;
paddr = VM_PAGE_TO_PHYS(nkpg);
pde_store(l2e, paddr);
continue; /* try again */
@ -3550,13 +3546,11 @@ mmu_radix_growkernel(vm_offset_t addr)
continue;
}
nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end),
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0)
mmu_radix_zero_page(nkpg);
nkpg->pindex = pmap_l3e_pindex(kernel_vm_end);
paddr = VM_PAGE_TO_PHYS(nkpg);
pde_store(l3e, paddr);
@ -4243,8 +4237,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if (lockp != NULL) {
RELEASE_PV_LIST_LOCK(lockp);
PMAP_UNLOCK(pmap);
@ -4257,8 +4250,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
*/
return (NULL);
}
if ((m->flags & PG_ZERO) == 0)
mmu_radix_zero_page(m);
m->pindex = ptepindex;
/*
* Map the pagetable page into the process address space, if
@ -4915,10 +4907,9 @@ pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
* is the only part of the kernel address space that must be
* handled here.
*/
if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
pmap_l3e_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc_noobj(
(va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ?
VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
sva = trunc_2mpage(va);
pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
@ -4928,6 +4919,7 @@ pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
" in pmap %p", va, pmap);
return (FALSE);
}
mpte->pindex = pmap_l3e_pindex(va);
if (va < VM_MAXUSER_ADDRESS)
pmap_resident_count_inc(pmap, 1);
}
@ -5947,13 +5939,13 @@ pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
oldpdpe = be64toh(*l2e);
KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
pdpg = vm_page_alloc(NULL, va >> L2_PAGE_SIZE_SHIFT,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
pdpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
if (pdpg == NULL) {
CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
}
pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
pdpgpa = VM_PAGE_TO_PHYS(pdpg);
firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
KASSERT((oldpdpe & PG_A) != 0,


@ -264,8 +264,7 @@ ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
for (i = 0; i < PTBL_PAGES; i++) {
pidx = (PTBL_PAGES * pdir_idx) + i;
while ((m = vm_page_alloc(NULL, pidx,
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
while ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
if (nosleep) {
ptbl_free_pmap_ptbl(pmap, ptbl);
for (j = 0; j < i; j++)
@ -279,6 +278,7 @@ ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
}
m->pindex = pidx;
mtbl[i] = m;
}


@ -157,8 +157,8 @@ mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
vm_page_t m;
int req;
req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
while ((m = vm_page_alloc(NULL, idx, req)) == NULL) {
req = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
while ((m = vm_page_alloc_noobj(req)) == NULL) {
if (nosleep)
return (0);
@ -168,10 +168,7 @@ mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
}
if (!(m->flags & PG_ZERO))
/* Zero whole ptbl. */
mmu_booke_zero_page(m);
m->pindex = idx;
return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}


@ -55,8 +55,8 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
@ -72,9 +72,6 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
} else {
va = (void *)(vm_offset_t)PHYS_TO_DMAP(pa);
}
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
bzero(va, PAGE_SIZE);
atomic_add_int(&hw_uma_mdpages, 1);
return (va);


@ -1220,17 +1220,13 @@ pmap_pinit(pmap_t pmap)
/*
* allocate the l1 page
*/
while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
vm_wait(NULL);
l1pt = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
VM_ALLOC_WAITOK);
l1phys = VM_PAGE_TO_PHYS(l1pt);
pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);
if ((l1pt->flags & PG_ZERO) == 0)
pagezero(pmap->pm_l1);
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
CPU_ZERO(&pmap->pm_active);
@ -1272,8 +1268,8 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (m == NULL) {
if (lockp != NULL) {
RELEASE_PV_LIST_LOCK(lockp);
PMAP_UNLOCK(pmap);
@ -1289,9 +1285,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
*/
return (NULL);
}
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->pindex = ptepindex;
/*
* Map the pagetable page into the process address space, if
@ -1485,13 +1479,11 @@ pmap_growkernel(vm_offset_t addr)
l1 = pmap_l1(kernel_pmap, kernel_vm_end);
if (pmap_load(l1) == 0) {
/* We need a new PDP entry */
nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0)
pmap_zero_page(nkpg);
nkpg->pindex = kernel_vm_end >> L1_SHIFT;
paddr = VM_PAGE_TO_PHYS(nkpg);
pn = (paddr / PAGE_SIZE);
@ -1513,14 +1505,11 @@ pmap_growkernel(vm_offset_t addr)
continue;
}
nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0) {
pmap_zero_page(nkpg);
}
nkpg->pindex = kernel_vm_end >> L2_SHIFT;
paddr = VM_PAGE_TO_PHYS(nkpg);
pn = (paddr / PAGE_SIZE);
@ -1700,8 +1689,7 @@ get_pv_entry(pmap_t pmap, struct rwlock **lockp)
}
}
/* No free items, allocate another chunk */
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
if (lockp == NULL) {
PV_STAT(pc_chunk_tryfail++);
@ -1767,8 +1755,7 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
break;
}
for (reclaimed = false; avail < needed; avail += _NPCPV) {
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
m = reclaim_pv_chunk(pmap, lockp);
if (m == NULL)
@ -2487,10 +2474,9 @@ pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
("pmap_demote_l2_locked: oldl2 is not a leaf entry"));
if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
NULL) {
if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc(NULL,
pmap_l2_pindex(va), (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT :
VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) ==
NULL) {
if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc_noobj(
(VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
(void)pmap_remove_l2(pmap, l2, va & ~L2_OFFSET,
pmap_load(pmap_l1(pmap, va)), &free, lockp);
@ -2499,6 +2485,7 @@ pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
"failure for va %#lx in pmap %p", va, pmap);
return (false);
}
mpte->pindex = pmap_l2_pindex(va);
if (va < VM_MAXUSER_ADDRESS) {
mpte->ref_count = Ln_ENTRIES;
pmap_resident_count_inc(pmap, 1);
@ -2750,13 +2737,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/* TODO: This is not optimal, but should mostly work */
if (l3 == NULL) {
if (l2 == NULL) {
l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
l2_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (l2_m == NULL)
panic("pmap_enter: l2 pte_m == NULL");
if ((l2_m->flags & PG_ZERO) == 0)
pmap_zero_page(l2_m);
l2_pa = VM_PAGE_TO_PHYS(l2_m);
l2_pn = (l2_pa / PAGE_SIZE);
@ -2769,8 +2753,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
l2 = pmap_l1_to_l2(l1, va);
}
l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
l3_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (l3_m == NULL)
panic("pmap_enter: l3 pte_m == NULL");
if ((l3_m->flags & PG_ZERO) == 0)


@ -46,16 +46,14 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
void *va;
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
pa = m->phys_addr;
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);
va = (void *)PHYS_TO_DMAP(pa);
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
bzero(va, PAGE_SIZE);
return (va);
}


@ -1979,24 +1979,23 @@ pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
TAILQ_INIT(&alloctail);
flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
malloc2vm_flags(wait);
flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | malloc2vm_flags(wait);
*pflag = UMA_SLAB_KERNEL;
for (cpu = 0; cpu <= mp_maxid; cpu++) {
if (CPU_ABSENT(cpu)) {
p = vm_page_alloc(NULL, 0, flags);
p = vm_page_alloc_noobj(flags);
} else {
#ifndef NUMA
p = vm_page_alloc(NULL, 0, flags);
p = vm_page_alloc_noobj(flags);
#else
pc = pcpu_find(cpu);
if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
p = NULL;
else
p = vm_page_alloc_domain(NULL, 0,
pc->pc_domain, flags);
p = vm_page_alloc_noobj_domain(pc->pc_domain,
flags);
if (__predict_false(p == NULL))
p = vm_page_alloc(NULL, 0, flags);
p = vm_page_alloc_noobj(flags);
#endif
}
if (__predict_false(p == NULL))
@ -2039,16 +2038,17 @@ noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
vm_offset_t retkva, zkva;
vm_page_t p, p_next;
uma_keg_t keg;
int req;
TAILQ_INIT(&alloctail);
keg = zone->uz_keg;
req = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
if ((wait & M_WAITOK) != 0)
req |= VM_ALLOC_WAITOK;
npages = howmany(bytes, PAGE_SIZE);
while (npages > 0) {
p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
VM_ALLOC_NOWAIT));
p = vm_page_alloc_noobj_domain(domain, req);
if (p != NULL) {
/*
* Since the page does not belong to an object, its


@ -698,10 +698,7 @@ kmem_init_zero_region(void)
* zeros, while not using much more physical resources.
*/
addr = kva_alloc(ZERO_REGION_SIZE);
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
pmap_qenter(addr + i, &m, 1);
pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);


@ -197,8 +197,7 @@ vm_page_init(void *dummy)
fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED);
}
/*