vm_page: Add a new page allocator interface for unnamed pages

The diff adds vm_page_alloc_noobj() and vm_page_alloc_noobj_domain().
These mostly correspond to vm_page_alloc() and vm_page_alloc_domain()
when no VM object is specified, with the exception that they handle
VM_ALLOC_ZERO by zeroing the page, rather than by preserving PG_ZERO.

This simplifies callers and will permit simplification of the
vm_page_alloc_domain() definition.
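
For example (a hypothetical caller, not part of this diff), code that
previously had to test PG_ZERO itself:

	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m != NULL && (m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

can simply request a zeroed page from the new interface:

	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);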

Since the new allocator variant is similar to vm_page_alloc_freelist(),
implement both of them using a common backend allocator function.  No
functional change intended.

Reviewed by:	alc, kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D31985
Author:	Mark Johnston
Date:	2021-10-19 20:22:12 -04:00
Commit:	b498f71bc5 (parent a23e6a1078)

2 changed files with 100 additions and 59 deletions

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c

@@ -2395,23 +2395,72 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
 }
 
 /*
- * vm_page_alloc_freelist:
- *
- *	Allocate a physical page from the specified free page list.
- *
- *	The caller must always specify an allocation class.
- *
- *	allocation classes:
- *	VM_ALLOC_NORMAL		normal process request
- *	VM_ALLOC_SYSTEM		system *really* needs a page
- *	VM_ALLOC_INTERRUPT	interrupt time request
- *
- *	optional allocation flags:
- *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
- *				intends to allocate
- *	VM_ALLOC_WIRED		wire the allocated page
- *	VM_ALLOC_ZERO		prefer a zeroed page
+ * Allocate a physical page that is not intended to be inserted into a VM
+ * object.  If the "freelist" parameter is not equal to VM_NFREELIST, then only
+ * pages from the specified vm_phys freelist will be returned.
  */
+static __always_inline vm_page_t
+_vm_page_alloc_noobj_domain(int domain, const int freelist, int req)
+{
+	struct vm_domain *vmd;
+	vm_page_t m;
+	int flags;
+
+	KASSERT((req & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY |
+	    VM_ALLOC_NOOBJ)) == 0,
+	    ("%s: invalid req %#x", __func__, req));
+
+	flags = (req & VM_ALLOC_NODUMP) != 0 ? PG_NODUMP : 0;
+	vmd = VM_DOMAIN(domain);
+again:
+	if (freelist == VM_NFREELIST &&
+	    vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
+		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
+		    M_NOWAIT | M_NOVM);
+		if (m != NULL) {
+			flags |= PG_PCPU_CACHE;
+			goto found;
+		}
+	}
+
+	if (vm_domain_allocate(vmd, req, 1)) {
+		vm_domain_free_lock(vmd);
+		if (freelist == VM_NFREELIST)
+			m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0);
+		else
+			m = vm_phys_alloc_freelist_pages(domain, freelist,
+			    VM_FREEPOOL_DIRECT, 0);
+		vm_domain_free_unlock(vmd);
+		if (m == NULL)
+			vm_domain_freecnt_inc(vmd, 1);
+	}
+	if (m == NULL) {
+		if (vm_domain_alloc_fail(vmd, NULL, req))
+			goto again;
+		return (NULL);
+	}
+
+found:
+	vm_page_dequeue(m);
+	vm_page_alloc_check(m);
+
+	/* Consumers should not rely on a useful default pindex value. */
+	m->pindex = 0xdeadc0dedeadc0de;
+	m->flags = (m->flags & PG_ZERO) | flags;
+	m->a.flags = 0;
+	m->oflags = VPO_UNMANAGED;
+	m->busy_lock = VPB_UNBUSIED;
+	if ((req & VM_ALLOC_WIRED) != 0) {
+		vm_wire_add(1);
+		m->ref_count = 1;
+	}
+
+	if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
+		pmap_zero_page(m);
+
+	return (m);
+}
+
 vm_page_t
 vm_page_alloc_freelist(int freelist, int req)
 {
@@ -2432,64 +2481,54 @@ vm_page_alloc_freelist(int freelist, int req)
 vm_page_t
 vm_page_alloc_freelist_domain(int domain, int freelist, int req)
 {
-	struct vm_domain *vmd;
+	KASSERT(freelist >= 0 && freelist < VM_NFREELIST,
+	    ("%s: invalid freelist %d", __func__, freelist));
+	return (_vm_page_alloc_noobj_domain(domain, freelist, req));
+}
+
+vm_page_t
+vm_page_alloc_noobj(int req)
+{
+	struct vm_domainset_iter di;
 	vm_page_t m;
-	u_int flags;
+	int domain;
 
-	m = NULL;
-	vmd = VM_DOMAIN(domain);
-again:
-	if (vm_domain_allocate(vmd, req, 1)) {
-		vm_domain_free_lock(vmd);
-		m = vm_phys_alloc_freelist_pages(domain, freelist,
-		    VM_FREEPOOL_DIRECT, 0);
-		vm_domain_free_unlock(vmd);
-		if (m == NULL)
-			vm_domain_freecnt_inc(vmd, 1);
-	}
-	if (m == NULL) {
-		if (vm_domain_alloc_fail(vmd, NULL, req))
-			goto again;
-		return (NULL);
-	}
-	vm_page_dequeue(m);
-	vm_page_alloc_check(m);
+	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
+	do {
+		m = vm_page_alloc_noobj_domain(domain, req);
+		if (m != NULL)
+			break;
+	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 
-	/*
-	 * Initialize the page.  Only the PG_ZERO flag is inherited.
-	 */
-	m->a.flags = 0;
-	flags = 0;
-	if ((req & VM_ALLOC_ZERO) != 0)
-		flags = PG_ZERO;
-	m->flags &= flags;
-	if ((req & VM_ALLOC_WIRED) != 0) {
-		vm_wire_add(1);
-		m->ref_count = 1;
-	}
-	/* Unmanaged pages don't use "act_count". */
-	m->oflags = VPO_UNMANAGED;
 	return (m);
 }
 
+vm_page_t
+vm_page_alloc_noobj_domain(int domain, int req)
+{
+	return (_vm_page_alloc_noobj_domain(domain, VM_NFREELIST, req));
+}
+
 /*
  * Check a page that has been freshly dequeued from a freelist.
  */
 static void
 vm_page_alloc_check(vm_page_t m)
 {
 
 	KASSERT(m->object == NULL, ("page %p has object", m));
+	KASSERT(m->ref_count == 0, ("page %p has references", m));
+	KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
 	KASSERT(m->a.queue == PQ_NONE &&
 	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("page %p has unexpected queue %d, flags %#x",
 	    m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
-	KASSERT(m->ref_count == 0, ("page %p has references", m));
-	KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
+	KASSERT(m->valid == 0, ("free page %p is valid", m));
 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
-	KASSERT(m->valid == 0, ("free page %p is valid", m));
 	pmap_vm_page_alloc_check(m);
 }

--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h

@@ -527,8 +527,8 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
  * Legend:
  * (a) - vm_page_alloc() supports the flag.
  * (c) - vm_page_alloc_contig() supports the flag.
- * (f) - vm_page_alloc_freelist() supports the flag.
  * (g) - vm_page_grab() supports the flag.
+ * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
  * (p) - vm_page_grab_pages() supports the flag.
  * Bits above 15 define the count of additional pages that the caller
  * intends to allocate.
@@ -537,10 +537,10 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define	VM_ALLOC_INTERRUPT	1
 #define	VM_ALLOC_SYSTEM		2
 #define	VM_ALLOC_CLASS_MASK	3
-#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
-#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
-#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
-#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
+#define	VM_ALLOC_WAITOK		0x0008	/* (acn) Sleep and retry */
+#define	VM_ALLOC_WAITFAIL	0x0010	/* (acn) Sleep and return error */
+#define	VM_ALLOC_WIRED		0x0020	/* (acgnp) Allocate a wired page */
+#define	VM_ALLOC_ZERO		0x0040	/* (acgnp) Allocate a zeroed page */
 #define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
 #define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
 #define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
@@ -548,7 +548,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
 #define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
 #define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
-#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
+#define	VM_ALLOC_NOWAIT		0x8000	/* (acgnp) Do not sleep */
 #define	VM_ALLOC_COUNT_SHIFT	16
 #define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
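
As a usage sketch (the loop and variable names are hypothetical, not part
of this change), the class, flag, and count bits compose into a single
request argument; VM_ALLOC_COUNT() hints how many further pages the caller
intends to allocate:

	for (i = 0; i < npages; i++) {
		/* System-class, wired, zeroed page; hint at the remainder. */
		ma[i] = vm_page_alloc_noobj(VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO | VM_ALLOC_COUNT(npages - i - 1));
		if (ma[i] == NULL)
			break;
	}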
@@ -614,6 +614,8 @@ vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
     vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
 vm_page_t vm_page_alloc_freelist_domain(int, int, int);
+vm_page_t vm_page_alloc_noobj(int);
+vm_page_t vm_page_alloc_noobj_domain(int, int);
 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
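
A short sketch of the new prototypes in use (hypothetical caller; the
consumer of the page is elided):

	vm_page_t m;

	/*
	 * Allocate a wired, zeroed page from NUMA domain 0.  With
	 * VM_ALLOC_WAITOK the allocator sleeps and retries instead of
	 * returning NULL.
	 */
	m = vm_page_alloc_noobj_domain(0, VM_ALLOC_WIRED | VM_ALLOC_ZERO |
	    VM_ALLOC_WAITOK);

	/* Release the wiring; a page with no remaining wirings is freed. */
	if (vm_page_unwire_noq(m))
		vm_page_free(m);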