Relax the object type restrictions on vm_page_alloc_contig(). Specifically,
add support for object types that were previously prohibited because they
could contain PG_CACHED pages.

Roughly halve the number of radix trie operations performed by
vm_page_alloc_contig() using the same approach that is employed by
vm_page_alloc(). Also, eliminate the radix trie lookup performed with the
free page queues lock held.

Tidy up the handling of radix trie insert failures in vm_page_alloc() and
vm_page_alloc_contig().

Reviewed by:	kib, markj
Tested by:	pho
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D8878
parent 04f4d619eb
commit d619a90d54
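
Below is a minimal userspace sketch of the pattern this commit applies to
vm_page_alloc_contig(): look up the predecessor of the starting pindex once,
then insert each successive page after the previously inserted one, rather
than paying a full ordered lookup per page. It is an illustration only, not
the committed kernel code: a sorted singly-linked list stands in for the
object's radix trie, and struct page, lookup_le(), and insert_after() here
are hypothetical stand-ins for vm_page, vm_radix_lookup_le(), and
vm_page_insert_after().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a vm_page; only the index and list linkage matter. */
struct page {
	unsigned long	 pindex;
	struct page	*next;
};

/*
 * One ordered search for the greatest entry with pindex <= the target,
 * playing the role of the single vm_radix_lookup_le() call.
 */
static struct page *
lookup_le(struct page *head, unsigned long pindex)
{
	struct page *p, *le;

	le = NULL;
	for (p = head; p != NULL && p->pindex <= pindex; p = p->next)
		le = p;
	return (le);
}

/* Insertion is O(1) once the predecessor "mpred" is known. */
static void
insert_after(struct page **head, struct page *mpred, struct page *m)
{

	if (mpred == NULL) {
		m->next = *head;
		*head = m;
	} else {
		assert(mpred->pindex < m->pindex);
		m->next = mpred->next;
		mpred->next = m;
	}
}

int
main(void)
{
	struct page *head, *m, *mpred;
	unsigned long i, npages, pindex;

	head = NULL;
	pindex = 100;
	npages = 4;

	/* Look up the predecessor once, before the allocation loop. */
	mpred = lookup_le(head, pindex);
	for (i = 0; i < npages; i++) {
		if ((m = calloc(1, sizeof(*m))) == NULL)
			abort();
		m->pindex = pindex + i;
		insert_after(&head, mpred, m);
		mpred = m;	/* Each new page precedes the next one. */
	}
	for (m = head; m != NULL; m = m->next)
		printf("pindex %lu\n", m->pindex);
	return (0);
}

In the kernel, each page still costs one trie insert; what the pattern
removes is the per-page predecessor lookup that vm_page_insert() would
otherwise repeat internally, which is where the roughly halved radix trie
operation count comes from. Doing the single lookup before the loop also
mirrors how the commit hoists vm_radix_lookup_le() out from under the free
page queues lock.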
sys/vm/vm_page.c

@@ -1486,13 +1486,12 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	vm_page_t m, mpred;
 	int flags, req_class;
 
-	mpred = 0;	/* XXX: pacify gcc */
+	mpred = NULL;	/* XXX: pacify gcc */
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
-	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
-	    req));
+	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", object, req));
 	if (object != NULL)
 		VM_OBJECT_ASSERT_WLOCKED(object);
 
@@ -1596,10 +1595,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 			m->wire_count = 0;
 		}
-		m->object = NULL;
+		KASSERT(m->object == NULL, ("page %p has object", m));
 		m->oflags = VPO_UNMANAGED;
 		m->busy_lock = VPB_UNBUSIED;
-		vm_page_free(m);
+		/* Don't change PG_ZERO. */
+		vm_page_free_toq(m);
 		return (NULL);
 	}
 
@@ -1641,6 +1641,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
  * memory attribute setting for the physical pages cannot be configured
  * to VM_MEMATTR_DEFAULT.
  *
+ * The specified object may not contain fictitious pages.
+ *
  * The caller must always specify an allocation class.
  *
  * allocation classes:
@@ -1664,20 +1666,21 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
-	vm_page_t m, m_tmp, m_ret;
-	u_int flags;
+	vm_page_t m, m_ret, mpred;
+	u_int busy_lock, flags, oflags;
 	int req_class;
 
+	mpred = NULL;	/* XXX: pacify gcc */
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
-	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
+	    ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
 	    req));
 	if (object != NULL) {
 		VM_OBJECT_ASSERT_WLOCKED(object);
-		KASSERT(object->type == OBJT_PHYS,
-		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
+		KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
+		    ("vm_page_alloc_contig: object %p has fictitious pages",
 		    object));
 	}
 	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
@@ -1689,18 +1692,34 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
+	if (object != NULL) {
+		mpred = vm_radix_lookup_le(&object->rtree, pindex);
+		KASSERT(mpred == NULL || mpred->pindex != pindex,
+		    ("vm_page_alloc_contig: pindex already allocated"));
+	}
+
 	/*
 	 * Can we allocate the pages without the number of free pages falling
 	 * below the lower bound for the allocation class?
 	 */
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
 	    vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
 	    vm_cnt.v_free_count >= npages)) {
 		/*
 		 * Can we allocate the pages from a reservation?
 		 */
 #if VM_NRESERVLEVEL > 0
 retry:
 		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
 		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
-		    low, high, alignment, boundary)) == NULL)
+		    low, high, alignment, boundary, mpred)) == NULL)
 #endif
 			/*
 			 * If not, allocate them from the free page queues.
 			 */
 			m_ret = vm_phys_alloc_contig(npages, low, high,
 			    alignment, boundary);
 	} else {
@@ -1732,6 +1751,13 @@ retry:
 	flags = PG_ZERO;
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
+	oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
+	    VPO_UNMANAGED : 0;
+	busy_lock = VPB_UNBUSIED;
+	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
+		busy_lock = VPB_SINGLE_EXCLUSIVER;
+	if ((req & VM_ALLOC_SBUSY) != 0)
+		busy_lock = VPB_SHARERS_WORD(1);
 	if ((req & VM_ALLOC_WIRED) != 0)
 		atomic_add_int(&vm_cnt.v_wire_count, npages);
 	if (object != NULL) {
@@ -1742,37 +1768,32 @@ retry:
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		m->aflags = 0;
 		m->flags = (m->flags | PG_NODUMP) & flags;
-		m->busy_lock = VPB_UNBUSIED;
-		if (object != NULL) {
-			if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
-				m->busy_lock = VPB_SINGLE_EXCLUSIVER;
-			if ((req & VM_ALLOC_SBUSY) != 0)
-				m->busy_lock = VPB_SHARERS_WORD(1);
-		}
+		m->busy_lock = busy_lock;
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->wire_count = 1;
-		/* Unmanaged pages don't use "act_count". */
-		m->oflags = VPO_UNMANAGED;
+		m->act_count = 0;
+		m->oflags = oflags;
 		if (object != NULL) {
-			if (vm_page_insert(m, object, pindex)) {
-				if (vm_paging_needed())
-					pagedaemon_wakeup();
+			if (vm_page_insert_after(m, object, pindex, mpred)) {
+				pagedaemon_wakeup();
 				if ((req & VM_ALLOC_WIRED) != 0)
-					atomic_subtract_int(&vm_cnt.v_wire_count,
-					    npages);
-				for (m_tmp = m, m = m_ret;
-				    m < &m_ret[npages]; m++) {
-					if ((req & VM_ALLOC_WIRED) != 0)
+					atomic_subtract_int(
+					    &vm_cnt.v_wire_count, npages);
+				KASSERT(m->object == NULL,
+				    ("page %p has object", m));
+				mpred = m;
+				for (m = m_ret; m < &m_ret[npages]; m++) {
+					if (m <= mpred &&
+					    (req & VM_ALLOC_WIRED) != 0)
 						m->wire_count = 0;
-					if (m >= m_tmp) {
-						m->object = NULL;
-						m->oflags |= VPO_UNMANAGED;
-					}
-					vm_page_free(m);
+					m->oflags = VPO_UNMANAGED;
+					m->busy_lock = VPB_UNBUSIED;
+					/* Don't change PG_ZERO. */
+					vm_page_free_toq(m);
 				}
 				return (NULL);
 			}
+			mpred = m;
 		} else
 			m->pindex = pindex;
 		if (memattr != VM_MEMATTR_DEFAULT)
@@ -1791,6 +1812,7 @@ static void
 vm_page_alloc_check(vm_page_t m)
 {
 
+	KASSERT(m->object == NULL, ("page %p has object", m));
 	KASSERT(m->queue == PQ_NONE,
 	    ("page %p has unexpected queue %d", m, m->queue));
 	KASSERT(m->wire_count == 0, ("page %p is wired", m));
sys/vm/vm_reserv.c

@@ -404,14 +404,18 @@ vm_reserv_populate(vm_reserv_t rv, int index)
  * physical address boundary that is a multiple of that value.  Both
  * "alignment" and "boundary" must be a power of two.
  *
+ * The page "mpred" must immediately precede the offset "pindex" within the
+ * specified object.
+ *
  * The object and free page queue must be locked.
  */
 vm_page_t
 vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
-    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
+    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+    vm_page_t mpred)
 {
 	vm_paddr_t pa, size;
-	vm_page_t m, m_ret, mpred, msucc;
+	vm_page_t m, m_ret, msucc;
 	vm_pindex_t first, leftcap, rightcap;
 	vm_reserv_t rv;
 	u_long allocpages, maxpages, minpages;
@@ -448,10 +452,11 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
 	/*
 	 * Look for an existing reservation.
 	 */
-	mpred = vm_radix_lookup_le(&object->rtree, pindex);
 	if (mpred != NULL) {
+		KASSERT(mpred->object == object,
+		    ("vm_reserv_alloc_contig: object doesn't contain mpred"));
 		KASSERT(mpred->pindex < pindex,
-		    ("vm_reserv_alloc_contig: pindex already allocated"));
+		    ("vm_reserv_alloc_contig: mpred doesn't precede pindex"));
 		rv = vm_reserv_from_page(mpred);
 		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
 			goto found;
@@ -460,7 +465,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
 	msucc = TAILQ_FIRST(&object->memq);
 	if (msucc != NULL) {
 		KASSERT(msucc->pindex > pindex,
-		    ("vm_reserv_alloc_contig: pindex already allocated"));
+		    ("vm_reserv_alloc_contig: msucc doesn't succeed pindex"));
 		rv = vm_reserv_from_page(msucc);
 		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
 			goto found;
sys/vm/vm_reserv.h

@@ -47,7 +47,7 @@
  */
 vm_page_t	vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
 		    u_long npages, vm_paddr_t low, vm_paddr_t high,
-		    u_long alignment, vm_paddr_t boundary);
+		    u_long alignment, vm_paddr_t boundary, vm_page_t mpred);
 vm_page_t	vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
 		    vm_page_t mpred);
 void		vm_reserv_break_all(vm_object_t object);