Retire vm_reserv_extend_{contig,page}().  These functions were introduced
as part of a false start toward fine-grained reservation locking.  In the
end, they were not needed, so eliminate them.

Order the parameters to vm_reserv_alloc_{contig,page}() consistently with
the vm_page functions that call them.
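
For reference, the reordered prototypes (as declared in the sys/vm/vm_reserv.h
hunk below) take object, pindex, domain, and req first, matching
vm_page_alloc_domain_after() and vm_page_alloc_contig_domain():

	vm_page_t vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
	    int domain, int req, vm_page_t mpred);
	vm_page_t vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
	    int domain, int req, vm_page_t mpred, u_long npages,
	    vm_paddr_t low, vm_paddr_t high, u_long alignment,
	    vm_paddr_t boundary);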

Update the comments about the locking requirements for
vm_reserv_alloc_{contig,page}().  They no longer require a free page
queues lock.
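
Concretely, callers now hold only the object lock; the functions take the
per-reservation lock and charge the domain's free count themselves.  A
condensed sketch of the pattern from the vm_reserv.c hunks below (not the
complete function bodies):

	VM_OBJECT_ASSERT_WLOCKED(object);	/* only the object lock is asserted */
	...
	vm_reserv_lock(rv);			/* fine-grained reservation lock */
	if (!vm_domain_allocate(vmd, req, npages))	/* no free page queues lock needed */
		goto out;
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	vm_reserv_unlock(rv);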

Wrap several lines that became too long after the "req" and "domain"
parameters were added to vm_reserv_alloc_{contig,page}().

Reviewed by:	kib, markj
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D20492
commit 2d5039db18 (parent 5ec57af4b2)
Author:	Alan Cox
Date:	2019-06-03 05:15:36 +00:00
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=348525
3 changed files with 74 additions and 183 deletions

sys/vm/vm_page.c

@@ -1834,8 +1834,8 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
* Can we allocate the page from a reservation?
*/
if (vm_object_reserv(object) &&
((m = vm_reserv_extend(req, object, pindex, domain, mpred)) != NULL ||
(m = vm_reserv_alloc_page(req, object, pindex, domain, mpred)) != NULL)) {
(m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
NULL) {
domain = vm_phys_domain(m);
vmd = VM_DOMAIN(domain);
goto found;
@@ -2039,10 +2039,8 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
* Can we allocate the pages from a reservation?
*/
if (vm_object_reserv(object) &&
((m_ret = vm_reserv_extend_contig(req, object, pindex, domain,
npages, low, high, alignment, boundary, mpred)) != NULL ||
(m_ret = vm_reserv_alloc_contig(req, object, pindex, domain,
npages, low, high, alignment, boundary, mpred)) != NULL)) {
(m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
mpred, npages, low, high, alignment, boundary)) != NULL) {
domain = vm_phys_domain(m_ret);
vmd = VM_DOMAIN(domain);
goto found;

sys/vm/vm_reserv.c

@@ -575,96 +575,9 @@ vm_reserv_populate(vm_reserv_t rv, int index)
vm_reserv_domain_unlock(rv->domain);
}
/*
* Attempts to allocate a contiguous set of physical pages from existing
* reservations. See vm_reserv_alloc_contig() for a description of the
* function's parameters.
*
* The page "mpred" must immediately precede the offset "pindex" within the
* specified object.
*
* The object must be locked.
*/
vm_page_t
vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
struct vm_domain *vmd;
vm_paddr_t pa, size;
vm_page_t m, msucc;
vm_reserv_t rv;
int i, index;
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
/*
* Is a reservation fundamentally impossible?
*/
if (pindex < VM_RESERV_INDEX(object, pindex) ||
pindex + npages > object->size || object->resident_page_count == 0)
return (NULL);
/*
* All reservations of a particular size have the same alignment.
* Assuming that the first page is allocated from a reservation, the
* least significant bits of its physical address can be determined
* from its offset from the beginning of the reservation and the size
* of the reservation.
*
* Could the specified index within a reservation of the smallest
* possible size satisfy the alignment and boundary requirements?
*/
pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
if ((pa & (alignment - 1)) != 0)
return (NULL);
size = npages << PAGE_SHIFT;
if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
return (NULL);
/*
* Look for an existing reservation.
*/
rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
if (rv == NULL)
return (NULL);
KASSERT(object != kernel_object || rv->domain == domain,
("vm_reserv_extend_contig: Domain mismatch from reservation."));
index = VM_RESERV_INDEX(object, pindex);
/* Does the allocation fit within the reservation? */
if (index + npages > VM_LEVEL_0_NPAGES)
return (NULL);
domain = rv->domain;
vmd = VM_DOMAIN(domain);
vm_reserv_lock(rv);
if (rv->object != object)
goto out;
m = &rv->pages[index];
pa = VM_PAGE_TO_PHYS(m);
if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
goto out;
/* Handle vm_page_rename(m, new_object, ...). */
for (i = 0; i < npages; i++) {
if (popmap_is_set(rv->popmap, index + i))
goto out;
}
if (!vm_domain_allocate(vmd, req, npages))
goto out;
for (i = 0; i < npages; i++)
vm_reserv_populate(rv, index + i);
vm_reserv_unlock(rv);
return (m);
out:
vm_reserv_unlock(rv);
return (NULL);
}
/*
* Allocates a contiguous set of physical pages of the given size "npages"
* from newly created reservations. All of the physical pages
* from existing or newly created reservations. All of the physical pages
* must be at or above the given physical address "low" and below the given
* physical address "high". The given value "alignment" determines the
* alignment of the first physical page in the set. If the given value
@@ -672,18 +585,15 @@ vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
* physical address boundary that is a multiple of that value. Both
* "alignment" and "boundary" must be a power of two.
*
* Callers should first invoke vm_reserv_extend_contig() to attempt an
* allocation from existing reservations.
*
* The page "mpred" must immediately precede the offset "pindex" within the
* specified object.
*
* The object and free page queue must be locked.
* The object must be locked.
*/
vm_page_t
vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex, int domain,
u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_page_t mpred)
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary)
{
struct vm_domain *vmd;
vm_paddr_t pa, size;
@@ -721,13 +631,42 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex, int doma
return (NULL);
/*
* Callers should've extended an existing reservation prior to
* calling this function. If a reservation exists it is
* incompatible with the allocation.
* Look for an existing reservation.
*/
rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
if (rv != NULL)
if (rv != NULL) {
KASSERT(object != kernel_object || rv->domain == domain,
("vm_reserv_alloc_contig: domain mismatch"));
index = VM_RESERV_INDEX(object, pindex);
/* Does the allocation fit within the reservation? */
if (index + npages > VM_LEVEL_0_NPAGES)
return (NULL);
domain = rv->domain;
vmd = VM_DOMAIN(domain);
vm_reserv_lock(rv);
/* Handle reclaim race. */
if (rv->object != object)
goto out;
m = &rv->pages[index];
pa = VM_PAGE_TO_PHYS(m);
if (pa < low || pa + size > high ||
(pa & (alignment - 1)) != 0 ||
((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
goto out;
/* Handle vm_page_rename(m, new_object, ...). */
for (i = 0; i < npages; i++)
if (popmap_is_set(rv->popmap, index + i))
goto out;
if (!vm_domain_allocate(vmd, req, npages))
goto out;
for (i = 0; i < npages; i++)
vm_reserv_populate(rv, index + i);
vm_reserv_unlock(rv);
return (m);
out:
vm_reserv_unlock(rv);
return (NULL);
}
/*
* Could at least one reservation fit between the first index to the
@@ -849,8 +788,7 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex, int doma
}
/*
* Attempts to extend an existing reservation and allocate the page to the
* object.
* Allocate a physical page from an existing or newly created reservation.
*
* The page "mpred" must immediately precede the offset "pindex" within the
* specified object.
@@ -858,67 +796,8 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex, int doma
* The object must be locked.
*/
vm_page_t
vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
vm_page_t mpred)
{
struct vm_domain *vmd;
vm_page_t m, msucc;
vm_reserv_t rv;
int index;
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Could a reservation currently exist?
*/
if (pindex < VM_RESERV_INDEX(object, pindex) ||
pindex >= object->size || object->resident_page_count == 0)
return (NULL);
/*
* Look for an existing reservation.
*/
rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
if (rv == NULL)
return (NULL);
KASSERT(object != kernel_object || rv->domain == domain,
("vm_reserv_extend: Domain mismatch from reservation."));
domain = rv->domain;
vmd = VM_DOMAIN(domain);
index = VM_RESERV_INDEX(object, pindex);
m = &rv->pages[index];
vm_reserv_lock(rv);
/* Handle reclaim race. */
if (rv->object != object ||
/* Handle vm_page_rename(m, new_object, ...). */
popmap_is_set(rv->popmap, index)) {
m = NULL;
goto out;
}
if (vm_domain_allocate(vmd, req, 1) == 0)
m = NULL;
else
vm_reserv_populate(rv, index);
out:
vm_reserv_unlock(rv);
return (m);
}
/*
* Attempts to allocate a new reservation for the object, and allocates a
* page from that reservation. Callers should first invoke vm_reserv_extend()
* to attempt an allocation from an existing reservation.
*
* The page "mpred" must immediately precede the offset "pindex" within the
* specified object.
*
* The object and free page queue must be locked.
*/
vm_page_t
vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex, int domain,
vm_page_t mpred)
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
int req, vm_page_t mpred)
{
struct vm_domain *vmd;
vm_page_t m, msucc;
@@ -936,13 +815,32 @@ vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex, int domain
return (NULL);
/*
* Callers should've extended an existing reservation prior to
* calling this function. If a reservation exists it is
* incompatible with the allocation.
* Look for an existing reservation.
*/
rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
if (rv != NULL)
return (NULL);
if (rv != NULL) {
KASSERT(object != kernel_object || rv->domain == domain,
("vm_reserv_alloc_page: domain mismatch"));
domain = rv->domain;
vmd = VM_DOMAIN(domain);
index = VM_RESERV_INDEX(object, pindex);
m = &rv->pages[index];
vm_reserv_lock(rv);
/* Handle reclaim race. */
if (rv->object != object ||
/* Handle vm_page_rename(m, new_object, ...). */
popmap_is_set(rv->popmap, index)) {
m = NULL;
goto out;
}
if (vm_domain_allocate(vmd, req, 1) == 0)
m = NULL;
else
vm_reserv_populate(rv, index);
out:
vm_reserv_unlock(rv);
return (m);
}
/*
* Could a reservation fit between the first index to the left that

sys/vm/vm_reserv.h

@@ -47,17 +47,12 @@
/*
* The following functions are only to be used by the virtual memory system.
*/
vm_page_t vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex,
int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary, vm_page_t mpred);
vm_page_t vm_reserv_extend_contig(int req, vm_object_t object,
vm_pindex_t pindex, int domain, u_long npages,
vm_page_t vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
int domain, int req, vm_page_t mpred, u_long npages,
vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_page_t mpred);
vm_page_t vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex,
int domain, vm_page_t mpred);
vm_page_t vm_reserv_extend(int req, vm_object_t object,
vm_pindex_t pindex, int domain, vm_page_t mpred);
vm_paddr_t boundary);
vm_page_t vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
int domain, int req, vm_page_t mpred);
void vm_reserv_break_all(vm_object_t object);
boolean_t vm_reserv_free_page(vm_page_t m);
void vm_reserv_init(void);