The conversion of kmem_alloc_attr() from operating on a vm map to a vmem
arena in r254025 introduced a bug in the case when an allocation is only
partially successful.  Specifically, the vm object lock was not being
acquired before freeing the allocated pages.  To address this bug, replace
the existing code by a call to kmem_unback().

Change the type of a variable in kmem_alloc_attr() so that an allocation
of two or more gigabytes won't fail.

Replace the error handling code in kmem_back() by a call to kmem_unback().

Reviewed by:	kib (an earlier version)
MFC after:	1 week
Sponsored by:	EMC / Isilon Storage Division
This commit is contained in:
Alan Cox 2015-09-26 22:57:10 +00:00
parent 0a805de6f3
commit 9e829b2272
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=288281

View File

@ -159,11 +159,10 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
vm_offset_t addr;
vm_offset_t addr, i;
vm_ooffset_t offset;
vm_page_t m;
int pflags, tries;
int i;
size = round_page(size);
if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
@ -184,18 +183,7 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
tries++;
goto retry;
}
/*
* Unmap and free the pages.
*/
if (i != 0)
pmap_remove(kernel_pmap, addr, addr + i);
while (i != 0) {
i -= PAGE_SIZE;
m = vm_page_lookup(object,
OFF_TO_IDX(offset + i));
vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
kmem_unback(object, addr, i);
vmem_free(vmem, addr, size);
return (0);
}
@ -353,25 +341,13 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
* aren't on any queues.
*/
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
if ((flags & M_NOWAIT) == 0) {
VM_OBJECT_WUNLOCK(object);
VM_WAIT;
VM_OBJECT_WLOCK(object);
goto retry;
}
/*
* Unmap and free the pages.
*/
if (i != 0)
pmap_remove(kernel_pmap, addr, addr + i);
while (i != 0) {
i -= PAGE_SIZE;
m = vm_page_lookup(object,
OFF_TO_IDX(offset + i));
vm_page_unwire(m, PQ_INACTIVE);
vm_page_free(m);
}
VM_OBJECT_WUNLOCK(object);
kmem_unback(object, addr, i);
return (KERN_NO_SPACE);
}
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
@ -387,6 +363,15 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
return (KERN_SUCCESS);
}
/*
* kmem_unback:
*
* Unmap and free the physical pages underlying the specified virtual
* address range.
*
* A physical page must exist within the specified object at each index
* that is being unmapped.
*/
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{