Eliminate the arena parameter to kmem_malloc_domain(). It is redundant.

The domain and flags parameters suffice.  In fact, the related functions
kmem_alloc_{attr,contig}_domain() don't have an arena parameter.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D16713
commit 067fd85894
parent 9e2d4791d1
Author: Alan Cox
Date:   2018-08-18 18:33:50 +00:00

Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=338018
3 changed files with 9 additions and 25 deletions
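For context, the call-site change reads like this (a minimal sketch, not part of the diff; the variables and the M_WAITOK | M_EXEC flag combination are hypothetical):

        /* Before: the caller had to pass an arena matching its flags. */
        addr = kmem_malloc_domain(kernel_rwx_arena, domain, size, M_WAITOK | M_EXEC);

        /* After: M_EXEC in the flags selects the rwx arena internally. */
        addr = kmem_malloc_domain(domain, size, M_WAITOK | M_EXEC);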

--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c

@@ -1169,7 +1169,7 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 	void *p;	/* Returned page */
 
 	*pflag = UMA_SLAB_KERNEL;
-	p = (void *) kmem_malloc_domain(kernel_arena, domain, bytes, wait);
+	p = (void *) kmem_malloc_domain(domain, bytes, wait);
 
 	return (p);
 }
@@ -3680,32 +3680,22 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
 void *
 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
 {
-	struct vmem *arena;
 	vm_offset_t addr;
 	uma_slab_t slab;
 
-#if VM_NRESERVLEVEL > 0
-	if (__predict_true((wait & M_EXEC) == 0))
-		arena = kernel_arena;
-	else
-		arena = kernel_rwx_arena;
-#else
-	arena = kernel_arena;
-#endif
-
 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
 	if (slab == NULL)
 		return (NULL);
 	if (domain == UMA_ANYDOMAIN)
-		addr = kmem_malloc(arena, size, wait);
+		addr = kmem_malloc(NULL, size, wait);
 	else
-		addr = kmem_malloc_domain(arena, domain, size, wait);
+		addr = kmem_malloc_domain(domain, size, wait);
 	if (addr != 0) {
 		vsetslab(addr, slab);
 		slab->us_data = (void *)addr;
 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
 #if VM_NRESERVLEVEL > 0
-		if (__predict_false(arena == kernel_rwx_arena))
+		if (__predict_false((wait & M_EXEC) != 0))
 			slab->us_flags |= UMA_SLAB_KRWX;
 #endif
 		slab->us_size = size;

--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h

@@ -65,8 +65,7 @@ vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
 vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
-vm_offset_t kmem_malloc_domain(struct vmem *, int domain, vm_size_t size,
-    int flags);
+vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
 void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
 
 /* This provides memory for previously allocated address space. */

--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c

@@ -372,23 +372,18 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
  *	Allocate wired-down pages in the kernel's address space.
  */
 vm_offset_t
-kmem_malloc_domain(struct vmem *vmem, int domain, vm_size_t size, int flags)
+kmem_malloc_domain(int domain, vm_size_t size, int flags)
 {
 	vmem_t *arena;
 	vm_offset_t addr;
 	int rv;
 
 #if VM_NRESERVLEVEL > 0
-	KASSERT(vmem == kernel_arena || vmem == kernel_rwx_arena,
-	    ("kmem_malloc_domain: Only kernel_arena or kernel_rwx_arena "
-	    "are supported."));
-	if (__predict_true(vmem == kernel_arena))
+	if (__predict_true((flags & M_EXEC) == 0))
 		arena = vm_dom[domain].vmd_kernel_arena;
 	else
 		arena = vm_dom[domain].vmd_kernel_rwx_arena;
 #else
-	KASSERT(vmem == kernel_arena,
-	    ("kmem_malloc_domain: Only kernel_arena is supported."));
 	arena = vm_dom[domain].vmd_kernel_arena;
 #endif
 	size = round_page(size);
@@ -404,7 +399,7 @@ kmem_malloc_domain(struct vmem *vmem, int domain, vm_size_t size, int flags)
 }
 
 vm_offset_t
-kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
+kmem_malloc(struct vmem *vmem __unused, vm_size_t size, int flags)
 {
 	struct vm_domainset_iter di;
 	vm_offset_t addr;
@@ -412,7 +407,7 @@ kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
 
 	vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
 	do {
-		addr = kmem_malloc_domain(vmem, domain, size, flags);
+		addr = kmem_malloc_domain(domain, size, flags);
 		if (addr != 0)
 			break;
 	} while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);
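
Taken together, the backing arena is now a pure function of the domain and flags. A minimal sketch of the selection rule the vm_kern.c hunk installs (the helper function is hypothetical; the per-domain arena names are taken from the diff above):

        /* Hypothetical helper mirroring the logic inside kmem_malloc_domain(). */
        static vmem_t *
        kmem_arena_for(int domain, int flags)
        {
        #if VM_NRESERVLEVEL > 0
        	/* Executable requests come from the read-write-execute arena. */
        	if (__predict_false((flags & M_EXEC) != 0))
        		return (vm_dom[domain].vmd_kernel_rwx_arena);
        #endif
        	return (vm_dom[domain].vmd_kernel_arena);
        }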