Eliminate kmem_malloc()'s unused arena parameter. (The arena parameter
became unused in FreeBSD 12.x as a side-effect of the NUMA-related
changes.)

Reviewed by:	kib, markj
Discussed with:	jeff, re@
Differential Revision:	https://reviews.freebsd.org/D16825
Commit:	83a90bffd8 (parent b1a90834bb)
Author:	Alan Cox
Date:	2018-08-21 16:43:46 +00:00
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=338143

20 changed files with 37 additions and 59 deletions
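In short, every call site drops the arena argument, and the prototype moves
from the two-argument form to a size-and-flags form (see the prototype and
implementation hunks below). A minimal before/after sketch of a call site;
the `buf' variable is hypothetical, used only for illustration:

	/* Before: callers named an arena, but the argument was already __unused. */
	vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
	buf = (void *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);

	/* After: only the allocation size and the malloc(9)-style flags remain. */
	vm_offset_t kmem_malloc(vm_size_t size, int flags);
	buf = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);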

@@ -402,18 +402,14 @@ native_start_all_aps(void)
 		apic_id = cpu_apic_ids[cpu];
 		/* allocate and set up an idle stack data page */
-		bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
-		    kstack_pages * PAGE_SIZE, M_WAITOK | M_ZERO);
-		doublefault_stack = (char *)kmem_malloc(kernel_arena,
-		    PAGE_SIZE, M_WAITOK | M_ZERO);
-		mce_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
-		    M_WAITOK | M_ZERO);
-		nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
-		    M_WAITOK | M_ZERO);
-		dbg_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
-		    M_WAITOK | M_ZERO);
-		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-		    M_WAITOK | M_ZERO);
+		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
+		    M_WAITOK | M_ZERO);
+		doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK |
+		    M_ZERO);
+		mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+		nmi_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+		dbg_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+		dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 		bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
 		bootAP = cpu;

@@ -1412,8 +1412,7 @@ pmap_init(void)
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
 	TAILQ_INIT(&pv_dummy.pv_list);

@@ -361,8 +361,8 @@ amd64_set_ioperm(td, uap)
 	 */
 	pcb = td->td_pcb;
 	if (pcb->pcb_tssp == NULL) {
-		tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
-		    ctob(IOPAGES + 1), M_WAITOK);
+		tssp = (struct amd64tss *)kmem_malloc(ctob(IOPAGES + 1),
+		    M_WAITOK);
 		pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
 		    ctob(IOPAGES + 1), false);
 		iomap = (char *)&tssp[1];
@@ -463,7 +463,7 @@ user_ldt_alloc(struct proc *p, int force)
 	mtx_unlock(&dt_lock);
 	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
 	sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
-	sva = kmem_malloc(kernel_arena, sz, M_WAITOK | M_ZERO);
+	sva = kmem_malloc(sz, M_WAITOK | M_ZERO);
 	new_ldt->ldt_base = (caddr_t)sva;
 	pmap_pti_add_kva(sva, sva + sz, false);
 	new_ldt->ldt_refcnt = 1;

@@ -119,8 +119,7 @@ cpu_mp_start(void)
 	/* Reserve memory for application processors */
 	for(i = 0; i < (mp_ncpus - 1); i++)
-		dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-		    M_WAITOK | M_ZERO);
+		dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	dcache_wbinv_poc_all();

@@ -1782,8 +1782,7 @@ pmap_init(void)
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);

@@ -484,8 +484,7 @@ start_cpu(u_int id, uint64_t target_cpu)
 	pcpup = &__pcpu[cpuid];
 	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
-	dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-	    M_WAITOK | M_ZERO);
+	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	dpcpu_init(dpcpu[cpuid - 1], cpuid);
 	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);

@@ -923,8 +923,7 @@ pmap_init(void)
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
 	TAILQ_INIT(&pv_dummy.pv_list);

@@ -165,7 +165,7 @@ linux_alloc_kmem(gfp_t flags, unsigned int order)
 	vm_offset_t addr;
 	if ((flags & GFP_DMA32) == 0) {
-		addr = kmem_malloc(kmem_arena, size, flags & GFP_NATIVE_MASK);
+		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
 	} else {
 		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
 		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

@@ -283,8 +283,8 @@ hypercall_create(void *arg __unused)
 	 *   the NX bit.
 	 * - Assume kmem_malloc() returns properly aligned memory.
 	 */
-	hypercall_context.hc_addr = (void *)kmem_malloc(kernel_arena, PAGE_SIZE,
-	    M_EXEC | M_WAITOK);
+	hypercall_context.hc_addr = (void *)kmem_malloc(PAGE_SIZE, M_EXEC |
+	    M_WAITOK);
 	hypercall_context.hc_paddr = vtophys(hypercall_context.hc_addr);
 	/* Get the 'reserved' bits, which requires preservation. */

@@ -328,11 +328,9 @@ start_all_aps(void)
 		apic_id = cpu_apic_ids[cpu];
 		/* allocate and set up a boot stack data page */
-		bootstacks[cpu] =
-		    (char *)kmem_malloc(kernel_arena, kstack_pages * PAGE_SIZE,
-		    M_WAITOK | M_ZERO);
-		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-		    M_WAITOK | M_ZERO);
+		bootstacks[cpu] = (char *)kmem_malloc(kstack_pages * PAGE_SIZE,
+		    M_WAITOK | M_ZERO);
+		dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 		/* setup a vector to our boot code */
 		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
 		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);

@@ -998,8 +998,7 @@ pmap_init(void)
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);

@@ -185,7 +185,7 @@ start_ap(int cpuid)
 	int cpus, ms;
 	cpus = mp_naps;
-	dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);
+	dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	mips_sync();

@@ -168,8 +168,8 @@ cpu_mp_start(void)
 			void *dpcpu;
 			pc = &__pcpu[cpu.cr_cpuid];
-			dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-			    M_WAITOK | M_ZERO);
+			dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK |
+			    M_ZERO);
 			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
 			dpcpu_init(dpcpu, cpu.cr_cpuid);
 		} else {

@@ -391,8 +391,7 @@ cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
 	pcpu_init(pcpup, id, sizeof(struct pcpu));
-	dpcpu[id - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-	    M_WAITOK | M_ZERO);
+	dpcpu[id - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	dpcpu_init(dpcpu[id - 1], id);
 	printf("Starting CPU %u (%lx)\n", id, target_cpu);

@@ -342,12 +342,10 @@ ap_start(phandle_t node, u_int mid, u_int cpu_impl)
 	cpuid_to_mid[cpuid] = mid;
 	cpu_identify(csa->csa_ver, clock, cpuid);
-	va = kmem_malloc(kernel_arena, PCPU_PAGES * PAGE_SIZE,
-	    M_WAITOK | M_ZERO);
+	va = kmem_malloc(PCPU_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
 	pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
 	pcpu_init(pc, cpuid, sizeof(*pc));
-	dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-	    M_WAITOK | M_ZERO), cpuid);
+	dpcpu_init((void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO), cpuid);
 	pc->pc_addr = va;
 	pc->pc_clock = clock;
 	pc->pc_impl = cpu_impl;

@@ -3687,7 +3687,7 @@ uma_large_malloc_domain(vm_size_t size, int domain, int wait)
 	if (slab == NULL)
 		return (NULL);
 	if (domain == UMA_ANYDOMAIN)
-		addr = kmem_malloc(NULL, size, wait);
+		addr = kmem_malloc(size, wait);
 	else
 		addr = kmem_malloc_domain(domain, size, wait);
 	if (addr != 0) {

@@ -64,7 +64,7 @@ vm_offset_t kmem_alloc_contig(vm_size_t size, int flags,
 vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
-vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
+vm_offset_t kmem_malloc(vm_size_t size, int flags);
 vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
 void kmem_free(struct vmem *, vm_offset_t, vm_size_t);

@@ -278,8 +278,7 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 		    (vm_paddr_t)1 << 32, ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
 		if (firstaddr == 0)
 #endif
-			firstaddr = kmem_malloc(kernel_arena, size,
-			    M_ZERO | M_WAITOK);
+			firstaddr = kmem_malloc(size, M_ZERO | M_WAITOK);
 		if (firstaddr == 0)
 			panic("startup: no room for tables");
 		goto again;

@@ -392,7 +392,7 @@ kmem_malloc_domain(int domain, vm_size_t size, int flags)
 }
 vm_offset_t
-kmem_malloc(struct vmem *vmem __unused, vm_size_t size, int flags)
+kmem_malloc(vm_size_t size, int flags)
 {
 	struct vm_domainset_iter di;
 	vm_offset_t addr;

@@ -350,18 +350,12 @@ start_xen_ap(int cpu)
 	const size_t stacksize = kstack_pages * PAGE_SIZE;
 	/* allocate and set up an idle stack data page */
-	bootstacks[cpu] =
-	    (void *)kmem_malloc(kernel_arena, stacksize, M_WAITOK | M_ZERO);
-	doublefault_stack =
-	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	mce_stack =
-	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	nmi_stack =
-	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	dbg_stack =
-	    (void *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	dpcpu =
-	    (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);
+	bootstacks[cpu] = (void *)kmem_malloc(stacksize, M_WAITOK | M_ZERO);
+	doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	nmi_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	dbg_stack = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
 	bootAP = cpu;