Make kstack_pages a tunable on arm, x86, and powerpc.

On i386, the initial thread stack is not adjusted by the tunable: that
stack is allocated too early in boot to have access to the kernel
environment.  See TD0_KSTACK_PAGES for the thread0 stack sizing on
i386.

The tunable was tested on x86 only.  From visual inspection, it seems
that it might work on arm and powerpc.  The arm USPACE_SVC_STACK_TOP
and powerpc USPACE macros seem to be already incorrect for threads
with a non-default kstack size.  I only changed the macros to use the
variable instead of the constant, since I cannot test on those
platforms.

On arm64, mips and sparc64, some static data structures are sized by
KSTACK_PAGES, so the tunable is disabled.

Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
commit 9033c894a1
parent 141883cce0
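For illustration, the new knob is set from the loader like any other
boot-time tunable.  The tunable name comes from the TUNABLE_INT_FETCH
call in the diff below; the value here is only an example, not a
recommendation:

	# /boot/loader.conf
	kern.kstack_pages="6"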
@@ -93,7 +93,6 @@ ASSYM(TDP_KTHREAD, TDP_KTHREAD);
 ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
 ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall));
 ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
-ASSYM(KSTACK_PAGES, KSTACK_PAGES);
 ASSYM(PAGE_SIZE, PAGE_SIZE);
 ASSYM(NPTEPG, NPTEPG);
 ASSYM(NPDEPG, NPDEPG);
@@ -1516,12 +1516,6 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
 	char *env;
 	size_t kstack0_sz;
 
-	thread0.td_kstack = physfree + KERNBASE;
-	thread0.td_kstack_pages = KSTACK_PAGES;
-	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
-	bzero((void *)thread0.td_kstack, kstack0_sz);
-	physfree += kstack0_sz;
-
 	/*
 	 * This may be done better later if it gets more high level
 	 * components in it. If so just link td->td_proc here.
@@ -1533,6 +1527,12 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
 	/* Init basic tunables, hz etc */
 	init_param1();
 
+	thread0.td_kstack = physfree + KERNBASE;
+	thread0.td_kstack_pages = kstack_pages;
+	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
+	bzero((void *)thread0.td_kstack, kstack0_sz);
+	physfree += kstack0_sz;
+
 	/*
 	 * make gdt memory segments
 	 */
@@ -348,7 +348,7 @@ native_start_all_aps(void)
 
 		/* allocate and set up an idle stack data page */
 		bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
-		    KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
+		    kstack_pages * PAGE_SIZE, M_WAITOK | M_ZERO);
 		doublefault_stack = (char *)kmem_malloc(kernel_arena,
 		    PAGE_SIZE, M_WAITOK | M_ZERO);
 		nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
@@ -356,7 +356,7 @@ native_start_all_aps(void)
 		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
 		    M_WAITOK | M_ZERO);
 
-		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
+		bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
 		bootAP = cpu;
 
 		/* attempt to start the Application Processor */
@@ -1066,7 +1066,7 @@ init_proc0(vm_offset_t kstack)
 	proc_linkup0(&proc0, &thread0);
 	thread0.td_kstack = kstack;
 	thread0.td_pcb = (struct pcb *)
-	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
+	    (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
 	thread0.td_pcb->pcb_flags = 0;
 	thread0.td_pcb->pcb_vfpcpu = -1;
 	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ;
@@ -1360,7 +1360,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
 	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
 	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
-	valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
+	valloc_pages(kernelstack, kstack_pages * MAXCPU);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 
 	/*
@@ -1614,7 +1614,7 @@ initarm(struct arm_boot_params *abp)
 	irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
 	abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
 	undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
-	kernelstack = pmap_preboot_get_vpages(KSTACK_PAGES * MAXCPU);
+	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);
 
 	/* Allocate message buffer. */
 	msgbufp = (void *)pmap_preboot_get_vpages(
@@ -512,7 +512,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
 	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
 	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
-	valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
+	valloc_pages(kernelstack, kstack_pages * MAXCPU);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 
 	/*
@@ -553,7 +553,7 @@ initarm(struct arm_boot_params *abp)
 	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
 	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+	    kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 
 	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
 	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
@@ -222,7 +222,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
 	valloc_pages(abtstack, ABT_STACK_SIZE);
 	valloc_pages(undstack, UND_STACK_SIZE);
-	valloc_pages(kernelstack, KSTACK_PAGES);
+	valloc_pages(kernelstack, kstack_pages);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 
 	/*
@@ -260,7 +260,7 @@ initarm(struct arm_boot_params *abp)
 	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
 	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+	    kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 
 	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
 	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
@@ -131,7 +131,7 @@
 #define KSTACK_GUARD_PAGES	1
 #endif /* !KSTACK_GUARD_PAGES */
 
-#define USPACE_SVC_STACK_TOP	(KSTACK_PAGES * PAGE_SIZE)
+#define USPACE_SVC_STACK_TOP	(kstack_pages * PAGE_SIZE)
 
 /*
  * Mach derived conversion macros
@@ -271,7 +271,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
 	valloc_pages(abtstack, ABT_STACK_SIZE);
 	valloc_pages(undstack, UND_STACK_SIZE);
-	valloc_pages(kernelstack, KSTACK_PAGES);
+	valloc_pages(kernelstack, kstack_pages);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 	/*
 	 * Now we start construction of the L1 page table
@@ -307,7 +307,7 @@ initarm(struct arm_boot_params *abp)
 	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
 	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+	    kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 
 	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
 	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
@@ -225,7 +225,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
 	valloc_pages(abtstack, ABT_STACK_SIZE);
 	valloc_pages(undstack, UND_STACK_SIZE);
-	valloc_pages(kernelstack, KSTACK_PAGES);
+	valloc_pages(kernelstack, kstack_pages);
 	alloc_pages(minidataclean.pv_pa, 1);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 	/*
@@ -226,7 +226,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
 	valloc_pages(abtstack, ABT_STACK_SIZE);
 	valloc_pages(undstack, UND_STACK_SIZE);
-	valloc_pages(kernelstack, KSTACK_PAGES);
+	valloc_pages(kernelstack, kstack_pages);
 	alloc_pages(minidataclean.pv_pa, 1);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 	/*
@@ -225,7 +225,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
 	valloc_pages(abtstack, ABT_STACK_SIZE);
 	valloc_pages(undstack, UND_STACK_SIZE);
-	valloc_pages(kernelstack, KSTACK_PAGES);
+	valloc_pages(kernelstack, kstack_pages);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 	/*
 	 * Now we start construction of the L1 page table
@@ -295,7 +295,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
 	valloc_pages(abtstack, ABT_STACK_SIZE);
 	valloc_pages(undstack, UND_STACK_SIZE);
-	valloc_pages(kernelstack, KSTACK_PAGES);
+	valloc_pages(kernelstack, kstack_pages);
 	alloc_pages(minidataclean.pv_pa, 1);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 
@@ -206,7 +206,7 @@ initarm(struct arm_boot_params *abp)
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
 	valloc_pages(abtstack, ABT_STACK_SIZE);
 	valloc_pages(undstack, UND_STACK_SIZE);
-	valloc_pages(kernelstack, KSTACK_PAGES);
+	valloc_pages(kernelstack, kstack_pages);
 	alloc_pages(minidataclean.pv_pa, 1);
 	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 	/*
@@ -462,7 +462,7 @@ db_findstack_cmd(db_expr_t addr, bool have_addr, db_expr_t dummy3 __unused,
 	for (ks_ce = kstack_cache; ks_ce != NULL;
 	     ks_ce = ks_ce->next_ks_entry) {
 		if ((vm_offset_t)ks_ce <= saddr && saddr < (vm_offset_t)ks_ce +
-		    PAGE_SIZE * KSTACK_PAGES) {
+		    PAGE_SIZE * kstack_pages) {
 			db_printf("Cached stack %p\n", ks_ce);
 			return;
 		}
@@ -101,8 +101,6 @@ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
 ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
 ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall));
 ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
-/* ASSYM(UPAGES, UPAGES);*/
-ASSYM(KSTACK_PAGES, KSTACK_PAGES);
+ASSYM(TD0_KSTACK_PAGES, TD0_KSTACK_PAGES);
 ASSYM(PAGE_SIZE, PAGE_SIZE);
 ASSYM(NPTEPG, NPTEPG);
@@ -348,7 +348,7 @@ start_all_aps(void)
 
 		/* allocate and set up a boot stack data page */
 		bootstacks[cpu] =
-		    (char *)kmem_malloc(kernel_arena, KSTACK_PAGES * PAGE_SIZE,
+		    (char *)kmem_malloc(kernel_arena, kstack_pages * PAGE_SIZE,
 		    M_WAITOK | M_ZERO);
 		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
 		    M_WAITOK | M_ZERO);
@@ -360,7 +360,8 @@ start_all_aps(void)
 		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
 #endif
 
-		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 4;
+		bootSTK = (char *)bootstacks[cpu] + kstack_pages *
+		    PAGE_SIZE - 4;
 		bootAP = cpu;
 
 		/* attempt to start the Application Processor */
@@ -275,7 +275,7 @@ i386_extend_pcb(struct thread *td)
 	ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
 	    M_WAITOK | M_ZERO);
 	/* -16 is so we can convert a trapframe into vm86trapframe inplace */
-	ext->ext_tss.tss_esp0 = td->td_kstack + ctob(KSTACK_PAGES) -
+	ext->ext_tss.tss_esp0 = td->td_kstack + ctob(td->td_kstack_pages) -
 	    sizeof(struct pcb) - 16;
 	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
 	/*
@@ -832,7 +832,7 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp,
 	mem_charged = 0;
 	vm2 = NULL;
 	if (pages == 0)
-		pages = KSTACK_PAGES;
+		pages = kstack_pages;
 	/* Allocate new proc. */
 	newproc = uma_zalloc(proc_zone, M_WAITOK);
 	td2 = FIRST_THREAD_IN_PROC(newproc);
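The fork1() change above is where "0 means default" is resolved, so
callers that pass an explicit page count keep their behavior.  A sketch
of such a caller, assuming the stock kproc_create(9) interface (the
worker function and page count are hypothetical):

	#include <sys/param.h>
	#include <sys/kthread.h>

	static void my_worker(void *arg);	/* hypothetical worker */

	static int
	start_my_worker(void)
	{
		/* pages = 0 would mean "use kstack_pages"; ask for 8. */
		return (kproc_create(my_worker, NULL, NULL, 0, 8,
		    "my_worker"));
	}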
@@ -159,6 +159,9 @@ void
 init_param1(void)
 {
 
+#if !defined(__mips__) && !defined(__arm64__) && !defined(__sparc64__)
+	TUNABLE_INT_FETCH("kern.kstack_pages", &kstack_pages);
+#endif
 	hz = -1;
 	TUNABLE_INT_FETCH("kern.hz", &hz);
 	if (hz == -1)
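For reference, a minimal sketch of the pattern this hunk relies on
(the initializer is an assumption; TUNABLE_INT_FETCH() is a
kernel-environment lookup that leaves the variable untouched when the
tunable is unset, so the compile-time default survives):

	#include <sys/param.h>
	#include <sys/kernel.h>

	int kstack_pages = KSTACK_PAGES;	/* compile-time default */

	static void
	fetch_kstack_pages_tunable(void)
	{
		/* No-op unless kern.kstack_pages was set at the loader. */
		TUNABLE_INT_FETCH("kern.kstack_pages", &kstack_pages);
	}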
@@ -932,13 +932,13 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	 * Allocate a kernel stack with a guard page for thread0 and map it
 	 * into the kernel page map.
 	 */
-	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+	pa = moea_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
 	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
-	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
+	virtual_avail = va + kstack_pages * PAGE_SIZE;
 	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
 	thread0.td_kstack = va;
-	thread0.td_kstack_pages = KSTACK_PAGES;
-	for (i = 0; i < KSTACK_PAGES; i++) {
+	thread0.td_kstack_pages = kstack_pages;
+	for (i = 0; i < kstack_pages; i++) {
 		moea_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
@@ -917,13 +917,13 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
 	 * Allocate a kernel stack with a guard page for thread0 and map it
 	 * into the kernel page map.
 	 */
-	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+	pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
 	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
-	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
+	virtual_avail = va + kstack_pages * PAGE_SIZE;
 	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
 	thread0.td_kstack = va;
-	thread0.td_kstack_pages = KSTACK_PAGES;
-	for (i = 0; i < KSTACK_PAGES; i++) {
+	thread0.td_kstack_pages = kstack_pages;
+	for (i = 0; i < kstack_pages; i++) {
 		moea64_kenter(mmup, va, pa);
 		pa += PAGE_SIZE;
 		va += PAGE_SIZE;
@@ -1207,7 +1207,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	/* Steal physical memory for kernel stack from the end */
 	/* of the first avail region */
 	/*******************************************************/
-	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
+	kstack0_sz = kstack_pages * PAGE_SIZE;
 	kstack0_phys = availmem_regions[0].mr_start +
 	    availmem_regions[0].mr_size;
 	kstack0_phys -= kstack0_sz;
@@ -1312,7 +1312,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	/* Enter kstack0 into kernel map, provide guard page */
 	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
 	thread0.td_kstack = kstack0;
-	thread0.td_kstack_pages = KSTACK_PAGES;
+	thread0.td_kstack_pages = kstack_pages;
 
 	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
 	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
@@ -1320,7 +1320,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
 
 	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
-	for (i = 0; i < KSTACK_PAGES; i++) {
+	for (i = 0; i < kstack_pages; i++) {
 		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
 		kstack0 += PAGE_SIZE;
 		kstack0_phys += PAGE_SIZE;
@@ -111,7 +111,7 @@
 #endif
 #endif
 #define KSTACK_GUARD_PAGES	1	/* pages of kstack guard; 0 disables */
-#define USPACE		(KSTACK_PAGES * PAGE_SIZE)	/* total size of pcb */
+#define USPACE		(kstack_pages * PAGE_SIZE)	/* total size of pcb */
 
 /*
  * Mach derived conversion macros
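As the commit message notes, USPACE keyed to the run-time default is
still wrong for a thread created with a non-default stack size; a
per-thread consumer would need the thread's own page count.  A
hypothetical helper (not part of this commit) that makes the
distinction explicit:

	/* Per-thread stack size; USPACE only covers the default case. */
	#define	THREAD_KSTACK_BYTES(td)	\
		((td)->td_kstack_pages * PAGE_SIZE)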
@@ -327,11 +327,11 @@ vm_thread_new(struct thread *td, int pages)
 
 	/* Bounds check */
 	if (pages <= 1)
-		pages = KSTACK_PAGES;
+		pages = kstack_pages;
 	else if (pages > KSTACK_MAX_PAGES)
 		pages = KSTACK_MAX_PAGES;
 
-	if (pages == KSTACK_PAGES) {
+	if (pages == kstack_pages) {
 		mtx_lock(&kstack_cache_mtx);
 		if (kstack_cache != NULL) {
 			ks_ce = kstack_cache;
@@ -340,7 +340,7 @@ vm_thread_new(struct thread *td, int pages)
 
 			td->td_kstack_obj = ks_ce->ksobj;
 			td->td_kstack = (vm_offset_t)ks_ce;
-			td->td_kstack_pages = KSTACK_PAGES;
+			td->td_kstack_pages = kstack_pages;
 			return (1);
 		}
 		mtx_unlock(&kstack_cache_mtx);
@@ -444,7 +444,7 @@ vm_thread_dispose(struct thread *td)
 	ks = td->td_kstack;
 	td->td_kstack = 0;
 	td->td_kstack_pages = 0;
-	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
+	if (pages == kstack_pages && kstacks <= kstack_cache_size) {
 		ks_ce = (struct kstack_cache_entry *)ks;
 		ks_ce->ksobj = ksobj;
 		mtx_lock(&kstack_cache_mtx);
@@ -471,7 +471,7 @@ vm_thread_stack_lowmem(void *nulll)
 		ks_ce = ks_ce->next_ks_entry;
 
 		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
-		    KSTACK_PAGES);
+		    kstack_pages);
 	}
 }
 
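The vm_glue changes above keep the free-stack cache restricted to
default-sized stacks, which lets a freed stack's first bytes double as
the list node.  A sketch of the consumer side, using the names visible
in the diff (simplified, editor's illustration):

	/* Pop one cached default-size stack, or NULL if the cache is empty. */
	static struct kstack_cache_entry *
	kstack_cache_pop(void)
	{
		struct kstack_cache_entry *ks_ce;

		mtx_lock(&kstack_cache_mtx);
		ks_ce = kstack_cache;
		if (ks_ce != NULL)
			kstack_cache = ks_ce->next_ks_entry;
		mtx_unlock(&kstack_cache_mtx);
		return (ks_ce);
	}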
@@ -215,7 +215,7 @@ start_xen_ap(int cpu)
 {
 	struct vcpu_guest_context *ctxt;
 	int ms, cpus = mp_naps;
-	const size_t stacksize = KSTACK_PAGES * PAGE_SIZE;
+	const size_t stacksize = kstack_pages * PAGE_SIZE;
 
 	/* allocate and set up an idle stack data page */
 	bootstacks[cpu] =
@@ -227,7 +227,7 @@ start_xen_ap(int cpu)
 	dpcpu =
 	    (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);
 
-	bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
+	bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
	bootAP = cpu;
 
 	ctxt = malloc(sizeof(*ctxt), M_TEMP, M_WAITOK | M_ZERO);