Provide access to pcpu structures for SMP kernels.

The basic idea is to use the same virtual address as a window onto
distinct physical memory locations - one per processor. The physical
address that you access through this mapping depends on which CPU you
are currently executing on. We can now use the same virtual address
on any processor to access its per-cpu area.
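
Sketched in C, this is just a trimmed restatement of the pcpu.h and
machdep.c hunks below (not a drop-in header), showing the two pieces:
the per-CPU backing storage and the single shared pointer:

/*
 * Per-CPU backing store: two pages per CPU, aligned on a 2-page
 * boundary so that the 'struct pcpu' always lands in the even page
 * of the wired TLB pair.
 */
extern char pcpu_space[MAXCPU][PAGE_SIZE * 2];
#define	PCPU_ADDR(cpu)		((struct pcpu *)(pcpu_space[(cpu)]))

/*
 * One kernel virtual address, stolen in pmap_bootstrap().  The wired
 * TLB entry installed on each CPU decides which pcpu_space[] slice
 * this address resolves to.
 */
extern struct pcpu *pcpup;
#define	PCPUP			pcpup
#define	PCPU_GET(member)	(PCPUP->pc_ ## member)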

The details are:

- The virtual address for 'struct pcpu *pcpup' is obtained by
  stealing 2 pages of KVA in pmap_bootstrap().

- The mapping from the constant virtual address to a distinct
  physical page is done in cpu_pcpu_init() through a wired TLB entry
  (see the sketch after this list).

- A side effect of this is that we reserve 2 pages of memory for the
  pcpu even though it needs much less than that. The unused memory is
  used as the boot stack for the BSP and the APs.
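
As a rough illustration of the second bullet, the wired mapping set up
in cpu_pcpu_init() boils down to the following (the helper name
pcpu_map_self() is made up for this sketch; the body mirrors the
machdep.c hunk below):

/*
 * Sketch only: map this CPU's two-page pcpu area at the shared virtual
 * address 'pcpup' through the wired TLB entry PCPU_TLB_ENTRY.  The even
 * and odd pages of the pair fill tlb_lo0 and tlb_lo1 respectively.
 */
static void
pcpu_map_self(struct pcpu *pcpu)
{
	struct tlb tlb;
	vm_paddr_t pa;
	int lobits;

	memset(&tlb, 0, sizeof(tlb));
	pa = vtophys(pcpu);			/* physical side of the window */
	lobits = PTE_RW | PTE_V | PTE_G | PTE_CACHE;
	tlb.tlb_hi = (vm_offset_t)pcpup;	/* shared virtual address */
	tlb.tlb_lo0 = mips_paddr_to_tlbpfn(pa) | lobits;
	tlb.tlb_lo1 = mips_paddr_to_tlbpfn(pa + PAGE_SIZE) | lobits;
	Mips_TLBWriteIndexed(PCPU_TLB_ENTRY, &tlb);
}

Each CPU runs this once during startup, so the same load through *pcpup
resolves to a different physical page on every processor.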

Remove SMP-specific bits from locore.S. The plan is to use a separate
mpboot.S for AP bootstrap.

Discussed on: freebsd-mips

Approved by: imp (mentor)
This commit is contained in:
neel 2010-01-30 01:54:29 +00:00
parent 248442cb37
commit f0bf9d2db5
6 changed files with 76 additions and 118 deletions

View File

@@ -309,8 +309,16 @@
/*
* The first TLB entry that write random hits.
* TLB entry 0 maps the kernel stack of the currently running thread
* TLB entry 1 maps the pcpu area of the processor (only for SMP builds)
*/
#define KSTACK_TLB_ENTRY 0
#ifdef SMP
#define PCPU_TLB_ENTRY 1
#define VMWIRED_ENTRIES 2
#else
#define VMWIRED_ENTRIES 1
#endif /* SMP */
/*
* The number of process id entries.

View File

@@ -38,35 +38,15 @@
struct pmap *pc_curpmap; /* pmap of curthread */ \
u_int32_t pc_next_asid; /* next ASID to alloc */ \
u_int32_t pc_asid_generation; /* current ASID generation */ \
u_int pc_pending_ipis; /* the IPIs pending to this CPU */ \
void *pc_boot_stack;
u_int pc_pending_ipis; /* IPIs pending to this CPU */
#ifdef _KERNEL
#ifdef SMP
static __inline struct pcpu*
get_pcpup(void)
{
/*
* FREEBSD_DEVELOPERS_FIXME
* In multiprocessor case, store/retrieve the pcpu structure
* address for current CPU in scratch register for fast access.
*
* In this routine, read the scratch register to retrieve the PCPU
* structure for this CPU
*/
struct pcpu *ret;
extern char pcpu_space[MAXCPU][PAGE_SIZE * 2];
#define PCPU_ADDR(cpu) (struct pcpu *)(pcpu_space[(cpu)])
/* ret should contain the pointer to the PCPU structure for this CPU */
return(ret);
}
#define PCPUP ((struct pcpu *)get_pcpup())
#else
/* Uni processor systems */
extern struct pcpu *pcpup;
#define PCPUP pcpup
#endif /* SMP */
#define PCPU_ADD(member, value) (PCPUP->pc_ ## member += (value))
#define PCPU_GET(member) (PCPUP->pc_ ## member)

View File

@@ -82,7 +82,6 @@ ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
ASSYM(PC_SEGBASE, offsetof(struct pcpu, pc_segbase));
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
ASSYM(PC_FPCURTHREAD, offsetof(struct pcpu, pc_fpcurthread));
ASSYM(PC_BOOT_STACK, offsetof(struct pcpu, pc_boot_stack));
ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));

View File

@@ -77,14 +77,9 @@
GLOBAL(fenvp)
.space 4 # Assumes mips32? Is that OK?
#endif
GLOBAL(stackspace)
.space NBPG /* Smaller than it should be since it's temp. */
.align 8
GLOBAL(topstack)
.set noreorder
.text
GLOBAL(btext)
@@ -133,6 +128,7 @@ VECTOR(_locore, unknown)
or t2, t1
mtc0 t2, COP_0_STATUS_REG
COP0_SYNC
/* Make sure KSEG0 is cached */
li t0, CFG_K0_CACHED
mtc0 t0, MIPS_COP_0_CONFIG
@@ -157,13 +153,6 @@ VECTOR(_locore, unknown)
sw t0, _C_LABEL(cpu_id)
sw t1, _C_LABEL(fpu_id)
/*
* Initialize stack and call machine startup.
*/
PTR_LA sp, _C_LABEL(topstack) - START_FRAME
PTR_LA gp, _C_LABEL(_gp)
sw zero, START_FRAME - 4(sp) # Zero out old ra for debugger
/*xxximp
* now that we pass a0...a3 to the platform_init routine, do we need
* to stash this stuff here?
@@ -174,58 +163,19 @@ VECTOR(_locore, unknown)
#endif
/*
* The following needs to be done differently for each platform and
* there needs to be a good way to plug this in.
* Initialize stack and call machine startup.
*/
#if defined(SMP) && defined(CPU_XLR)
/*
* Block all the slave CPUs
*/
/* XXX a0, a1, a2 shouldn't be used here */
/*
* Read the cpu id from the cp0 config register
* cpuid[9:4], thrid[3: 0]
*/
mfc0 a0, COP_0_CONFIG, 7
srl a1, a0, 4
andi a1, a1, 0x3f
andi a0, a0, 0xf
PTR_LA sp, _C_LABEL(pcpu_space)
addiu sp, (NBPG * 2) - START_FRAME
/* calculate linear cpuid */
sll t0, a1, 2
addu a2, t0, a0
/* Initially, disable all hardware threads on each core except thread0 */
li t1, VCPU_ID_0
li t2, XLR_THREAD_ENABLE_IND
mtcr t1, t2
#endif
sw zero, START_FRAME - 4(sp) # Zero out old ra for debugger
sw zero, START_FRAME - 8(sp) # Zero out old fp for debugger
#if defined(TARGET_OCTEON) /* Maybe this is mips32/64 generic? */
.set push
.set mips32r2
rdhwr t0, $0
.set pop
#else
move t0, zero
#endif
/* Stage the secondary cpu start until later */
bne t0, zero, start_secondary
nop
#ifdef SMP
PTR_LA t0, _C_LABEL(__pcpu)
SET_CPU_PCPU(t0)
/* If not master cpu, jump... */
/*XXX this assumes the above #if 0'd code runs */
bne a2, zero, start_secondary
nop
#endif
PTR_LA gp, _C_LABEL(_gp)
/* Call the platform-specific startup code. */
jal _C_LABEL(platform_start)
sw zero, START_FRAME - 8(sp) # Zero out old fp for debugger
nop
PTR_LA sp, _C_LABEL(thread0)
lw a0, TD_PCB(sp)
@@ -238,25 +188,4 @@ VECTOR(_locore, unknown)
PANIC("Startup failed!")
#ifdef SMP
start_secondary:
move a0, a1
2:
addiu t0, PCPU_SIZE
subu a1, 1
bne a1, zero, 2b
nop
SET_CPU_PCPU(t0)
smp_wait:
lw sp, PC_BOOT_STACK(t0)
beqz sp, smp_wait
nop
jal _C_LABEL(smp_init_secondary)
nop
#else
start_secondary:
b start_secondary
nop
#endif
VECTOR_END(_locore)

View File

@@ -115,12 +115,28 @@ int clocks_running = 0;
vm_offset_t kstack0;
/*
* Each entry in the pcpu_space[] array is laid out in the following manner:
* struct pcpu for cpu 'n' pcpu_space[n]
* boot stack for cpu 'n' pcpu_space[n] + PAGE_SIZE * 2 - START_FRAME
*
* Note that the boot stack grows downwards and we assume that we never
* use enough stack space to trample over the 'struct pcpu' that is at
* the beginning of the array.
*
* The array is aligned on a (PAGE_SIZE * 2) boundary so that the 'struct pcpu'
* is always in the even page frame of the wired TLB entry on SMP kernels.
*
* The array is in the .data section so that the stack does not get zeroed out
* when the .bss section is zeroed.
*/
char pcpu_space[MAXCPU][PAGE_SIZE * 2] \
__aligned(PAGE_SIZE * 2) __section(".data");
#ifdef SMP
struct pcpu __pcpu[MAXCPU];
char pcpu_boot_stack[KSTACK_PAGES * PAGE_SIZE * MAXCPU];
struct pcpu *pcpup = 0; /* initialized in pmap_bootstrap() */
#else
struct pcpu pcpu;
struct pcpu *pcpup = &pcpu;
struct pcpu *pcpup = (struct pcpu *)pcpu_space;
#endif
vm_offset_t phys_avail[PHYS_AVAIL_ENTRIES + 2];
@@ -269,11 +285,7 @@ void
mips_pcpu0_init()
{
/* Initialize pcpu info of cpu-zero */
#ifdef SMP
pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
#else
pcpu_init(pcpup, 0, sizeof(struct pcpu));
#endif
pcpu_init(PCPU_ADDR(0), 0, sizeof(struct pcpu));
PCPU_SET(curthread, &thread0);
}
@@ -283,6 +295,10 @@ mips_pcpu0_init()
void
mips_proc0_init(void)
{
#ifdef SMP
if (platform_processor_id() != 0)
panic("BSP must be processor number 0");
#endif
proc_linkup0(&proc0, &thread0);
KASSERT((kstack0 & PAGE_MASK) == 0,
@@ -410,12 +426,27 @@ void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
#ifdef SMP
if (cpuid != 0)
pcpu->pc_boot_stack = (void *)(pcpu_boot_stack + cpuid *
(KSTACK_PAGES * PAGE_SIZE));
vm_paddr_t pa;
struct tlb tlb;
int lobits;
#endif
pcpu->pc_next_asid = 1;
pcpu->pc_asid_generation = 1;
#ifdef SMP
/*
* Map the pcpu structure at the virtual address 'pcpup'.
* We use a wired tlb index to do this one-time mapping.
*/
memset(&tlb, 0, sizeof(tlb));
pa = vtophys(pcpu);
lobits = PTE_RW | PTE_V | PTE_G | PTE_CACHE;
tlb.tlb_hi = (vm_offset_t)pcpup;
tlb.tlb_lo0 = mips_paddr_to_tlbpfn(pa) | lobits;
tlb.tlb_lo1 = mips_paddr_to_tlbpfn(pa + PAGE_SIZE) | lobits;
Mips_TLBWriteIndexed(PCPU_TLB_ENTRY, &tlb);
#endif
}
int

View File

@@ -355,6 +355,17 @@ pmap_bootstrap(void)
virtual_avail = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
virtual_end = VM_MAX_KERNEL_ADDRESS;
#ifdef SMP
/*
* Steal some virtual address space to map the pcpu area.
*/
virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
pcpup = (struct pcpu *)virtual_avail;
virtual_avail += PAGE_SIZE * 2;
if (bootverbose)
printf("pcpu is available at virtual address %p.\n", pcpup);
#endif
/*
* Steal some virtual space that will not be in kernel_segmap. This
* va memory space will be used to map in kernel pages that are
@@ -428,8 +439,8 @@ pmap_bootstrap(void)
kernel_pmap->pm_segtab = kernel_segmap;
kernel_pmap->pm_active = ~0;
TAILQ_INIT(&kernel_pmap->pm_pvlist);
kernel_pmap->pm_asid[PCPU_GET(cpuid)].asid = PMAP_ASID_RESERVED;
kernel_pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
kernel_pmap->pm_asid[0].gen = 0;
pmap_max_asid = VMNUM_PIDS;
MachSetPID(0);
}