Add sparc64 support.

Compiled (and helped) by:	pluknet
Attilio Rao 2011-05-06 21:53:29 +00:00
parent 2953224e26
commit 0d9fa7bd31
7 changed files with 105 additions and 72 deletions
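
The conversion below is largely mechanical: every place that treated a CPU mask as a plain integer bit field now goes through the cpuset_t macros from <sys/cpuset.h>. As a quick orientation before the per-file hunks, here is a hedged sketch of the correspondences the diff relies on; the helper name is hypothetical and not part of the commit, but every macro shown appears in the hunks below.

#include <sys/param.h>
#include <sys/cpuset.h>

/*
 * Illustrative sketch only: the old integer idiom (in comments) next to
 * the cpuset_t macro this commit substitutes for it.
 */
static void
cpuset_idiom_sketch(u_int cpu)
{
        cpuset_t set, other;

        CPU_ZERO(&set);                         /* was: mask = 0;            */
        CPU_ZERO(&other);
        CPU_SET(cpu, &set);                     /* was: mask |= 1 << cpu;    */
        if (CPU_ISSET(cpu, &set))               /* was: mask & (1 << cpu)    */
                CPU_CLR(cpu, &set);             /* was: mask &= ~(1 << cpu); */
        CPU_OR(&set, &other);                   /* was: set |= other;        */
        CPU_AND(&set, &other);                  /* was: set &= other;        */
        if (CPU_EMPTY(&set))                    /* was: set == 0             */
                return;
}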

View File

@@ -55,7 +55,6 @@ typedef unsigned long __uint64_t;
* Standard type definitions.
*/
typedef __int32_t __clock_t; /* clock()... */
typedef unsigned int __cpumask_t;
typedef __int64_t __critical_t;
typedef double __double_t;
typedef float __float_t;
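
The hunk above drops the machine-dependent __cpumask_t from the sparc64 <machine/_types.h>; consumers switch to the machine-independent cpuset_t, which is an array of longs rather than a single word and so no longer fits in one register or one plain atomic operation. Roughly, and from memory of the <sys/_cpuset.h> of that era (a sketch, not part of this diff; NBBY and howmany() come from <sys/param.h>):

#define _NCPUBITS       (sizeof(long) * NBBY)   /* bits per array element */

typedef struct _cpuset {
        long    __bits[howmany(CPU_SETSIZE, _NCPUBITS)];  /* CPU_SETSIZE is MAXCPU in the kernel */
} cpuset_t;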

View File

@@ -40,6 +40,7 @@
#define _MACHINE_PMAP_H_
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/cache.h>
@@ -61,7 +62,7 @@ struct pmap {
struct mtx pm_mtx;
struct tte *pm_tsb;
vm_object_t pm_tsb_obj;
cpumask_t pm_active;
cpuset_t pm_active;
u_int pm_context[MAXCPU];
struct pmap_statistics pm_stats;
};
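
Because cpuset_t is a struct embedded by value in struct pmap, <sys/_cpuset.h> has to be pulled in here for the full definition; the old cpumask_t was a scalar typedef already available everywhere. Call sites that touch pm_active (see the pmap.c and tlb.c hunks further down) now use the set macros instead of |= and &, and they pass PCPU_PTR(cpumask) because the macros want a pointer. A minimal sketch with a hypothetical helper name:

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/cpuset.h>

/*
 * Sketch only: "is this pmap active on the current CPU?" once pm_active
 * is a cpuset_t.  Assumes the caller is pinned or has interrupts
 * disabled so the per-CPU pointer keeps referring to the same CPU.
 */
static int
pm_is_active_here(cpuset_t *active)
{
        return (CPU_OVERLAP(active, PCPU_PTR(cpumask)));  /* was: (*active & cpumask) != 0 */
}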

View File

@@ -38,6 +38,7 @@
#ifndef LOCORE
#include <sys/cpuset.h>
#include <sys/proc.h>
#include <sys/sched.h>
@@ -76,17 +77,17 @@ struct cpu_start_args {
};
struct ipi_cache_args {
cpumask_t ica_mask;
cpuset_t ica_mask;
vm_paddr_t ica_pa;
};
struct ipi_rd_args {
cpumask_t ira_mask;
cpuset_t ira_mask;
register_t *ira_val;
};
struct ipi_tlb_args {
cpumask_t ita_mask;
cpuset_t ita_mask;
struct pmap *ita_pmap;
u_long ita_start;
u_long ita_end;
@@ -100,7 +101,7 @@ extern struct pcb stoppcbs[];
void cpu_mp_bootstrap(struct pcpu *pc);
void cpu_mp_shutdown(void);
typedef void cpu_ipi_selected_t(u_int, u_long, u_long, u_long);
typedef void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
extern cpu_ipi_selected_t *cpu_ipi_selected;
typedef void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
extern cpu_ipi_single_t *cpu_ipi_single;
@@ -140,7 +141,7 @@ ipi_all_but_self(u_int ipi)
}
static __inline void
ipi_selected(u_int cpus, u_int ipi)
ipi_selected(cpuset_t cpus, u_int ipi)
{
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
@@ -197,7 +198,8 @@ ipi_rd(u_int cpu, void *func, u_long *val)
sched_pin();
ira = &ipi_rd_args;
mtx_lock_spin(&ipi_mtx);
ira->ira_mask = 1 << cpu | PCPU_GET(cpumask);
ira->ira_mask = PCPU_GET(cpumask);
CPU_SET(cpu, &ira->ira_mask);
ira->ira_val = val;
cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
return (&ira->ira_mask);
@@ -207,18 +209,21 @@ static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
struct ipi_tlb_args *ita;
cpumask_t cpus;
cpuset_t cpus;
if (smp_cpus == 1)
return (NULL);
sched_pin();
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
cpus = pm->pm_active;
CPU_AND(&cpus, PCPU_PTR(other_cpus));
if (CPU_EMPTY(&cpus)) {
sched_unpin();
return (NULL);
}
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
CPU_OR(&cpus, PCPU_PTR(cpumask));
ita->ita_mask = cpus;
ita->ita_pmap = pm;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
(u_long)ita);
@@ -229,18 +234,21 @@ static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
struct ipi_tlb_args *ita;
cpumask_t cpus;
cpuset_t cpus;
if (smp_cpus == 1)
return (NULL);
sched_pin();
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
cpus = pm->pm_active;
CPU_AND(&cpus, PCPU_PTR(other_cpus));
if (CPU_EMPTY(&cpus)) {
sched_unpin();
return (NULL);
}
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
CPU_OR(&cpus, PCPU_PTR(cpumask));
ita->ita_mask = cpus;
ita->ita_pmap = pm;
ita->ita_va = va;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
@@ -251,18 +259,21 @@ static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
struct ipi_tlb_args *ita;
cpumask_t cpus;
cpuset_t cpus;
if (smp_cpus == 1)
return (NULL);
sched_pin();
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
cpus = pm->pm_active;
CPU_AND(&cpus, PCPU_PTR(other_cpus));
if (CPU_EMPTY(&cpus)) {
sched_unpin();
return (NULL);
}
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
CPU_OR(&cpus, PCPU_PTR(cpumask));
ita->ita_mask = cpus;
ita->ita_pmap = pm;
ita->ita_start = start;
ita->ita_end = end;
@@ -274,11 +285,11 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
static __inline void
ipi_wait(void *cookie)
{
volatile cpumask_t *mask;
volatile cpuset_t *mask;
if ((mask = cookie) != NULL) {
atomic_clear_int(mask, PCPU_GET(cpumask));
while (*mask != 0)
CPU_NAND_ATOMIC(mask, PCPU_PTR(cpumask));
while (!CPU_EMPTY(mask))
;
mtx_unlock_spin(&ipi_mtx);
sched_unpin();
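
Two things change shape in this header. First, cpu_ipi_selected_t now takes the whole cpuset_t by value instead of a u_int mask, so the IPI front-ends hand sets through unchanged. Second, the ipi_tlb_*_demap() helpers can no longer build the target set in a single expression, because CPU_AND()/CPU_OR() modify their first argument in place; each helper now copies, intersects, tests, then widens, as condensed in the sketch below (illustrative only, assuming the same kernel headers as the hunks above). ipi_wait() likewise trades atomic_clear_int() for CPU_NAND_ATOMIC() and spins on CPU_EMPTY() rather than comparing against zero.

/*
 * Condensed sketch of the sequence now used by the ipi_tlb_*_demap()
 * helpers above; the function name is hypothetical.
 */
static __inline cpuset_t
ipi_tlb_targets_sketch(struct pmap *pm)
{
        cpuset_t cpus;

        cpus = pm->pm_active;                   /* struct copy; was: cpus =      */
        CPU_AND(&cpus, PCPU_PTR(other_cpus));   /*   pm->pm_active & other_cpus  */
        if (CPU_EMPTY(&cpus))                   /* was: cpus == 0                */
                return (cpus);                  /* nothing remote to notify      */
        CPU_OR(&cpus, PCPU_PTR(cpumask));       /* was: cpus | PCPU_GET(cpumask) */
        return (cpus);
}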

View File

@@ -445,8 +445,7 @@ intr_describe(int vec, void *ih, const char *descr)
* allocate CPUs round-robin.
*/
/* The BSP is always a valid target. */
static cpumask_t intr_cpus = (1 << 0);
static cpuset_t intr_cpus;
static int current_cpu;
static void
@@ -468,7 +467,7 @@ intr_assign_next_cpu(struct intr_vector *iv)
current_cpu++;
if (current_cpu > mp_maxid)
current_cpu = 0;
} while (!(intr_cpus & (1 << current_cpu)));
} while (!CPU_ISSET(current_cpu, &intr_cpus));
}
/* Attempt to bind the specified IRQ to the specified CPU. */
@@ -504,7 +503,7 @@ intr_add_cpu(u_int cpu)
if (bootverbose)
printf("INTR: Adding CPU %d as a target\n", cpu);
intr_cpus |= (1 << cpu);
CPU_SET(cpu, &intr_cpus);
}
/*
@@ -518,6 +517,9 @@ intr_shuffle_irqs(void *arg __unused)
struct intr_vector *iv;
int i;
/* The BSP is always a valid target. */
CPU_SETOF(0, &intr_cpus);
/* Don't bother on UP. */
if (mp_ncpus == 1)
return;
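
A cpuset_t cannot be initialized with a constant expression the way `(1 << 0)` initialized the old intr_cpus, so the set now starts empty and the BSP is added at intr_shuffle_irqs() time with CPU_SETOF(), which zeroes the set and then sets exactly one bit. The round-robin target selection only changes its membership test; as a stand-alone sketch (hypothetical function name):

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/smp.h>            /* mp_maxid */

static cpuset_t intr_cpus;      /* valid interrupt targets; CPU_SETOF(0, ...) at boot */
static int current_cpu;

/* Return the next CPU in intr_cpus, wrapping after mp_maxid. */
static u_int
next_intr_cpu(void)
{
        do {
                current_cpu++;
                if (current_cpu > mp_maxid)
                        current_cpu = 0;
        } while (!CPU_ISSET(current_cpu, &intr_cpus));
        return (current_cpu);
}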

View File

@@ -121,7 +121,7 @@ cpu_ipi_single_t *cpu_ipi_single;
static vm_offset_t mp_tramp;
static u_int cpuid_to_mid[MAXCPU];
static int isjbus;
static volatile cpumask_t shutdown_cpus;
static volatile cpuset_t shutdown_cpus;
static void ap_count(phandle_t node, u_int mid, u_int cpu_impl);
static void ap_start(phandle_t node, u_int mid, u_int cpu_impl);
@@ -228,7 +228,7 @@ void
cpu_mp_setmaxid()
{
all_cpus = 1 << curcpu;
CPU_SETOF(curcpu, &all_cpus);
mp_ncpus = 1;
mp_maxid = 0;
@@ -283,6 +283,7 @@ sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
void
cpu_mp_start(void)
{
cpuset_t ocpus;
mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);
@@ -299,7 +300,9 @@ cpu_mp_start(void)
KASSERT(!isjbus || mp_ncpus <= IDR_JALAPENO_MAX_BN_PAIRS,
("%s: can only IPI a maximum of %d JBus-CPUs",
__func__, IDR_JALAPENO_MAX_BN_PAIRS));
PCPU_SET(other_cpus, all_cpus & ~(1 << curcpu));
ocpus = all_cpus;
CPU_CLR(curcpu, &ocpus);
PCPU_SET(other_cpus, ocpus);
smp_active = 1;
}
@@ -357,7 +360,7 @@ ap_start(phandle_t node, u_int mid, u_int cpu_impl)
cache_init(pc);
all_cpus |= 1 << cpuid;
CPU_SET(cpuid, &all_cpus);
intr_add_cpu(cpuid);
}
@@ -421,6 +424,7 @@ cpu_mp_unleash(void *v)
void
cpu_mp_bootstrap(struct pcpu *pc)
{
cpuset_t ocpus;
volatile struct cpu_start_args *csa;
csa = &cpu_start_args;
@@ -465,7 +469,9 @@ cpu_mp_bootstrap(struct pcpu *pc)
smp_cpus++;
KASSERT(curthread != NULL, ("%s: curthread", __func__));
PCPU_SET(other_cpus, all_cpus & ~(1 << curcpu));
ocpus = all_cpus;
CPU_CLR(curcpu, &ocpus);
PCPU_SET(other_cpus, ocpus);
printf("SMP: AP CPU #%d Launched!\n", curcpu);
csa->csa_count--;
@@ -484,14 +490,22 @@ cpu_mp_bootstrap(struct pcpu *pc)
void
cpu_mp_shutdown(void)
{
cpuset_t cpus;
int i;
critical_enter();
shutdown_cpus = PCPU_GET(other_cpus);
if (stopped_cpus != PCPU_GET(other_cpus)) /* XXX */
stop_cpus(stopped_cpus ^ PCPU_GET(other_cpus));
cpus = shutdown_cpus;
/* XXX: Stop all the CPUs which aren't already stopped. */
if (CPU_CMP(&stopped_cpus, &cpus)) {
/* pc_other_cpus is just a flat "on" mask without curcpu. */
CPU_NAND(&cpus, &stopped_cpus);
stop_cpus(cpus);
}
i = 0;
while (shutdown_cpus != 0) {
while (!CPU_EMPTY(&shutdown_cpus)) {
if (i++ > 100000) {
printf("timeout shutting down CPUs.\n");
break;
@@ -509,20 +523,24 @@ cpu_ipi_ast(struct trapframe *tf)
static void
cpu_ipi_stop(struct trapframe *tf)
{
cpuset_t tcmask;
CTR2(KTR_SMP, "%s: stopped %d", __func__, curcpu);
sched_pin();
savectx(&stoppcbs[curcpu]);
atomic_set_acq_int(&stopped_cpus, PCPU_GET(cpumask));
while ((started_cpus & PCPU_GET(cpumask)) == 0) {
if ((shutdown_cpus & PCPU_GET(cpumask)) != 0) {
atomic_clear_int(&shutdown_cpus, PCPU_GET(cpumask));
tcmask = PCPU_GET(cpumask);
CPU_OR_ATOMIC(&stopped_cpus, &tcmask);
while (!CPU_OVERLAP(&started_cpus, &tcmask)) {
if (CPU_OVERLAP(&shutdown_cpus, &tcmask)) {
CPU_NAND_ATOMIC(&shutdown_cpus, &tcmask);
(void)intr_disable();
for (;;)
;
}
}
atomic_clear_rel_int(&started_cpus, PCPU_GET(cpumask));
atomic_clear_rel_int(&stopped_cpus, PCPU_GET(cpumask));
CPU_NAND_ATOMIC(&started_cpus, &tcmask);
CPU_NAND_ATOMIC(&stopped_cpus, &tcmask);
sched_unpin();
CTR2(KTR_SMP, "%s: restarted %d", __func__, curcpu);
}
@@ -551,13 +569,13 @@ cpu_ipi_hardclock(struct trapframe *tf)
}
static void
spitfire_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
spitfire_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
u_int cpu;
while (cpus) {
cpu = ffs(cpus) - 1;
cpus &= ~(1 << cpu);
while (!CPU_EMPTY(&cpus)) {
cpu = cpusetobj_ffs(&cpus) - 1;
CPU_CLR(cpu, &cpus);
spitfire_ipi_single(cpu, d0, d1, d2);
}
}
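
Walking the members of a set replaces ffs() on the raw word with cpusetobj_ffs(), a kernel-only helper added as part of the cpuset work; like ffs(3) it is 1-based and returns 0 for an empty set, which is why the loop above pairs it with !CPU_EMPTY(). The generic idiom, as a sketch over an already-populated set:

        cpuset_t set;
        u_int cpu;

        /* Visit every member of 'set', lowest index first (sketch). */
        while (!CPU_EMPTY(&set)) {
                cpu = cpusetobj_ffs(&set) - 1;  /* 1-based, like ffs(3) */
                CPU_CLR(cpu, &set);
                /* act on 'cpu' here */
        }
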
@@ -657,20 +675,21 @@ cheetah_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
}
static void
cheetah_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
cheetah_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
char pbuf[CPUSETBUFSIZ];
register_t s;
u_long ids;
u_int bnp;
u_int cpu;
int i;
KASSERT((cpus & (1 << curcpu)) == 0,
("%s: CPU can't IPI itself", __func__));
KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself",
__func__));
KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
IDR_CHEETAH_ALL_BUSY) == 0,
("%s: outstanding dispatch", __func__));
if (cpus == 0)
if (CPU_EMPTY(&cpus))
return;
ids = 0;
for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
@@ -681,7 +700,7 @@ cheetah_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
membar(Sync);
bnp = 0;
for (cpu = 0; cpu < mp_ncpus; cpu++) {
if ((cpus & (1 << cpu)) != 0) {
if (CPU_ISSET(cpu, &cpus)) {
stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
IDC_ITID_SHIFT) | bnp << IDC_BN_SHIFT,
ASI_SDB_INTR_W, 0);
@@ -698,9 +717,9 @@ cheetah_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
return;
bnp = 0;
for (cpu = 0; cpu < mp_ncpus; cpu++) {
if ((cpus & (1 << cpu)) != 0) {
if (CPU_ISSET(cpu, &cpus)) {
if ((ids & (IDR_NACK << (2 * bnp))) == 0)
cpus &= ~(1 << cpu);
CPU_CLR(cpu, &cpus);
bnp++;
}
}
@@ -709,7 +728,7 @@ cheetah_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
* CPUs we actually haven't tried to send an IPI to,
* but which apparently can be safely ignored.
*/
if (cpus == 0)
if (CPU_EMPTY(&cpus))
return;
/*
* Leave interrupts enabled for a bit before retrying
@@ -719,11 +738,11 @@ cheetah_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
DELAY(2 * mp_ncpus);
}
if (kdb_active != 0 || panicstr != NULL)
printf("%s: couldn't send IPI (cpus=0x%u ids=0x%lu)\n",
__func__, cpus, ids);
printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n",
__func__, cpusetobj_strprint(pbuf, &cpus), ids);
else
panic("%s: couldn't send IPI (cpus=0x%u ids=0x%lu)",
__func__, cpus, ids);
panic("%s: couldn't send IPI (cpus=%s ids=0x%lu)",
__func__, cpusetobj_strprint(pbuf, &cpus), ids);
}
static void
@@ -772,19 +791,20 @@ jalapeno_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
}
static void
jalapeno_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
jalapeno_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
char pbuf[CPUSETBUFSIZ];
register_t s;
u_long ids;
u_int cpu;
int i;
KASSERT((cpus & (1 << curcpu)) == 0,
("%s: CPU can't IPI itself", __func__));
KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself",
__func__));
KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
IDR_CHEETAH_ALL_BUSY) == 0,
("%s: outstanding dispatch", __func__));
if (cpus == 0)
if (CPU_EMPTY(&cpus))
return;
ids = 0;
for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
@@ -794,7 +814,7 @@ jalapeno_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
membar(Sync);
for (cpu = 0; cpu < mp_ncpus; cpu++) {
if ((cpus & (1 << cpu)) != 0) {
if (CPU_ISSET(cpu, &cpus)) {
stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
IDC_ITID_SHIFT), ASI_SDB_INTR_W, 0);
membar(Sync);
@@ -808,10 +828,10 @@ jalapeno_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
(IDR_CHEETAH_ALL_BUSY | IDR_CHEETAH_ALL_NACK)) == 0)
return;
for (cpu = 0; cpu < mp_ncpus; cpu++)
if ((cpus & (1 << cpu)) != 0)
if (CPU_ISSET(cpu, &cpus))
if ((ids & (IDR_NACK <<
(2 * cpuid_to_mid[cpu]))) == 0)
cpus &= ~(1 << cpu);
CPU_CLR(cpu, &cpus);
/*
* Leave interrupts enabled for a bit before retrying
* in order to avoid deadlocks if the other CPUs are
@@ -820,9 +840,9 @@ jalapeno_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
DELAY(2 * mp_ncpus);
}
if (kdb_active != 0 || panicstr != NULL)
printf("%s: couldn't send IPI (cpus=0x%u ids=0x%lu)\n",
__func__, cpus, ids);
printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n",
__func__, cpusetobj_strprint(pbuf, &cpus), ids);
else
panic("%s: couldn't send IPI (cpus=0x%u ids=0x%lu)",
__func__, cpus, ids);
panic("%s: couldn't send IPI (cpus=%s ids=0x%lu)",
__func__, cpusetobj_strprint(pbuf, &cpus), ids);
}
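
With the mask no longer a scalar, the IPI-timeout diagnostics cannot print it with %u; cpusetobj_strprint() formats a cpuset_t into a caller-supplied buffer of CPUSETBUFSIZ bytes and returns that buffer, so the call can sit directly in a printf()/panic() argument list, as in the hunks above. A hedged usage sketch (the textual format is the helper's own, roughly the set's backing words in hex):

        char pbuf[CPUSETBUFSIZ];
        cpuset_t cpus;

        CPU_SETOF(3, &cpus);            /* example set: { 3 } */
        /* Kernel-only helper: fills pbuf and returns it. */
        printf("IPI targets: %s\n", cpusetobj_strprint(pbuf, &cpus));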

View File

@@ -664,7 +664,7 @@ pmap_bootstrap(u_int cpu_impl)
pm = kernel_pmap;
for (i = 0; i < MAXCPU; i++)
pm->pm_context[i] = TLB_CTX_KERNEL;
pm->pm_active = ~0;
CPU_FILL(&pm->pm_active);
/*
* Flush all non-locked TLB entries possibly left over by the
@@ -1189,7 +1189,7 @@ pmap_pinit0(pmap_t pm)
PMAP_LOCK_INIT(pm);
for (i = 0; i < MAXCPU; i++)
pm->pm_context[i] = TLB_CTX_KERNEL;
pm->pm_active = 0;
CPU_ZERO(&pm->pm_active);
pm->pm_tsb = NULL;
pm->pm_tsb_obj = NULL;
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
@@ -1229,7 +1229,7 @@ pmap_pinit(pmap_t pm)
mtx_lock_spin(&sched_lock);
for (i = 0; i < MAXCPU; i++)
pm->pm_context[i] = -1;
pm->pm_active = 0;
CPU_ZERO(&pm->pm_active);
mtx_unlock_spin(&sched_lock);
VM_OBJECT_LOCK(pm->pm_tsb_obj);
@@ -2230,7 +2230,7 @@ pmap_activate(struct thread *td)
PCPU_SET(tlb_ctx, context + 1);
pm->pm_context[curcpu] = context;
pm->pm_active |= PCPU_GET(cpumask);
CPU_OR(&pm->pm_active, PCPU_PTR(cpumask));
PCPU_SET(pmap, pm);
stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
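
One detail worth noting in the pmap.c hunks: the per-pmap set is cleared with CPU_ZERO() (the kernel pmap is filled instead, since it is active everywhere), and updates to pm_active take PCPU_PTR(cpumask) rather than PCPU_GET(cpumask), because the macros want a pointer to the per-CPU set and copying a multi-word struct by value buys nothing. That relies on the caller not migrating while it holds the pointer; the call sites shown run pinned, with interrupts disabled, or under a spin lock. A sketch of the two update sites (hypothetical helper name):

/* Sketch only: the pm_active updates after the change. */
static void
pm_active_sketch(pmap_t pm)
{
        CPU_ZERO(&pm->pm_active);                       /* pmap_pinit(): was pm->pm_active = 0;        */
        CPU_OR(&pm->pm_active, PCPU_PTR(cpumask));      /* pmap_activate(): was |= PCPU_GET(cpumask);  */
}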

View File

@@ -80,7 +80,7 @@ tlb_context_demap(struct pmap *pm)
PMAP_STATS_INC(tlb_ncontext_demap);
cookie = ipi_tlb_context_demap(pm);
s = intr_disable();
if (pm->pm_active & PCPU_GET(cpumask)) {
if (CPU_OVERLAP(&pm->pm_active, PCPU_PTR(cpumask))) {
KASSERT(pm->pm_context[curcpu] != -1,
("tlb_context_demap: inactive pmap?"));
stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
@@ -101,7 +101,7 @@ tlb_page_demap(struct pmap *pm, vm_offset_t va)
PMAP_STATS_INC(tlb_npage_demap);
cookie = ipi_tlb_page_demap(pm, va);
s = intr_disable();
if (pm->pm_active & PCPU_GET(cpumask)) {
if (CPU_OVERLAP(&pm->pm_active, PCPU_PTR(cpumask))) {
KASSERT(pm->pm_context[curcpu] != -1,
("tlb_page_demap: inactive pmap?"));
if (pm == kernel_pmap)
@@ -128,7 +128,7 @@ tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
PMAP_STATS_INC(tlb_nrange_demap);
cookie = ipi_tlb_range_demap(pm, start, end);
s = intr_disable();
if (pm->pm_active & PCPU_GET(cpumask)) {
if (CPU_OVERLAP(&pm->pm_active, PCPU_PTR(cpumask))) {
KASSERT(pm->pm_context[curcpu] != -1,
("tlb_range_demap: inactive pmap?"));
if (pm == kernel_pmap)
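
The tlb.c tests use CPU_OVERLAP(), the read-only replacement for the old `(a & b) != 0` check: unlike CPU_AND() it modifies neither argument and simply reports whether the two sets share a member, which is what these fast paths want before choosing between a local demap and an IPI. A minimal sketch:

        cpuset_t active, self;

        CPU_SETOF(0, &active);                  /* active = { 0 } */
        CPU_SETOF(1, &self);                    /* self   = { 1 } */
        if (!CPU_OVERLAP(&active, &self))       /* true: disjoint  */
                CPU_SET(1, &active);
        if (CPU_OVERLAP(&active, &self))        /* true: both hold CPU 1 */
                ;                               /* would demap the local TLB here */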