Remove the pc_cpumask usage from amd64.

Reviewed by:	alc
Tested by:	pluknet

commit 6b6603b30e
parent cfdfd32d34
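The conversion is mechanical: wherever the old code cached the current CPU's one-bit mask (pc_cpumask) and applied set-wide cpuset(9) operations to it (CPU_OR, CPU_NAND, CPU_OVERLAP), the new code caches the CPU id once with PCPU_GET(cpuid) and uses the single-bit forms (CPU_SET, CPU_CLR, CPU_ISSET and their _ATOMIC variants); several sched_pin()/sched_unpin() pairs that surrounded the old PCPU_GET(cpumask) reads go away with it. The userland sketch below only illustrates that equivalence and is not kernel code: the toy_cpuset_t type, the TOY_* macros, and the example program are invented for the illustration and merely mimic the shape of the cpuset(9) API.

/*
 * Illustrative userland sketch: a toy cpuset backed by an unsigned long,
 * showing that single-bit set/clear/test keyed by a CPU id gives the same
 * result as OR/NAND/OVERLAP against a one-bit mask built from that id.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long toy_cpuset_t;

#define	TOY_SET(cpu, set)	(*(set) |= 1UL << (cpu))
#define	TOY_CLR(cpu, set)	(*(set) &= ~(1UL << (cpu)))
#define	TOY_ISSET(cpu, set)	((*(set) & (1UL << (cpu))) != 0)
#define	TOY_OR(dst, src)	(*(dst) |= *(src))
#define	TOY_NAND(dst, src)	(*(dst) &= ~*(src))
#define	TOY_OVERLAP(a, b)	((*(a) & *(b)) != 0)

int
main(void)
{
	toy_cpuset_t old_style = 0, new_style = 0, cpumask;
	unsigned int cpu = 3;			/* stand-in for PCPU_GET(cpuid) */

	/* Old style: materialize the one-bit mask, then use set-wide ops. */
	cpumask = 1UL << cpu;			/* the pc_cpumask analogue */
	TOY_OR(&old_style, &cpumask);		/* mark this CPU, e.g. stopped */
	assert(TOY_OVERLAP(&old_style, &cpumask));
	TOY_NAND(&old_style, &cpumask);		/* unmark it again */

	/* New style: operate directly on the bit named by the CPU id. */
	TOY_SET(cpu, &new_style);
	assert(TOY_ISSET(cpu, &new_style));
	TOY_CLR(cpu, &new_style);

	assert(old_style == new_style);		/* both sets end up empty */
	printf("single-bit ops and one-bit-mask ops agree\n");
	return (0);
}

In the kernel the same reasoning applies to shared sets such as stopped_cpus and started_cpus, where the diff below switches from CPU_OR_ATOMIC/CPU_NAND_ATOMIC on the cached mask to CPU_SET_ATOMIC/CPU_CLR_ATOMIC on the cached id.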
sys/amd64/amd64/mp_machdep.c

@@ -604,10 +604,10 @@ cpu_mp_announce(void)
 void
 init_secondary(void)
 {
-	cpuset_t tcpuset;
 	struct pcpu *pc;
 	struct nmi_pcpu *np;
 	u_int64_t msr, cr0;
+	u_int cpuid;
 	int cpu, gsel_tss, x;
 	struct region_descriptor ap_gdt;
 
@@ -711,8 +711,9 @@ init_secondary(void)
 	fpuinit();
 
 	/* A quick check from sanity claus */
+	cpuid = PCPU_GET(cpuid);
 	if (PCPU_GET(apic_id) != lapic_id()) {
-		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
+		printf("SMP: cpuid = %d\n", cpuid);
 		printf("SMP: actual apic_id = %d\n", lapic_id());
 		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
 		panic("cpuid mismatch! boom!!");
@@ -734,14 +735,13 @@ init_secondary(void)
 
 	smp_cpus++;
 
-	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
-	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
-	tcpuset = PCPU_GET(cpumask);
+	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
+	printf("SMP: AP CPU #%d Launched!\n", cpuid);
 
 	/* Determine if we are a logical CPU. */
 	/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
 	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
-		CPU_OR(&logical_cpus_mask, &tcpuset);
+		CPU_SET(cpuid, &logical_cpus_mask);
 
 	if (bootverbose)
 		lapic_dump("AP");
@@ -1138,9 +1138,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_of
 		if (othercpus < 1)
 			return;
 	} else {
-		sched_pin();
-		CPU_NAND(&mask, PCPU_PTR(cpumask));
-		sched_unpin();
+		CPU_CLR(PCPU_GET(cpuid), &mask);
 		if (CPU_EMPTY(&mask))
 			return;
 	}
@@ -1362,7 +1360,7 @@ ipi_all_but_self(u_int ipi)
 int
 ipi_nmi_handler()
 {
-	cpuset_t cpumask;
+	u_int cpuid;
 
 	/*
 	 * As long as there is not a simple way to know about a NMI's
@@ -1370,13 +1368,11 @@ ipi_nmi_handler()
 	 * the global pending bitword an IPI_STOP_HARD has been issued
 	 * and should be handled.
 	 */
-	sched_pin();
-	cpumask = PCPU_GET(cpumask);
-	sched_unpin();
-	if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
+	cpuid = PCPU_GET(cpuid);
+	if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
 		return (1);
 
-	CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
+	CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
 	cpustop_handler();
 	return (0);
 }
@@ -1388,25 +1384,21 @@ ipi_nmi_handler()
 void
 cpustop_handler(void)
 {
-	cpuset_t cpumask;
 	u_int cpu;
 
-	sched_pin();
 	cpu = PCPU_GET(cpuid);
-	cpumask = PCPU_GET(cpumask);
-	sched_unpin();
 
 	savectx(&stoppcbs[cpu]);
 
 	/* Indicate that we are stopped */
-	CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+	CPU_SET_ATOMIC(cpu, &stopped_cpus);
 
 	/* Wait for restart */
-	while (!CPU_OVERLAP(&started_cpus, &cpumask))
+	while (!CPU_ISSET(cpu, &started_cpus))
 		ia32_pause();
 
-	CPU_NAND_ATOMIC(&started_cpus, &cpumask);
-	CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+	CPU_CLR_ATOMIC(cpu, &started_cpus);
+	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
 
 	if (cpu == 0 && cpustop_restartfunc != NULL) {
 		cpustop_restartfunc();
@@ -1421,19 +1413,17 @@ cpustop_handler(void)
 void
 cpususpend_handler(void)
 {
-	cpuset_t cpumask;
 	register_t cr3, rf;
 	u_int cpu;
 
 	cpu = PCPU_GET(cpuid);
-	cpumask = PCPU_GET(cpumask);
 
 	rf = intr_disable();
 	cr3 = rcr3();
 
 	if (savectx(susppcbs[cpu])) {
 		wbinvd();
-		CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+		CPU_SET_ATOMIC(cpu, &stopped_cpus);
 	} else {
 		pmap_init_pat();
 		PCPU_SET(switchtime, 0);
@@ -1441,11 +1431,11 @@ cpususpend_handler(void)
 	}
 
 	/* Wait for resume */
-	while (!CPU_OVERLAP(&started_cpus, &cpumask))
+	while (!CPU_ISSET(cpu, &started_cpus))
 		ia32_pause();
 
-	CPU_NAND_ATOMIC(&started_cpus, &cpumask);
-	CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+	CPU_CLR_ATOMIC(cpu, &started_cpus);
+	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
 
 	/* Restore CR3 and enable interrupts */
 	load_cr3(cr3);
sys/amd64/amd64/pmap.c

@@ -925,17 +925,18 @@ pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
 void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
 		invlpg(va);
 		smp_invlpg(va);
 	} else {
-		cpumask = PCPU_GET(cpumask);
+		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
-		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			invlpg(va);
 		CPU_AND(&other_cpus, &pmap->pm_active);
 		if (!CPU_EMPTY(&other_cpus))
@@ -947,8 +948,9 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 void
 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
 	vm_offset_t addr;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
@@ -956,10 +958,10 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 			invlpg(addr);
 		smp_invlpg_range(sva, eva);
 	} else {
-		cpumask = PCPU_GET(cpumask);
+		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
-		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			for (addr = sva; addr < eva; addr += PAGE_SIZE)
 				invlpg(addr);
 		CPU_AND(&other_cpus, &pmap->pm_active);
@@ -972,17 +974,18 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 void
 pmap_invalidate_all(pmap_t pmap)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
 		invltlb();
 		smp_invltlb();
 	} else {
-		cpumask = PCPU_GET(cpumask);
+		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
-		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			invltlb();
 		CPU_AND(&other_cpus, &pmap->pm_active);
 		if (!CPU_EMPTY(&other_cpus))
@@ -1002,11 +1005,11 @@ pmap_invalidate_cache(void)
 }
 
 struct pde_action {
-	cpuset_t store;		/* processor that updates the PDE */
 	cpuset_t invalidate;	/* processors that invalidate their TLB */
 	vm_offset_t va;
 	pd_entry_t *pde;
 	pd_entry_t newpde;
+	u_int store;		/* processor that updates the PDE */
 };
 
 static void
@@ -1014,12 +1017,8 @@ pmap_update_pde_action(void *arg)
 {
 	struct pde_action *act = arg;
 
-	sched_pin();
-	if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
-		sched_unpin();
+	if (act->store == PCPU_GET(cpuid))
 		pde_store(act->pde, act->newpde);
-	} else
-		sched_unpin();
 }
 
 static void
@@ -1027,12 +1026,8 @@ pmap_update_pde_teardown(void *arg)
 {
 	struct pde_action *act = arg;
 
-	sched_pin();
-	if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
-		sched_unpin();
+	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
 		pmap_update_pde_invalidate(act->va, act->newpde);
-	} else
-		sched_unpin();
 }
 
 /*
@@ -1047,29 +1042,30 @@ static void
 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 {
 	struct pde_action act;
-	cpuset_t active, cpumask, other_cpus;
+	cpuset_t active, other_cpus;
+	u_int cpuid;
 
 	sched_pin();
-	cpumask = PCPU_GET(cpumask);
+	cpuid = PCPU_GET(cpuid);
 	other_cpus = all_cpus;
-	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+	CPU_CLR(cpuid, &other_cpus);
 	if (pmap == kernel_pmap)
 		active = all_cpus;
 	else
 		active = pmap->pm_active;
 	if (CPU_OVERLAP(&active, &other_cpus)) {
-		act.store = cpumask;
+		act.store = cpuid;
 		act.invalidate = active;
 		act.va = va;
 		act.pde = pde;
 		act.newpde = newpde;
-		CPU_OR(&cpumask, &active);
-		smp_rendezvous_cpus(cpumask,
+		CPU_SET(cpuid, &active);
+		smp_rendezvous_cpus(active,
 		    smp_no_rendevous_barrier, pmap_update_pde_action,
 		    pmap_update_pde_teardown, &act);
 	} else {
 		pde_store(pde, newpde);
-		if (CPU_OVERLAP(&active, &cpumask))
+		if (CPU_ISSET(cpuid, &active))
 			pmap_update_pde_invalidate(va, newpde);
 	}
 	sched_unpin();
@@ -5099,17 +5095,19 @@ void
 pmap_activate(struct thread *td)
 {
 	pmap_t pmap, oldpmap;
+	u_int cpuid;
 	u_int64_t cr3;
 
 	critical_enter();
 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
 	oldpmap = PCPU_GET(curpmap);
+	cpuid = PCPU_GET(cpuid);
 #ifdef SMP
-	CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
-	CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
-	CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
-	CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
+	CPU_CLR(cpuid, &oldpmap->pm_active);
+	CPU_SET(cpuid, &pmap->pm_active);
 #endif
 	cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
 	td->td_pcb->pcb_cr3 = cr3;