- Unlike cache invalidation and TLB demapping IPIs, reading registers from
  other CPUs doesn't require locking, so get rid of it. As the latter is used
  for the timecounter on certain machine models, using a spin lock in this
  case can lead to a deadlock with the upcoming callout(9) rework.
- Merge r134227/r167250 from x86: Avoid cross-IPI SMP deadlock by using the
  smp_ipi_mtx spin lock not only for smp_rendezvous_cpus() but also for the
  MD cache invalidation and TLB demapping IPIs.
- Mark some unused function arguments as such.

MFC after:	1 week
This commit is contained in:
parent
57bc16315f
commit
bf38cf8ab3
@ -669,9 +669,6 @@ static struct witness_order_list_entry order_lists[] = {
|
||||
*/
|
||||
{ "intrcnt", &lock_class_mtx_spin },
|
||||
{ "icu", &lock_class_mtx_spin },
|
||||
#if defined(SMP) && defined(__sparc64__)
|
||||
{ "ipi", &lock_class_mtx_spin },
|
||||
#endif
|
||||
#ifdef __i386__
|
||||
{ "allpmaps", &lock_class_mtx_spin },
|
||||
{ "descriptor tables", &lock_class_mtx_spin },
|
||||
|
@ -109,7 +109,6 @@ extern cpu_ipi_single_t *cpu_ipi_single;
|
||||
|
||||
void mp_init(u_int cpu_impl);
|
||||
|
||||
extern struct mtx ipi_mtx;
|
||||
extern struct ipi_cache_args ipi_cache_args;
|
||||
extern struct ipi_rd_args ipi_rd_args;
|
||||
extern struct ipi_tlb_args ipi_tlb_args;
|
||||
@ -169,7 +168,7 @@ ipi_dcache_page_inval(void *func, vm_paddr_t pa)
|
||||
return (NULL);
|
||||
sched_pin();
|
||||
ica = &ipi_cache_args;
|
||||
mtx_lock_spin(&ipi_mtx);
|
||||
mtx_lock_spin(&smp_ipi_mtx);
|
||||
ica->ica_mask = all_cpus;
|
||||
CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
|
||||
ica->ica_pa = pa;
|
||||
@ -186,7 +185,7 @@ ipi_icache_page_inval(void *func, vm_paddr_t pa)
|
||||
return (NULL);
|
||||
sched_pin();
|
||||
ica = &ipi_cache_args;
|
||||
mtx_lock_spin(&ipi_mtx);
|
||||
mtx_lock_spin(&smp_ipi_mtx);
|
||||
ica->ica_mask = all_cpus;
|
||||
CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
|
||||
ica->ica_pa = pa;
|
||||
@ -203,7 +202,6 @@ ipi_rd(u_int cpu, void *func, u_long *val)
|
||||
return (NULL);
|
||||
sched_pin();
|
||||
ira = &ipi_rd_args;
|
||||
mtx_lock_spin(&ipi_mtx);
|
||||
CPU_SETOF(cpu, &ira->ira_mask);
|
||||
ira->ira_val = val;
|
||||
cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
|
||||
@ -227,7 +225,7 @@ ipi_tlb_context_demap(struct pmap *pm)
|
||||
return (NULL);
|
||||
}
|
||||
ita = &ipi_tlb_args;
|
||||
mtx_lock_spin(&ipi_mtx);
|
||||
mtx_lock_spin(&smp_ipi_mtx);
|
||||
ita->ita_mask = cpus;
|
||||
ita->ita_pmap = pm;
|
||||
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
|
||||
@ -252,7 +250,7 @@ ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
|
||||
return (NULL);
|
||||
}
|
||||
ita = &ipi_tlb_args;
|
||||
mtx_lock_spin(&ipi_mtx);
|
||||
mtx_lock_spin(&smp_ipi_mtx);
|
||||
ita->ita_mask = cpus;
|
||||
ita->ita_pmap = pm;
|
||||
ita->ita_va = va;
|
||||
@ -277,7 +275,7 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
|
||||
return (NULL);
|
||||
}
|
||||
ita = &ipi_tlb_args;
|
||||
mtx_lock_spin(&ipi_mtx);
|
||||
mtx_lock_spin(&smp_ipi_mtx);
|
||||
ita->ita_mask = cpus;
|
||||
ita->ita_pmap = pm;
|
||||
ita->ita_start = start;
|
||||
@ -295,7 +293,19 @@ ipi_wait(void *cookie)
|
||||
if ((mask = cookie) != NULL) {
|
||||
while (!CPU_EMPTY(mask))
|
||||
;
|
||||
mtx_unlock_spin(&ipi_mtx);
|
||||
mtx_unlock_spin(&smp_ipi_mtx);
|
||||
sched_unpin();
|
||||
}
|
||||
}
|
||||
|
||||
static __inline void
|
||||
ipi_wait_unlocked(void *cookie)
|
||||
{
|
||||
volatile cpuset_t *mask;
|
||||
|
||||
if ((mask = cookie) != NULL) {
|
||||
while (!CPU_EMPTY(mask))
|
||||
;
|
||||
sched_unpin();
|
||||
}
|
||||
}
|
||||
@ -352,7 +362,13 @@ ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
|
||||
}
|
||||
|
||||
static __inline void
|
||||
ipi_wait(void *cookie)
|
||||
ipi_wait(void *cookie __unused)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
static __inline void
|
||||
ipi_wait_unlocked(void *cookie __unused)
|
||||
{
|
||||
|
||||
}
|
||||
|
@ -113,8 +113,6 @@ struct ipi_rd_args ipi_rd_args;
|
||||
struct ipi_tlb_args ipi_tlb_args;
|
||||
struct pcb stoppcbs[MAXCPU];
|
||||
|
||||
struct mtx ipi_mtx;
|
||||
|
||||
cpu_ipi_selected_t *cpu_ipi_selected;
|
||||
cpu_ipi_single_t *cpu_ipi_single;
|
||||
|
||||
@ -280,8 +278,6 @@ void
|
||||
cpu_mp_start(void)
|
||||
{
|
||||
|
||||
mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);
|
||||
|
||||
intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
|
||||
intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
|
||||
-1, NULL, NULL);
|
||||
@ -503,13 +499,13 @@ cpu_mp_shutdown(void)
|
||||
}
|
||||
|
||||
static void
|
||||
cpu_ipi_ast(struct trapframe *tf)
|
||||
cpu_ipi_ast(struct trapframe *tf __unused)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
static void
|
||||
cpu_ipi_stop(struct trapframe *tf)
|
||||
cpu_ipi_stop(struct trapframe *tf __unused)
|
||||
{
|
||||
u_int cpuid;
|
||||
|
||||
|
@ -332,7 +332,7 @@ stick_get_timecount_mp(struct timecounter *tc)
|
||||
if (curcpu == 0)
|
||||
stick = rdstick();
|
||||
else
|
||||
ipi_wait(ipi_rd(0, tl_ipi_stick_rd, &stick));
|
||||
ipi_wait_unlocked(ipi_rd(0, tl_ipi_stick_rd, &stick));
|
||||
sched_unpin();
|
||||
return (stick);
|
||||
}
|
||||
@ -346,7 +346,7 @@ tick_get_timecount_mp(struct timecounter *tc)
|
||||
if (curcpu == 0)
|
||||
tick = rd(tick);
|
||||
else
|
||||
ipi_wait(ipi_rd(0, tl_ipi_tick_rd, &tick));
|
||||
ipi_wait_unlocked(ipi_rd(0, tl_ipi_tick_rd, &tick));
|
||||
sched_unpin();
|
||||
return (tick);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user