freebsd-dev/sys/sparc64/include/smp.h


/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_
#ifdef SMP
- USIII-based machines can consist of CPUs running at different frequencies (and having different cache sizes), so use the STICK (System TICK) timer, which was introduced for this reason and is driven by the same frequency across all CPUs, instead of the TICK timer, whose frequency varies with the CPU clock, to drive hardclock. We try to use the STICK counter with all CPUs that are USIII or beyond, even when not strictly necessary due to identical CPUs, as we can also avoid the workaround for the BlackBird erratum #1 there. Unfortunately, using the STICK counter currently causes a hang on USIIIi MP machines for reasons unknown, so we still use the TICK timer there (which is okay as they can only consist of identical CPUs).
- Given that we only (try to) synchronize the (S)TICK timers of the APs with the BSP during startup, we could end up spinning forever in DELAY(9) if that function is migrated to another CPU while we're spinning, due to clock drift afterwards, so pin to the CPU in order to avoid migration. Unfortunately, pinning doesn't work yet at the point DELAY(9) is required by the low-level console drivers, so switch to a function pointer, which is updated accordingly, for implementing DELAY(9) (see the sketch after the CPU_* state defines below). For USIII and beyond this would also allow easily using the STICK counter instead of the TICK one here, but there's no benefit in doing so. While at it, use cpu_spinwait(9) for spinning in the delay functions; this currently is a NOP though.
- Don't set the TICK timer of the BSP to 0 at startup as there's no need to do so.
- Implement cpu_est_clockrate().
- Unfortunately, USIIIi-based machines don't provide a timecounter device besides the STICK and TICK counters (in theory the Tomatillo bridges have a performance counter that can be (ab)used as a timecounter by configuring it to count bus cycles, but unlike the performance counter of the Schizo bridges, the Tomatillo one is broken and counts Sun knows what in this mode). This means that we have to use a (S)TICK counter for timecounting, which has the old problem of not being in sync across CPUs, so provide an additional timecounter function which binds itself to the BSP but has an adequately low priority.
2008-09-03 17:39:19 +00:00
#define CPU_TICKSYNC 1
#define CPU_STICKSYNC 2
#define CPU_INIT 3
#define CPU_BOOTSTRAP 4
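
The annotation above mentions implementing DELAY(9) through a function pointer and spinning with cpu_spinwait(9). The following is only a hedged sketch of that idea: the pointer name delay_func, the helper name example_delay_tick and the tick_freq variable are assumptions made here for illustration, and the authoritative code lives elsewhere in the sparc64 machine-dependent sources.

typedef void delay_func_t(int usec);

extern delay_func_t *delay_func;	/* assumed name of the pointer DELAY(9) calls through */
extern u_long tick_freq;		/* assumed: TICK increments per second */

/* Illustrative TICK-based delay loop; the real delay function differs in detail. */
static void
example_delay_tick(int usec)
{
	u_long end;

	/* rd(tick) is assumed to be the <machine/cpufunc.h> TICK register read. */
	end = rd(tick) + (u_long)usec * (tick_freq / 1000000);
	while (rd(tick) < end)
		cpu_spinwait();		/* currently a NOP on sparc64 */
}

void
DELAY(int usec)
{

	(*delay_func)(usec);
}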
#ifndef LOCORE
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/tte.h>
- Add support for sending IPIs with USIII and greater sun4u CPUs. These CPUs use an enhanced layout of the interrupt vector dispatch and dispatch status registers in order to allow sending IPIs to multiple targets simultaneously. Support for these CPUs therefore went into a newly added cheetah_ipi_selected(). This is intended to be pointed to by cpu_ipi_selected, which now is a function pointer, in order to avoid cpu_impl checks once booted (a sketch of the selection follows the mp_init() prototype below). Alternatively it can point to spitfire_ipi_selected(), which was renamed from cpu_ipi_selected(). Consequently cpu_ipi_send() was also renamed to spitfire_ipi_send() (there's no need for a cheetah equivalent of this so far). Initialization of the cpu_ipi_selected pointer and other requirements is done in mp_init(), which was renamed from mp_tramp_alloc(), as cpu_mp_start() isn't called on UP systems while cpu_ipi_selected() is. As a side effect this allows making mp_tramp static to sys/sparc64/sparc64/mp_machdep.c. For the sake of avoiding #ifdef SMP and for keeping the history in place, cheetah_ipi_selected() and spitfire_ipi_{selected,send}() were not put into/moved to sys/sparc64/sparc64/{cheetah,spitfire}.c.
- Add some CTASSERTs and KASSERTs ensuring that MAXCPU doesn't exceed the data types we use to store the CPU bit fields or the number of USIII and greater CPUs supported by the current cheetah_ipi_selected() implementation (which for JBus CPUs is only 4; that should be fine though, as according to OpenSolaris there are no sun4u machines with more than 4 JBus CPUs).
- In cpu_mp_start() don't enumerate and start more than MAXCPU CPUs as we can't handle more than that.
- In cpu_mp_start() check for upa-portid vs. portid depending on cpu_impl, for consistency with nexus(4).
- In spitfire_ipi_selected() add KASSERTs ensuring that a CPU isn't told to IPI itself, as sun4u CPUs just can't do that.
- In spitfire_ipi_send() do a MEMBAR #Sync after writing the interrupt vector data, as we want to make sure the payload was actually written before we trigger the dispatch.
- In spitfire_ipi_send() also verify IDR_BUSY when checking whether the dispatch was successful, as it has to be cleared for this to be the case (see the illustrative polling loop after the IPI_RETRIES define below).
- Remove some redundant variables.
2007-06-16 23:26:00 +00:00
#define IDR_BUSY 0x0000000000000001ULL
#define IDR_NACK 0x0000000000000002ULL
#define IDR_CHEETAH_ALL_BUSY 0x5555555555555555ULL
#define IDR_CHEETAH_ALL_NACK (~IDR_CHEETAH_ALL_BUSY)
#define IDR_CHEETAH_MAX_BN_PAIRS 32
#define IDR_JALAPENO_MAX_BN_PAIRS 4
#define IDC_ITID_SHIFT 14
#define IDC_BN_SHIFT 24
#define IPI_AST PIL_AST
#define IPI_RENDEZVOUS PIL_RENDEZVOUS
#define IPI_PREEMPT PIL_PREEMPT
#define IPI_STOP PIL_STOP
#define IPI_STOP_HARD PIL_STOP
#define IPI_RETRIES 5000
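
To show how IDR_BUSY, IDR_NACK and IPI_RETRIES fit together, here is a hedged sketch of the dispatch-and-poll loop described in the annotation above. ipi_dispatch() and ipi_read_idr() are hypothetical stand-ins for the ASI accesses the real spitfire_ipi_send() in mp_machdep.c performs; they are not part of this header.

extern void ipi_dispatch(u_int mid, u_long d0, u_long d1, u_long d2);	/* hypothetical */
extern u_long ipi_read_idr(void);					/* hypothetical */

static void
example_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2)
{
	u_long ids;
	int i;

	for (i = 0; i < IPI_RETRIES; i++) {
		/* Write the vector data, MEMBAR #Sync, trigger the dispatch. */
		ipi_dispatch(mid, d0, d1, d2);
		/* Poll the dispatch status register until BUSY clears. */
		while (((ids = ipi_read_idr()) & IDR_BUSY) != 0)
			;
		/* The dispatch succeeded only if both BUSY and NACK are clear. */
		if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
			return;
	}
	panic("example_ipi_send: couldn't send IPI to module %u", mid);
}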
struct cpu_start_args {
u_int csa_count;
u_int csa_mid;
u_int csa_state;
vm_offset_t csa_pcpu;
u_long csa_tick;
u_long csa_stick;
u_long csa_ver;
struct tte csa_ttes[PCPU_PAGES];
};
struct ipi_cache_args {
u_int ica_mask;
vm_paddr_t ica_pa;
};
struct ipi_tlb_args {
u_int ita_mask;
struct pmap *ita_pmap;
u_long ita_start;
u_long ita_end;
};
#define ita_va ita_start
struct pcpu;
extern struct pcb stoppcbs[];
void cpu_mp_bootstrap(struct pcpu *pc);
void cpu_mp_shutdown(void);
typedef void cpu_ipi_selected_t(u_int, u_long, u_long, u_long);
extern cpu_ipi_selected_t *cpu_ipi_selected;
void mp_init(void);
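
A hedged sketch of the cpu_impl-based selection the annotation above attributes to mp_init(): the CPU_IMPL_ULTRASPARCIII cutoff and the cpu_impl declaration are assumptions borrowed from <machine/ver.h>, and the authoritative logic is in sys/sparc64/sparc64/mp_machdep.c.

extern u_int cpu_impl;				/* assumed: from <machine/ver.h> */
extern cpu_ipi_selected_t cheetah_ipi_selected;	/* provided by mp_machdep.c */
extern cpu_ipi_selected_t spitfire_ipi_selected;

/* Illustrative only: pick the IPI dispatch routine once at boot so that
 * no cpu_impl checks are needed on the hot path afterwards. */
void
example_mp_init(void)
{

	if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)	/* assumed cutoff */
		cpu_ipi_selected = cheetah_ipi_selected;
	else
		cpu_ipi_selected = spitfire_ipi_selected;
	/* ... allocate and patch the MP trampoline here ... */
}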
extern struct mtx ipi_mtx;
extern struct ipi_cache_args ipi_cache_args;
extern struct ipi_tlb_args ipi_tlb_args;
extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;
extern void mp_startup(void);
extern char tl_ipi_cheetah_dcache_page_inval[];
extern char tl_ipi_spitfire_dcache_page_inval[];
extern char tl_ipi_spitfire_icache_page_inval[];
extern char tl_ipi_level[];
extern char tl_ipi_tlb_context_demap[];
extern char tl_ipi_tlb_page_demap[];
extern char tl_ipi_tlb_range_demap[];
static __inline void
ipi_all_but_self(u_int ipi)
{
cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)tl_ipi_level, ipi);
}
static __inline void
ipi_selected(u_int cpus, u_int ipi)
{
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
}
#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
struct ipi_cache_args *ica;
if (smp_cpus == 1)
return (NULL);
ica = &ipi_cache_args;
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
ica->ica_pa = pa;
cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
return (&ica->ica_mask);
}
static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
struct ipi_cache_args *ica;
if (smp_cpus == 1)
return (NULL);
ica = &ipi_cache_args;
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
ica->ica_pa = pa;
cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
return (&ica->ica_mask);
}
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
struct ipi_tlb_args *ita;
u_int cpus;
if (smp_cpus == 1)
return (NULL);
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
(u_long)ita);
return (&ita->ita_mask);
}
static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
struct ipi_tlb_args *ita;
u_int cpus;
if (smp_cpus == 1)
return (NULL);
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
ita->ita_va = va;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
return (&ita->ita_mask);
}
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
struct ipi_tlb_args *ita;
u_int cpus;
if (smp_cpus == 1)
return (NULL);
if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
return (NULL);
ita = &ipi_tlb_args;
mtx_lock_spin(&ipi_mtx);
ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_pmap = pm;
ita->ita_start = start;
ita->ita_end = end;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
return (&ita->ita_mask);
}
static __inline void
ipi_wait(void *cookie)
{
volatile u_int *mask;
if ((mask = cookie) != NULL) {
atomic_clear_int(mask, PCPU_GET(cpumask));
while (*mask != 0)
;
mtx_unlock_spin(&ipi_mtx);
}
}
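
The inline helpers above all follow the same protocol: take the IPI spin mutex, publish the arguments, fire the cross call and hand the caller back a pointer to the acknowledgement mask, which ipi_wait() then spins on before dropping the mutex. Below is a hedged sketch of a typical caller, loosely modelled on how cache flush code might use this header; the function name is illustrative only.

/*
 * Illustrative caller only: broadcast a D-cache page invalidation, do the
 * local work while the IPI is in flight, then wait for all other CPUs to
 * clear their bit in the acknowledgement mask.
 */
static __inline void
example_dcache_page_inval(vm_paddr_t pa)
{
	void *cookie;

	cookie = ipi_dcache_page_inval(tl_ipi_spitfire_dcache_page_inval, pa);
	/* ... invalidate the page in the local D-cache ... */
	ipi_wait(cookie);
}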
#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
#endif /* !LOCORE */
#else
#ifndef LOCORE
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
return (NULL);
}
static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
return (NULL);
}
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
return (NULL);
}
static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
return (NULL);
}
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
return (NULL);
}
static __inline void
ipi_wait(void *cookie)
{
}
static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{
}
static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{
}
static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{
}
#endif /* !LOCORE */
#endif /* SMP */
#endif /* !_MACHINE_SMP_H_ */