freebsd-skq/sys/sparc64/include/smp.h
marius ee65d962e2 - Add support for sending IPIs with USIII and greater sun4u CPUs.
These CPUs use an enhanced layout of the interrupt vector dispatch
  and dispatch status registers in order to allow sending IPIs to
  multiple targets simultaneously. Thus support for these CPUs was
  put in a newly added cheetah_ipi_selected(). This is intended to
  be pointed to by cpu_ipi_selected, which now is a function pointer,
  in order to avoid cpu_impl checks once booted. Alternatively it
  can point to spitfire_ipi_selected(), which was renamed from
  cpu_ipi_selected(). Consequently cpu_ipi_send() was also renamed
  to spitfire_ipi_send() (there's no need for a cheetah equivalent
  of this so far). Initialization of the cpu_ipi_selected pointer
  and the other requirements is done in mp_init() (renamed from
  mp_tramp_alloc()), as cpu_mp_start() isn't called on UP systems
  while cpu_ipi_selected() is; a sketch of this function-pointer
  dispatch follows this log entry. As a side effect this allows
  making mp_tramp static to sys/sparc64/sparc64/mp_machdep.c.
  For the sake of avoiding #ifdef SMP and for keeping the history
  in place, cheetah_ipi_selected() and spitfire_ipi_{selected,send}()
  were not put into/moved to sys/sparc64/sparc64/{cheetah,spitfire}.c.
- Add some CTASSERTs and KASSERTs ensuring that MAXCPU doesn't
  exceed the width of the data types we use to store the CPU bit
  fields or the number of USIII and greater CPUs supported by the
  current cheetah_ipi_selected() implementation (which for JBus
  CPUs is only 4; that should be fine though, as according to
  OpenSolaris there are no sun4u machines with more than 4 JBus
  CPUs).
- In cpu_mp_start() don't enumerate and start more than MAXCPU CPUs
  as we can't handle more than that.
- In cpu_mp_start() check for upa-portid vs. portid depending on
  cpu_impl for consistency with nexus(4).
- In spitfire_ipi_selected() add KASSERTs ensuring that a CPU isn't
  told to IPI itself as sun4u CPUs just can't do that.
- In spitfire_ipi_send() do a MEMBAR #Sync after writing the
  interrupt vector data, as we want to make sure the payload was
  actually written before we trigger the dispatch.
- In spitfire_ipi_send() also check IDR_BUSY when verifying whether
  the dispatch was successful, as it has to be cleared (in addition
  to IDR_NACK) for that to be the case; see the dispatch sketch
  following this log entry.
- Remove some redundant variables.
2007-06-16 23:26:00 +00:00
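
A minimal sketch of the function-pointer dispatch described above, assuming a
hypothetical cpu_is_cheetah() predicate in place of the cpu_impl comparison the
committed mp_init() actually performs; it only illustrates the pattern of
selecting the IPI routine once at boot, not the real code:

    typedef void cpu_ipi_selected_t(u_int, u_long, u_long, u_long);

    cpu_ipi_selected_t cheetah_ipi_selected;    /* USIII and greater */
    cpu_ipi_selected_t spitfire_ipi_selected;   /* pre-USIII sun4u */
    cpu_ipi_selected_t *cpu_ipi_selected;

    void
    mp_init(void)
    {

        /* ... allocate and set up the MP trampoline as before ... */
        if (cpu_is_cheetah())               /* hypothetical helper */
            cpu_ipi_selected = cheetah_ipi_selected;
        else
            cpu_ipi_selected = spitfire_ipi_selected;
    }

Because the pointer is fixed once in mp_init(), every later cpu_ipi_selected()
call is a plain indirect call with no per-IPI cpu_impl check.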
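
Likewise, a sketch of the dispatch/acknowledge protocol the spitfire_ipi_send()
items describe; asi_wr_intr_data(), asi_wr_intr_dispatch(),
asi_rd_dispatch_status() and membar_sync() are stand-ins for the actual ASI
accesses and the MEMBAR #Sync instruction, not real kernel interfaces:

    static int
    ipi_send_sketch(u_int mid, u_long d0, u_long d1, u_long d2)
    {
        u_long ids;
        int i;

        for (i = 0; i < IPI_RETRIES; i++) {
            /* Write the three interrupt vector data words. */
            asi_wr_intr_data(0, d0);
            asi_wr_intr_data(1, d1);
            asi_wr_intr_data(2, d2);
            /* MEMBAR #Sync: make sure the payload is written first. */
            membar_sync();
            /* Trigger the dispatch to the target module ID. */
            asi_wr_intr_dispatch(mid << IDC_ITID_SHIFT);
            /* Wait for the dispatch to complete... */
            while (((ids = asi_rd_dispatch_status()) & IDR_BUSY) != 0)
                ;
            /* ...and only count it as delivered if neither bit is set. */
            if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
                return (0);
        }
        return (-1);    /* gave up after IPI_RETRIES attempts */
    }

The point of the final check is that a dispatch only counts as successful once
the interrupt dispatch status register shows neither IDR_BUSY nor IDR_NACK.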

/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_

#define CPU_CLKSYNC          1
#define CPU_INIT             2
#define CPU_BOOTSTRAP        3

#ifndef LOCORE

#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/tte.h>

#define IDR_BUSY                    0x0000000000000001ULL
#define IDR_NACK                    0x0000000000000002ULL
#define IDR_CHEETAH_ALL_BUSY        0x5555555555555555ULL
#define IDR_CHEETAH_ALL_NACK        (~IDR_CHEETAH_ALL_BUSY)
#define IDR_CHEETAH_MAX_BN_PAIRS    32
#define IDR_JALAPENO_MAX_BN_PAIRS   4

#define IDC_ITID_SHIFT       14
#define IDC_BN_SHIFT         24

#define IPI_AST              PIL_AST
#define IPI_RENDEZVOUS       PIL_RENDEZVOUS
#define IPI_STOP             PIL_STOP

#define IPI_RETRIES          5000

struct cpu_start_args {
        u_int csa_count;
        u_int csa_mid;
        u_int csa_state;
        vm_offset_t csa_pcpu;
        u_long csa_tick;
        u_long csa_ver;
        struct tte csa_ttes[PCPU_PAGES];
};

struct ipi_cache_args {
        u_int ica_mask;
        vm_paddr_t ica_pa;
};

struct ipi_tlb_args {
        u_int ita_mask;
        struct pmap *ita_pmap;
        u_long ita_start;
        u_long ita_end;
};
#define ita_va ita_start

struct pcpu;

extern struct pcb stoppcbs[];

void cpu_mp_bootstrap(struct pcpu *pc);
void cpu_mp_shutdown(void);

typedef void cpu_ipi_selected_t(u_int, u_long, u_long, u_long);
extern cpu_ipi_selected_t *cpu_ipi_selected;

void ipi_selected(u_int cpus, u_int ipi);
void ipi_all(u_int ipi);
void ipi_all_but_self(u_int ipi);

void mp_init(void);

extern struct mtx ipi_mtx;
extern struct ipi_cache_args ipi_cache_args;
extern struct ipi_tlb_args ipi_tlb_args;

extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;

extern void mp_startup(void);

extern char tl_ipi_cheetah_dcache_page_inval[];
extern char tl_ipi_spitfire_dcache_page_inval[];
extern char tl_ipi_spitfire_icache_page_inval[];

extern char tl_ipi_level[];

extern char tl_ipi_tlb_context_demap[];
extern char tl_ipi_tlb_page_demap[];
extern char tl_ipi_tlb_range_demap[];

#ifdef SMP
#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)

/*
 * The ipi_*() senders below all follow the same pattern: take the ipi_mtx
 * spin lock, fill in the shared argument block, IPI the other CPUs and
 * return a cookie which the caller later passes to ipi_wait().
 */
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
        struct ipi_cache_args *ica;

        if (smp_cpus == 1)
                return (NULL);
        ica = &ipi_cache_args;
        mtx_lock_spin(&ipi_mtx);
        ica->ica_mask = all_cpus;
        ica->ica_pa = pa;
        cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
        return (&ica->ica_mask);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
        struct ipi_cache_args *ica;

        if (smp_cpus == 1)
                return (NULL);
        ica = &ipi_cache_args;
        mtx_lock_spin(&ipi_mtx);
        ica->ica_mask = all_cpus;
        ica->ica_pa = pa;
        cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
        return (&ica->ica_mask);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
        struct ipi_tlb_args *ita;
        u_int cpus;

        if (smp_cpus == 1)
                return (NULL);
        if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
                return (NULL);
        ita = &ipi_tlb_args;
        mtx_lock_spin(&ipi_mtx);
        ita->ita_mask = cpus | PCPU_GET(cpumask);
        ita->ita_pmap = pm;
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
            (u_long)ita);
        return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
        struct ipi_tlb_args *ita;
        u_int cpus;

        if (smp_cpus == 1)
                return (NULL);
        if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
                return (NULL);
        ita = &ipi_tlb_args;
        mtx_lock_spin(&ipi_mtx);
        ita->ita_mask = cpus | PCPU_GET(cpumask);
        ita->ita_pmap = pm;
        ita->ita_va = va;
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
        return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
        struct ipi_tlb_args *ita;
        u_int cpus;

        if (smp_cpus == 1)
                return (NULL);
        if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
                return (NULL);
        ita = &ipi_tlb_args;
        mtx_lock_spin(&ipi_mtx);
        ita->ita_mask = cpus | PCPU_GET(cpumask);
        ita->ita_pmap = pm;
        ita->ita_start = start;
        ita->ita_end = end;
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
        return (&ita->ita_mask);
}

/*
 * Clear our own bit in the cookie's CPU mask and spin until all of the
 * IPI'ed CPUs have cleared theirs, i.e. until the IPI has been handled
 * everywhere, then drop the ipi_mtx taken by the sender.
 */
static __inline void
ipi_wait(void *cookie)
{
        volatile u_int *mask;

        if ((mask = cookie) != NULL) {
                atomic_clear_int(mask, PCPU_GET(cpumask));
                while (*mask != 0)
                        ;
                mtx_unlock_spin(&ipi_mtx);
        }
}
#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
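
/*
 * Usage sketch (illustrative only, not part of the original header):
 * callers such as the cache/pmap code are expected to pair one of the
 * ipi_*() senders above with ipi_wait() on the returned cookie, doing
 * the local work in between, e.g.:
 *
 *	cookie = ipi_dcache_page_inval(tl_ipi_spitfire_dcache_page_inval, pa);
 *	... invalidate the local D-cache lines for pa ...
 *	ipi_wait(cookie);
 */
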
#else

static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{

        return (NULL);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{

        return (NULL);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{

        return (NULL);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{

        return (NULL);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{

        return (NULL);
}

static __inline void
ipi_wait(void *cookie)
{

}

#endif /* SMP */
#endif /* !LOCORE */
#endif /* !_MACHINE_SMP_H_ */