[PowerPC] hwpmc: add support for POWER8/9 PMCs

This change adds support for POWER8 and POWER9 PMCs (bare metal and
pseries).
All PowerISA 2.07B non-random events are supported.

Implementation was based on that of PPC970.

Reviewed by:	jhibbits
Sponsored by:	Eldorado Research Institute (eldorado.org.br)
Differential Revision:	https://reviews.freebsd.org/D26110
This commit is contained in:
Leandro Lupori 2020-11-05 16:36:39 +00:00
parent 16b971ed6d
commit 68dd718256
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=367390
10 changed files with 1093 additions and 1039 deletions

View File

@ -41,14 +41,6 @@ __FBSDID("$FreeBSD$");
#include "hwpmc_powerpc.h"
#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
PMC_CAP_THRESHOLD | PMC_CAP_READ | \
PMC_CAP_WRITE | PMC_CAP_INVERT | \
PMC_CAP_QUALIFIER)
#define E500_PMC_HAS_OVERFLOWED(x) (e500_pmcn_read(x) & (0x1 << 31))
struct e500_event_code_map {
enum pmc_event pe_ev; /* enum value */
uint8_t pe_counter_mask; /* Which counter this can be counted in. */
@ -246,20 +238,16 @@ static pmc_value_t
e500_pmcn_read(unsigned int pmc)
{
switch (pmc) {
case 0:
return mfpmr(PMR_PMC0);
break;
case 1:
return mfpmr(PMR_PMC1);
break;
case 2:
return mfpmr(PMR_PMC2);
break;
case 3:
return mfpmr(PMR_PMC3);
break;
default:
panic("Invalid PMC number: %d\n", pmc);
case 0:
return (mfpmr(PMR_PMC0));
case 1:
return (mfpmr(PMR_PMC1));
case 2:
return (mfpmr(PMR_PMC2));
case 3:
return (mfpmr(PMR_PMC3));
default:
panic("Invalid PMC number: %d\n", pmc);
}
}
@ -267,206 +255,98 @@ static void
e500_pmcn_write(unsigned int pmc, uint32_t val)
{
switch (pmc) {
case 0:
mtpmr(PMR_PMC0, val);
break;
case 1:
mtpmr(PMR_PMC1, val);
break;
case 2:
mtpmr(PMR_PMC2, val);
break;
case 3:
mtpmr(PMR_PMC3, val);
break;
default:
panic("Invalid PMC number: %d\n", pmc);
}
}
static void
e500_set_pmc(int cpu, int ri, int config)
{
struct pmc *pm;
struct pmc_hw *phw;
register_t pmc_pmlc;
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
pm = phw->phw_pmc;
config &= ~POWERPC_PMC_ENABLE;
if (config != PMCN_NONE) {
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
config |= PMLCax_CE;
/* Enable the PMC. */
switch (ri) {
case 0:
mtpmr(PMR_PMC0, val);
mtpmr(PMR_PMLCa0, config);
break;
case 1:
mtpmr(PMR_PMC1, val);
mtpmr(PMR_PMLCa1, config);
break;
case 2:
mtpmr(PMR_PMC2, val);
mtpmr(PMR_PMLCa2, config);
break;
case 3:
mtpmr(PMR_PMC3, val);
mtpmr(PMR_PMLCa3, config);
break;
default:
panic("Invalid PMC number: %d\n", pmc);
}
} else {
/* Disable the PMC. */
switch (ri) {
case 0:
pmc_pmlc = mfpmr(PMR_PMLCa0);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa0, pmc_pmlc);
break;
case 1:
pmc_pmlc = mfpmr(PMR_PMLCa1);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa1, pmc_pmlc);
break;
case 2:
pmc_pmlc = mfpmr(PMR_PMLCa2);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa2, pmc_pmlc);
break;
case 3:
pmc_pmlc = mfpmr(PMR_PMLCa3);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa3, pmc_pmlc);
break;
}
}
}
static int
e500_read_pmc(int cpu, int ri, pmc_value_t *v)
{
struct pmc *pm;
pmc_value_t tmp;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
("[powerpc,%d] illegal row index %d", __LINE__, ri));
pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
KASSERT(pm,
("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
ri));
tmp = e500_pmcn_read(ri);
PMCDBG2(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
*v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
else
*v = tmp;
return 0;
}
static int
e500_write_pmc(int cpu, int ri, pmc_value_t v)
{
struct pmc *pm;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
("[powerpc,%d] illegal row-index %d", __LINE__, ri));
pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
PMCDBG3(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);
e500_pmcn_write(ri, v);
return 0;
}
static int
e500_config_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
("[powerpc,%d] illegal row-index %d", __LINE__, ri));
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
KASSERT(pm == NULL || phw->phw_pmc == NULL,
("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
__LINE__, pm, phw->phw_pmc));
phw->phw_pmc = pm;
return 0;
}
static int
e500_start_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
pm = phw->phw_pmc;
config = pm->pm_md.pm_powerpc.pm_powerpc_evsel;
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
config |= PMLCax_CE;
/* Enable the PMC. */
switch (ri) {
case 0:
mtpmr(PMR_PMLCa0, config);
break;
case 1:
mtpmr(PMR_PMLCa1, config);
break;
case 2:
mtpmr(PMR_PMLCa2, config);
break;
case 3:
mtpmr(PMR_PMLCa3, config);
break;
default:
break;
}
return 0;
}
static int
e500_stop_pmc(int cpu, int ri)
{
struct pmc *pm;
struct pmc_hw *phw;
register_t pmc_pmlc;
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
pm = phw->phw_pmc;
/*
* Disable the PMCs.
*/
switch (ri) {
case 0:
pmc_pmlc = mfpmr(PMR_PMLCa0);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa0, pmc_pmlc);
break;
case 1:
pmc_pmlc = mfpmr(PMR_PMLCa1);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa1, pmc_pmlc);
break;
case 2:
pmc_pmlc = mfpmr(PMR_PMLCa2);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa2, pmc_pmlc);
break;
case 3:
pmc_pmlc = mfpmr(PMR_PMLCa3);
pmc_pmlc |= PMLCax_FC;
mtpmr(PMR_PMLCa3, pmc_pmlc);
break;
default:
break;
}
return 0;
}
static int
e500_pcpu_init(struct pmc_mdep *md, int cpu)
{
int first_ri, i;
struct pmc_cpu *pc;
struct powerpc_cpu *pac;
struct pmc_hw *phw;
int i;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu);
powerpc_pcpu_init(md, cpu);
/* Freeze all counters. */
mtpmr(PMR_PMGC0, PMGC_FAC | PMGC_PMIE | PMGC_FCECE);
powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
M_WAITOK|M_ZERO);
pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * E500_MAX_PMCS,
M_PMC, M_WAITOK|M_ZERO);
pac->pc_class = PMC_CLASS_E500;
pc = pmc_pcpu[cpu];
first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri;
KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
for (i = 0, phw = pac->pc_ppcpmcs; i < E500_MAX_PMCS; i++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
phw->phw_pmc = NULL;
pc->pc_hwpmcs[i + first_ri] = phw;
for (i = 0; i < E500_MAX_PMCS; i++)
/* Initialize the PMC to stopped */
e500_stop_pmc(cpu, i);
}
powerpc_stop_pmc(cpu, i);
/* Unfreeze global register. */
mtpmr(PMR_PMGC0, PMGC_PMIE | PMGC_FCECE);
return 0;
return (0);
}
static int
@ -478,10 +358,7 @@ e500_pcpu_fini(struct pmc_mdep *md, int cpu)
mtpmr(PMR_PMGC0, pmgc0);
mtmsr(mfmsr() & ~PSL_PMM);
free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
free(powerpc_pcpu[cpu], M_PMC);
return 0;
return (powerpc_pcpu_fini(md, cpu));
}
static int
@ -547,85 +424,12 @@ e500_allocate_pmc(int cpu, int ri, struct pmc *pm,
return 0;
}
static int
e500_release_pmc(int cpu, int ri, struct pmc *pmc)
static void
e500_resume_pmc(bool ie)
{
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
("[powerpc,%d] illegal row-index %d", __LINE__, ri));
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
KASSERT(phw->phw_pmc == NULL,
("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
return 0;
}
static int
e500_intr(struct trapframe *tf)
{
int i, error, retval, cpu;
uint32_t config;
struct pmc *pm;
struct powerpc_cpu *pac;
cpu = curcpu;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
TRAPF_USERMODE(tf));
retval = 0;
pac = powerpc_pcpu[cpu];
config = mfpmr(PMR_PMGC0) & ~PMGC_FAC;
/*
* look for all PMCs that have interrupted:
* - look for a running, sampling PMC which has overflowed
* and which has a valid 'struct pmc' association
*
* If found, we call a helper to process the interrupt.
*/
for (i = 0; i < E500_MAX_PMCS; i++) {
if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
continue;
}
if (!E500_PMC_HAS_OVERFLOWED(i))
continue;
retval = 1; /* Found an interrupting PMC. */
if (pm->pm_state != PMC_STATE_RUNNING)
continue;
/* Stop the counter if logging fails. */
error = pmc_process_interrupt(PMC_HR, pm, tf);
if (error != 0)
e500_stop_pmc(cpu, i);
/* reload count. */
e500_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
}
if (retval)
counter_u64_add(pmc_stats.pm_intr_processed, 1);
else
counter_u64_add(pmc_stats.pm_intr_ignored, 1);
/* Re-enable PERF exceptions. */
if (retval)
mtpmr(PMR_PMGC0, config | PMGC_PMIE);
return (retval);
if (ie)
mtpmr(PMR_PMGC0, (mfpmr(PMR_PMGC0) & ~PMGC_FAC) | PMGC_PMIE);
}
int
@ -643,19 +447,26 @@ pmc_e500_initialize(struct pmc_mdep *pmc_mdep)
pcd->pcd_width = 32;
pcd->pcd_allocate_pmc = e500_allocate_pmc;
pcd->pcd_config_pmc = e500_config_pmc;
pcd->pcd_config_pmc = powerpc_config_pmc;
pcd->pcd_pcpu_fini = e500_pcpu_fini;
pcd->pcd_pcpu_init = e500_pcpu_init;
pcd->pcd_describe = powerpc_describe;
pcd->pcd_get_config = powerpc_get_config;
pcd->pcd_read_pmc = e500_read_pmc;
pcd->pcd_release_pmc = e500_release_pmc;
pcd->pcd_start_pmc = e500_start_pmc;
pcd->pcd_stop_pmc = e500_stop_pmc;
pcd->pcd_write_pmc = e500_write_pmc;
pcd->pcd_read_pmc = powerpc_read_pmc;
pcd->pcd_release_pmc = powerpc_release_pmc;
pcd->pcd_start_pmc = powerpc_start_pmc;
pcd->pcd_stop_pmc = powerpc_stop_pmc;
pcd->pcd_write_pmc = powerpc_write_pmc;
pmc_mdep->pmd_npmc += E500_MAX_PMCS;
pmc_mdep->pmd_intr = e500_intr;
pmc_mdep->pmd_intr = powerpc_pmc_intr;
ppc_max_pmcs = E500_MAX_PMCS;
powerpc_set_pmc = e500_set_pmc;
powerpc_pmcn_read = e500_pmcn_read;
powerpc_pmcn_write = e500_pmcn_write;
powerpc_resume_pmc = e500_resume_pmc;
return (0);
}

View File

@ -42,12 +42,6 @@ __FBSDID("$FreeBSD$");
#include "hwpmc_powerpc.h"
#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
PMC_CAP_THRESHOLD | PMC_CAP_READ | \
PMC_CAP_WRITE | PMC_CAP_INVERT | \
PMC_CAP_QUALIFIER)
#define PPC_SET_PMC1SEL(r, x) ((r & ~(SPR_MMCR0_74XX_PMC1SEL(0x3f))) | \
SPR_MMCR0_74XX_PMC1SEL(x))
#define PPC_SET_PMC2SEL(r, x) ((r & ~(SPR_MMCR0_74XX_PMC2SEL(0x3f))) | \
@ -61,8 +55,6 @@ __FBSDID("$FreeBSD$");
/* Change this when we support more than just the 7450. */
#define MPC7XXX_MAX_PMCS 6
#define MPC7XXX_PMC_HAS_OVERFLOWED(x) (mpc7xxx_pmcn_read(x) & (0x1 << 31))
/*
* Things to improve on this:
* - It stops (clears to 0) the PMC and resets it at every context switch
@ -74,23 +66,11 @@ __FBSDID("$FreeBSD$");
* specifically).
*/
struct mpc7xxx_event_code_map {
enum pmc_event pe_ev; /* enum value */
uint8_t pe_counter_mask; /* Which counter this can be counted in. */
uint8_t pe_code; /* numeric code */
};
#define PPC_PMC_MASK1 0
#define PPC_PMC_MASK2 1
#define PPC_PMC_MASK3 2
#define PPC_PMC_MASK4 3
#define PPC_PMC_MASK5 4
#define PPC_PMC_MASK6 5
#define PPC_PMC_MASK_ALL 0x3f
#define PMC_POWERPC_EVENT(id, mask, number) \
{ .pe_ev = PMC_EV_PPC7450_##id, .pe_counter_mask = mask, .pe_code = number }
{ .pe_event = PMC_EV_PPC7450_##id, .pe_flags = mask, .pe_code = number }
static struct mpc7xxx_event_code_map mpc7xxx_event_codes[] = {
static struct pmc_ppc_event mpc7xxx_event_codes[] = {
PMC_POWERPC_EVENT(CYCLE,PPC_PMC_MASK_ALL, 1),
PMC_POWERPC_EVENT(INSTR_COMPLETED, 0x0f, 2),
PMC_POWERPC_EVENT(TLB_BIT_TRANSITIONS, 0x0f, 3),
@ -315,30 +295,26 @@ static struct mpc7xxx_event_code_map mpc7xxx_event_codes[] = {
PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH, 0x20, 56),
PMC_POWERPC_EVENT(PREFETCH_ENGINE_FULL, 0x20, 57)
};
static size_t mpc7xxx_event_codes_size = nitems(mpc7xxx_event_codes);
static pmc_value_t
mpc7xxx_pmcn_read(unsigned int pmc)
{
switch (pmc) {
case 0:
return mfspr(SPR_PMC1_74XX);
break;
case 1:
return mfspr(SPR_PMC2_74XX);
break;
case 2:
return mfspr(SPR_PMC3_74XX);
break;
case 3:
return mfspr(SPR_PMC4_74XX);
break;
case 4:
return mfspr(SPR_PMC5_74XX);
break;
case 5:
return mfspr(SPR_PMC6_74XX);
default:
panic("Invalid PMC number: %d\n", pmc);
case 0:
return (mfspr(SPR_PMC1_74XX));
case 1:
return (mfspr(SPR_PMC2_74XX));
case 2:
return (mfspr(SPR_PMC3_74XX));
case 3:
return (mfspr(SPR_PMC4_74XX));
case 4:
return (mfspr(SPR_PMC5_74XX));
case 5:
return (mfspr(SPR_PMC6_74XX));
default:
panic("Invalid PMC number: %d\n", pmc);
}
}
@ -346,113 +322,46 @@ static void
mpc7xxx_pmcn_write(unsigned int pmc, uint32_t val)
{
switch (pmc) {
case 0:
mtspr(SPR_PMC1_74XX, val);
break;
case 1:
mtspr(SPR_PMC2_74XX, val);
break;
case 2:
mtspr(SPR_PMC3_74XX, val);
break;
case 3:
mtspr(SPR_PMC4_74XX, val);
break;
case 4:
mtspr(SPR_PMC5_74XX, val);
break;
case 5:
mtspr(SPR_PMC6_74XX, val);
break;
default:
panic("Invalid PMC number: %d\n", pmc);
case 0:
mtspr(SPR_PMC1_74XX, val);
break;
case 1:
mtspr(SPR_PMC2_74XX, val);
break;
case 2:
mtspr(SPR_PMC3_74XX, val);
break;
case 3:
mtspr(SPR_PMC4_74XX, val);
break;
case 4:
mtspr(SPR_PMC5_74XX, val);
break;
case 5:
mtspr(SPR_PMC6_74XX, val);
break;
default:
panic("Invalid PMC number: %d\n", pmc);
}
}
static int
mpc7xxx_read_pmc(int cpu, int ri, pmc_value_t *v)
static void
mpc7xxx_set_pmc(int cpu, int ri, int config)
{
struct pmc *pm;
pmc_value_t tmp;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
("[powerpc,%d] illegal row index %d", __LINE__, ri));
pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
KASSERT(pm,
("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
ri));
tmp = mpc7xxx_pmcn_read(ri);
PMCDBG2(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
*v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
else
*v = tmp;
return 0;
}
static int
mpc7xxx_write_pmc(int cpu, int ri, pmc_value_t v)
{
struct pmc *pm;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
("[powerpc,%d] illegal row-index %d", __LINE__, ri));
pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
PMCDBG3(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);
mpc7xxx_pmcn_write(ri, v);
return 0;
}
static int
mpc7xxx_config_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
("[powerpc,%d] illegal row-index %d", __LINE__, ri));
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
KASSERT(pm == NULL || phw->phw_pmc == NULL,
("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
__LINE__, pm, phw->phw_pmc));
phw->phw_pmc = pm;
return 0;
}
static int
mpc7xxx_start_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
register_t pmc_mmcr;
uint32_t config_mask;
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
pm = phw->phw_pmc;
config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE;
/* The mask is inverted (enable is 1) compared to the flags in
* MMCR0, which are Freeze flags.
*/
config_mask = ~config & POWERPC_PMC_ENABLE;
config &= ~POWERPC_PMC_ENABLE;
/* Enable the PMC. */
/* Enable/disable the PMC. */
switch (ri) {
case 0:
pmc_mmcr = mfspr(SPR_MMCR0_74XX);
@ -484,107 +393,27 @@ mpc7xxx_start_pmc(int cpu, int ri)
pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config);
mtspr(SPR_MMCR1_74XX, pmc_mmcr);
break;
default:
break;
}
/* The mask is inverted (enable is 1) compared to the flags in MMCR0, which
* are Freeze flags.
*/
config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE;
pmc_mmcr = mfspr(SPR_MMCR0_74XX);
pmc_mmcr &= ~SPR_MMCR0_FC;
pmc_mmcr |= config;
mtspr(SPR_MMCR0_74XX, pmc_mmcr);
return 0;
}
static int
mpc7xxx_stop_pmc(int cpu, int ri)
{
struct pmc *pm;
struct pmc_hw *phw;
register_t pmc_mmcr;
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
pm = phw->phw_pmc;
/*
* Disable the PMCs.
*/
switch (ri) {
case 0:
if (config != PMCN_NONE) {
pmc_mmcr = mfspr(SPR_MMCR0_74XX);
pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, 0);
pmc_mmcr &= ~SPR_MMCR0_FC;
pmc_mmcr |= config;
mtspr(SPR_MMCR0_74XX, pmc_mmcr);
break;
case 1:
pmc_mmcr = mfspr(SPR_MMCR0_74XX);
pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, 0);
mtspr(SPR_MMCR0_74XX, pmc_mmcr);
break;
case 2:
pmc_mmcr = mfspr(SPR_MMCR1_74XX);
pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, 0);
mtspr(SPR_MMCR1_74XX, pmc_mmcr);
break;
case 3:
pmc_mmcr = mfspr(SPR_MMCR0_74XX);
pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, 0);
mtspr(SPR_MMCR0_74XX, pmc_mmcr);
break;
case 4:
pmc_mmcr = mfspr(SPR_MMCR1_74XX);
pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, 0);
mtspr(SPR_MMCR1_74XX, pmc_mmcr);
break;
case 5:
pmc_mmcr = mfspr(SPR_MMCR1_74XX);
pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, 0);
mtspr(SPR_MMCR1_74XX, pmc_mmcr);
break;
default:
break;
}
return 0;
}
static int
mpc7xxx_pcpu_init(struct pmc_mdep *md, int cpu)
{
int first_ri, i;
struct pmc_cpu *pc;
struct powerpc_cpu *pac;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu);
powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
M_WAITOK|M_ZERO);
pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * MPC7XXX_MAX_PMCS,
M_PMC, M_WAITOK|M_ZERO);
pac->pc_class = PMC_CLASS_PPC7450;
pc = pmc_pcpu[cpu];
first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri;
KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
for (i = 0, phw = pac->pc_ppcpmcs; i < MPC7XXX_MAX_PMCS; i++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
phw->phw_pmc = NULL;
pc->pc_hwpmcs[i + first_ri] = phw;
}
powerpc_pcpu_init(md, cpu);
/* Clear the MMCRs, and set FC, to disable all PMCs. */
mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC | SPR_MMCR0_PMXE |
SPR_MMCR0_FCECE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE);
mtspr(SPR_MMCR1_74XX, 0);
return 0;
return (0);
}
static int
@ -596,133 +425,16 @@ mpc7xxx_pcpu_fini(struct pmc_mdep *md, int cpu)
mmcr0 |= SPR_MMCR0_FC;
mtspr(SPR_MMCR0_74XX, mmcr0);
free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
free(powerpc_pcpu[cpu], M_PMC);
return 0;
return (powerpc_pcpu_fini(md, cpu));
}
static int
mpc7xxx_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
static void
mpc7xxx_resume_pmc(bool ie)
{
enum pmc_event pe;
uint32_t caps, config, counter;
int i;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
("[powerpc,%d] illegal row index %d", __LINE__, ri));
caps = a->pm_caps;
pe = a->pm_ev;
for (i = 0; i < nitems(mpc7xxx_event_codes); i++) {
if (mpc7xxx_event_codes[i].pe_ev == pe) {
config = mpc7xxx_event_codes[i].pe_code;
counter = mpc7xxx_event_codes[i].pe_counter_mask;
break;
}
}
if (i == nitems(mpc7xxx_event_codes))
return (EINVAL);
if ((counter & (1 << ri)) == 0)
return (EINVAL);
if (caps & PMC_CAP_SYSTEM)
config |= POWERPC_PMC_KERNEL_ENABLE;
if (caps & PMC_CAP_USER)
config |= POWERPC_PMC_USER_ENABLE;
if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
config |= POWERPC_PMC_ENABLE;
pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
PMCDBG2(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
return 0;
}
static int
mpc7xxx_release_pmc(int cpu, int ri, struct pmc *pmc)
{
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
("[powerpc,%d] illegal row-index %d", __LINE__, ri));
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
KASSERT(phw->phw_pmc == NULL,
("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
return 0;
}
static int
mpc7xxx_intr(struct trapframe *tf)
{
int i, error, retval, cpu;
uint32_t config;
struct pmc *pm;
struct powerpc_cpu *pac;
cpu = curcpu;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
TRAPF_USERMODE(tf));
retval = 0;
pac = powerpc_pcpu[cpu];
config = mfspr(SPR_MMCR0_74XX) & ~SPR_MMCR0_FC;
/*
* look for all PMCs that have interrupted:
* - look for a running, sampling PMC which has overflowed
* and which has a valid 'struct pmc' association
*
* If found, we call a helper to process the interrupt.
*/
for (i = 0; i < MPC7XXX_MAX_PMCS; i++) {
if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
continue;
}
if (!MPC7XXX_PMC_HAS_OVERFLOWED(i))
continue;
retval = 1; /* Found an interrupting PMC. */
if (pm->pm_state != PMC_STATE_RUNNING)
continue;
/* Stop the counter if logging fails. */
error = pmc_process_interrupt(PMC_HR, pm, tf);
if (error != 0)
mpc7xxx_stop_pmc(cpu, i);
/* reload count. */
mpc7xxx_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
}
if (retval)
counter_u64_add(pmc_stats.pm_intr_processed, 1);
else
counter_u64_add(pmc_stats.pm_intr_ignored, 1);
/* Re-enable PERF exceptions. */
if (retval)
mtspr(SPR_MMCR0_74XX, config | SPR_MMCR0_PMXE);
return (retval);
if (ie)
mtspr(SPR_MMCR0_74XX,
(mfspr(SPR_MMCR0_74XX) & ~SPR_MMCR0_FC) | SPR_MMCR0_PMXE);
}
int
@ -739,20 +451,31 @@ pmc_mpc7xxx_initialize(struct pmc_mdep *pmc_mdep)
pcd->pcd_ri = pmc_mdep->pmd_npmc;
pcd->pcd_width = 32; /* All PMCs, even in ppc970, are 32-bit */
pcd->pcd_allocate_pmc = mpc7xxx_allocate_pmc;
pcd->pcd_config_pmc = mpc7xxx_config_pmc;
pcd->pcd_allocate_pmc = powerpc_allocate_pmc;
pcd->pcd_config_pmc = powerpc_config_pmc;
pcd->pcd_pcpu_fini = mpc7xxx_pcpu_fini;
pcd->pcd_pcpu_init = mpc7xxx_pcpu_init;
pcd->pcd_describe = powerpc_describe;
pcd->pcd_get_config = powerpc_get_config;
pcd->pcd_read_pmc = mpc7xxx_read_pmc;
pcd->pcd_release_pmc = mpc7xxx_release_pmc;
pcd->pcd_start_pmc = mpc7xxx_start_pmc;
pcd->pcd_stop_pmc = mpc7xxx_stop_pmc;
pcd->pcd_write_pmc = mpc7xxx_write_pmc;
pcd->pcd_read_pmc = powerpc_read_pmc;
pcd->pcd_release_pmc = powerpc_release_pmc;
pcd->pcd_start_pmc = powerpc_start_pmc;
pcd->pcd_stop_pmc = powerpc_stop_pmc;
pcd->pcd_write_pmc = powerpc_write_pmc;
pmc_mdep->pmd_npmc += MPC7XXX_MAX_PMCS;
pmc_mdep->pmd_intr = mpc7xxx_intr;
pmc_mdep->pmd_intr = powerpc_pmc_intr;
ppc_event_codes = mpc7xxx_event_codes;
ppc_event_codes_size = mpc7xxx_event_codes_size;
ppc_event_first = PMC_EV_PPC7450_FIRST;
ppc_event_last = PMC_EV_PPC7450_LAST;
ppc_max_pmcs = MPC7XXX_MAX_PMCS;
powerpc_set_pmc = mpc7xxx_set_pmc;
powerpc_pmcn_read = mpc7xxx_pmcn_read;
powerpc_pmcn_write = mpc7xxx_pmcn_write;
powerpc_resume_pmc = mpc7xxx_resume_pmc;
return (0);
}

View File

@ -0,0 +1,319 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2013 Justin Hibbits
* Copyright (c) 2020 Leandro Lupori
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>
#include <machine/pmc_mdep.h>
#include <machine/spr.h>
#include <machine/cpu.h>
#include "hwpmc_powerpc.h"
#define POWER8_MAX_PMCS 6
/*
 * POWER8 event table.
 *
 * Each entry maps a PMC event enum value to:
 *  - pe_flags: bitmask of the hardware PMCs that can count the event
 *    (checked against the row index in powerpc_allocate_pmc via
 *    (counter & (1 << ri)));
 *  - pe_code: the event selector value programmed into the PMCnSEL
 *    field of MMCR1 by power8_set_pmc (PMCs 1-4 only; PMC5/PMC6 are
 *    fixed-function and use pe_code 0x00/0xf0 here as placeholders).
 */
static struct pmc_ppc_event power8_event_codes[] = {
	/* Instructions completed: fixed-function counter PMC5. */
	{PMC_EV_POWER8_INSTR_COMPLETED,
	    .pe_flags = PMC_FLAG_PMC5,
	    .pe_code = 0x00
	},
	/*
	 * PMC1 can also count cycles, but as PMC6 can only count cycles
	 * it's better to always use it and leave PMC1 free to count
	 * other events.
	 */
	{PMC_EV_POWER8_CYCLES,
	    .pe_flags = PMC_FLAG_PMC6,
	    .pe_code = 0xf0
	},
	{PMC_EV_POWER8_CYCLES_WITH_INSTRS_COMPLETED,
	    .pe_flags = PMC_FLAG_PMC1,
	    .pe_code = 0xf2
	},
	{PMC_EV_POWER8_FPU_INSTR_COMPLETED,
	    .pe_flags = PMC_FLAG_PMC1,
	    .pe_code = 0xf4
	},
	{PMC_EV_POWER8_ERAT_INSTR_MISS,
	    .pe_flags = PMC_FLAG_PMC1,
	    .pe_code = 0xf6
	},
	{PMC_EV_POWER8_CYCLES_IDLE,
	    .pe_flags = PMC_FLAG_PMC1,
	    .pe_code = 0xf8
	},
	{PMC_EV_POWER8_CYCLES_WITH_ANY_THREAD_RUNNING,
	    .pe_flags = PMC_FLAG_PMC1,
	    .pe_code = 0xfa
	},
	/* Events selectable on PMC2. */
	{PMC_EV_POWER8_STORE_COMPLETED,
	    .pe_flags = PMC_FLAG_PMC2,
	    .pe_code = 0xf0
	},
	{PMC_EV_POWER8_INSTR_DISPATCHED,
	    .pe_flags = PMC_FLAG_PMC2 | PMC_FLAG_PMC3,
	    .pe_code = 0xf2
	},
	{PMC_EV_POWER8_CYCLES_RUNNING,
	    .pe_flags = PMC_FLAG_PMC2,
	    .pe_code = 0xf4
	},
	{PMC_EV_POWER8_ERAT_DATA_MISS,
	    .pe_flags = PMC_FLAG_PMC2,
	    .pe_code = 0xf6
	},
	{PMC_EV_POWER8_EXTERNAL_INTERRUPT,
	    .pe_flags = PMC_FLAG_PMC2,
	    .pe_code = 0xf8
	},
	{PMC_EV_POWER8_BRANCH_TAKEN,
	    .pe_flags = PMC_FLAG_PMC2,
	    .pe_code = 0xfa
	},
	{PMC_EV_POWER8_L1_INSTR_MISS,
	    .pe_flags = PMC_FLAG_PMC2,
	    .pe_code = 0xfc
	},
	{PMC_EV_POWER8_L2_LOAD_MISS,
	    .pe_flags = PMC_FLAG_PMC2,
	    .pe_code = 0xfe
	},
	/* Events selectable on PMC3. */
	{PMC_EV_POWER8_STORE_NO_REAL_ADDR,
	    .pe_flags = PMC_FLAG_PMC3,
	    .pe_code = 0xf0
	},
	{PMC_EV_POWER8_INSTR_COMPLETED_WITH_ALL_THREADS_RUNNING,
	    .pe_flags = PMC_FLAG_PMC3,
	    .pe_code = 0xf4
	},
	{PMC_EV_POWER8_L1_LOAD_MISS,
	    .pe_flags = PMC_FLAG_PMC3,
	    .pe_code = 0xf6
	},
	{PMC_EV_POWER8_TIMEBASE_EVENT,
	    .pe_flags = PMC_FLAG_PMC3,
	    .pe_code = 0xf8
	},
	{PMC_EV_POWER8_L3_INSTR_MISS,
	    .pe_flags = PMC_FLAG_PMC3,
	    .pe_code = 0xfa
	},
	{PMC_EV_POWER8_TLB_DATA_MISS,
	    .pe_flags = PMC_FLAG_PMC3,
	    .pe_code = 0xfc
	},
	{PMC_EV_POWER8_L3_LOAD_MISS,
	    .pe_flags = PMC_FLAG_PMC3,
	    .pe_code = 0xfe
	},
	/* Events selectable on PMC4. */
	{PMC_EV_POWER8_LOAD_NO_REAL_ADDR,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xf0
	},
	{PMC_EV_POWER8_CYCLES_WITH_INSTRS_DISPATCHED,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xf2
	},
	{PMC_EV_POWER8_CYCLES_RUNNING_PURR_INC,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xf4
	},
	{PMC_EV_POWER8_BRANCH_MISPREDICTED,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xf6
	},
	{PMC_EV_POWER8_PREFETCHED_INSTRS_DISCARDED,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xf8
	},
	{PMC_EV_POWER8_INSTR_COMPLETED_RUNNING,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xfa
	},
	{PMC_EV_POWER8_TLB_INSTR_MISS,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xfc
	},
	{PMC_EV_POWER8_CACHE_LOAD_MISS,
	    .pe_flags = PMC_FLAG_PMC4,
	    .pe_code = 0xfe
	}
};

/* Number of entries in power8_event_codes. */
static size_t power8_event_codes_size = nitems(power8_event_codes);
/*
 * Program (or stop) the event counted by PMC 'ri' on this CPU.
 *
 * config carries the event selector plus the POWERPC_PMC_*_ENABLE flags;
 * PMCN_NONE means "stop this counter".  Only PMCs 1-4 (ri 0-3) have a
 * selector field in MMCR1; PMC5/PMC6 count fixed events.
 */
static void
power8_set_pmc(int cpu, int ri, int config)
{
	register_t reg;

	/* Program the MMCR1 event selector for PMCs 1-4. */
	if (ri >= 0 && ri <= 3) {
		reg = mfspr(SPR_MMCR1);
		reg &= ~SPR_MMCR1_P8_PMCNSEL_MASK(ri);
		reg |= SPR_MMCR1_P8_PMCNSEL(ri, config & ~POWERPC_PMC_ENABLE);
		mtspr(SPR_MMCR1, reg);
	}

	/*
	 * Start from "frozen in every state" for this counter; if it is
	 * being started, clear the freeze bits for the requested states.
	 */
	reg = mfspr(SPR_MMCR2) | SPR_MMCR2_FCNHSP(ri);
	if (config != PMCN_NONE) {
		if ((config & POWERPC_PMC_USER_ENABLE) != 0)
			reg &= ~(SPR_MMCR2_FCNP0(ri) |
			    SPR_MMCR2_FCNP1(ri));
		if ((config & POWERPC_PMC_KERNEL_ENABLE) != 0)
			reg &= ~(SPR_MMCR2_FCNH(ri) |
			    SPR_MMCR2_FCNS(ri));
	}
	mtspr(SPR_MMCR2, reg);
}
/*
 * Per-CPU initialization for POWER8 PMCs.
 *
 * Performs the generic per-CPU setup (powerpc_pcpu_init), then brings the
 * performance-monitor registers to a known state: all counters frozen and
 * cleared, no event selected, each counter individually frozen in MMCR2,
 * and finally interrupts enabled with the global freeze lifted.  The order
 * of the MMCR writes matters: counters must be globally frozen (MMCR0_FC)
 * while PMC/MMCR1/MMCR2 are being rewritten.
 *
 * Always returns 0.
 */
static int
power8_pcpu_init(struct pmc_mdep *md, int cpu)
{
	register_t mmcr0;
	int i;

	/* Common PowerPC per-CPU state (pmc_hw array, pcpu wiring). */
	powerpc_pcpu_init(md, cpu);

	/* Freeze all counters before modifying PMC registers */
	mmcr0 = mfspr(SPR_MMCR0) | SPR_MMCR0_FC;
	mtspr(SPR_MMCR0, mmcr0);

	/*
	 * Now setup MMCR0:
	 *  - PMAO=0: clear alerts
	 *  - FCPC=0, FCP=0: don't freeze counters in problem state
	 *  - FCECE: Freeze Counters on Enabled Condition or Event
	 *  - PMC1CE/PMCNCE: PMC1/N Condition Enable
	 */
	mmcr0 &= ~(SPR_MMCR0_PMAO | SPR_MMCR0_FCPC | SPR_MMCR0_FCP);
	mmcr0 |= SPR_MMCR0_FCECE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE;
	mtspr(SPR_MMCR0, mmcr0);

	/* Clear all PMCs to prevent enabled condition interrupts */
	for (i = 0; i < POWER8_MAX_PMCS; i++)
		powerpc_pmcn_write(i, 0);

	/* Disable events in PMCs 1-4 */
	mtspr(SPR_MMCR1, mfspr(SPR_MMCR1) & ~SPR_MMCR1_P8_PMCSEL_ALL);

	/* Freeze each counter, in all states */
	mtspr(SPR_MMCR2, mfspr(SPR_MMCR2) |
	    SPR_MMCR2_FCNHSP(0) | SPR_MMCR2_FCNHSP(1) | SPR_MMCR2_FCNHSP(2) |
	    SPR_MMCR2_FCNHSP(3) | SPR_MMCR2_FCNHSP(4) | SPR_MMCR2_FCNHSP(5));

	/* Enable interrupts, unset global freeze */
	mmcr0 &= ~SPR_MMCR0_FC;
	mmcr0 |= SPR_MMCR0_PMAE;
	mtspr(SPR_MMCR0, mmcr0);
	return (0);
}
/*
 * Per-CPU teardown for POWER8 PMCs: disable performance-monitor
 * interrupts and freeze every counter, then run the generic cleanup.
 * Returns whatever powerpc_pcpu_fini returns.
 */
static int
power8_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	register_t reg;

	/* Freeze counters, disable interrupts */
	reg = (mfspr(SPR_MMCR0) | SPR_MMCR0_FC) & ~SPR_MMCR0_PMAE;
	mtspr(SPR_MMCR0, reg);

	return (powerpc_pcpu_fini(md, cpu));
}
/*
 * Resume counting after a PMC interrupt: clear the global freeze and any
 * pending alert (PMAO), and re-arm the performance-monitor exception
 * (PMAE) only when the caller requests it via 'ie'.
 */
static void
power8_resume_pmc(bool ie)
{
	register_t reg;

	reg = mfspr(SPR_MMCR0) &
	    ~(SPR_MMCR0_FC | SPR_MMCR0_PMAO | SPR_MMCR0_PMAE);
	if (ie)
		reg |= SPR_MMCR0_PMAE;
	mtspr(SPR_MMCR0, reg);
}
/*
 * Register the POWER8 PMC class with the machine-dependent pmc layer.
 *
 * Fills in the class descriptor, installs the POWER8-specific per-CPU
 * init/fini hooks, points the remaining operations at the shared PowerPC
 * implementations, and publishes the POWER8 event table and callbacks
 * consumed by the common code.  Always returns 0.
 */
int
pmc_power8_initialize(struct pmc_mdep *pmc_mdep)
{
	struct pmc_classdep *cd;

	pmc_mdep->pmd_cputype = PMC_CPU_PPC_POWER8;

	cd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC];
	cd->pcd_caps = POWERPC_PMC_CAPS;
	cd->pcd_class = PMC_CLASS_POWER8;
	cd->pcd_num = POWER8_MAX_PMCS;
	cd->pcd_ri = pmc_mdep->pmd_npmc;
	cd->pcd_width = 32;

	/* POWER8-specific per-CPU setup/teardown. */
	cd->pcd_pcpu_init = power8_pcpu_init;
	cd->pcd_pcpu_fini = power8_pcpu_fini;

	/* Operations shared with the other PowerPC PMC backends. */
	cd->pcd_allocate_pmc = powerpc_allocate_pmc;
	cd->pcd_release_pmc = powerpc_release_pmc;
	cd->pcd_start_pmc = powerpc_start_pmc;
	cd->pcd_stop_pmc = powerpc_stop_pmc;
	cd->pcd_get_config = powerpc_get_config;
	cd->pcd_config_pmc = powerpc_config_pmc;
	cd->pcd_describe = powerpc_describe;
	cd->pcd_read_pmc = powerpc_read_pmc;
	cd->pcd_write_pmc = powerpc_write_pmc;

	pmc_mdep->pmd_npmc += POWER8_MAX_PMCS;
	pmc_mdep->pmd_intr = powerpc_pmc_intr;

	/* Hooks and tables used by the common PowerPC hwpmc code. */
	ppc_event_codes = power8_event_codes;
	ppc_event_codes_size = power8_event_codes_size;
	ppc_event_first = PMC_EV_POWER8_FIRST;
	ppc_event_last = PMC_EV_POWER8_LAST;
	ppc_max_pmcs = POWER8_MAX_PMCS;

	powerpc_set_pmc = power8_set_pmc;
	powerpc_pmcn_read = powerpc_pmcn_read_default;
	powerpc_pmcn_write = powerpc_pmcn_write_default;
	powerpc_resume_pmc = power8_resume_pmc;

	return (0);
}

View File

@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <machine/pmc_mdep.h>
@ -53,6 +54,17 @@ __FBSDID("$FreeBSD$");
#endif
struct powerpc_cpu **powerpc_pcpu;
struct pmc_ppc_event *ppc_event_codes;
size_t ppc_event_codes_size;
int ppc_event_first;
int ppc_event_last;
int ppc_max_pmcs;
void (*powerpc_set_pmc)(int cpu, int ri, int config);
pmc_value_t (*powerpc_pmcn_read)(unsigned int pmc);
void (*powerpc_pmcn_write)(unsigned int pmc, uint32_t val);
void (*powerpc_resume_pmc)(bool ie);
int
pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
@ -142,6 +154,398 @@ powerpc_get_config(int cpu, int ri, struct pmc **ppm)
return (0);
}
/*
 * Common per-CPU PMC initialization shared by all PowerPC backends.
 *
 * Allocates the per-CPU powerpc_cpu structure and its pmc_hw array
 * (sized by ppc_max_pmcs, set by the backend), records the PMC class,
 * and links each pmc_hw into the global per-CPU table at the class's
 * row-index base.  Always returns 0; allocation uses M_WAITOK and so
 * cannot fail.
 */
int
powerpc_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct powerpc_cpu *pcpu;
	struct pmc_hw *hw;
	int base_ri, n;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
	PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu);

	pcpu = malloc(sizeof(struct powerpc_cpu), M_PMC, M_WAITOK | M_ZERO);
	powerpc_pcpu[cpu] = pcpu;
	pcpu->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * ppc_max_pmcs,
	    M_PMC, M_WAITOK | M_ZERO);
	pcpu->pc_class =
	    md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_class;

	pc = pmc_pcpu[cpu];
	KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
	base_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri;

	for (n = 0; n < ppc_max_pmcs; n++) {
		hw = &pcpu->pc_ppcpmcs[n];
		hw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
		hw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + base_ri] = hw;
	}

	return (0);
}
int
powerpc_pcpu_fini(struct pmc_mdep *md, int cpu)
{
PMCDBG1(MDP,INI,1,"powerpc-fini cpu=%d", cpu);
free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
free(powerpc_pcpu[cpu], M_PMC);
return (0);
}
int
powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
enum pmc_event pe;
uint32_t caps, config = 0, counter = 0;
int i;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < ppc_max_pmcs,
("[powerpc,%d] illegal row index %d", __LINE__, ri));
caps = a->pm_caps;
pe = a->pm_ev;
if (pe < ppc_event_first || pe > ppc_event_last)
return (EINVAL);
for (i = 0; i < ppc_event_codes_size; i++) {
if (ppc_event_codes[i].pe_event == pe) {
config = ppc_event_codes[i].pe_code;
counter = ppc_event_codes[i].pe_flags;
break;
}
}
if (i == ppc_event_codes_size)
return (EINVAL);
if ((counter & (1 << ri)) == 0)
return (EINVAL);
if (caps & PMC_CAP_SYSTEM)
config |= POWERPC_PMC_KERNEL_ENABLE;
if (caps & PMC_CAP_USER)
config |= POWERPC_PMC_USER_ENABLE;
if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
config |= POWERPC_PMC_ENABLE;
pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
PMCDBG3(MDP,ALL,1,"powerpc-allocate cpu=%d ri=%d -> config=0x%x",
cpu, ri, config);
return (0);
}
/*
 * Release a PMC row.  There is no hardware state to undo; just assert
 * that the row was already unconfigured.
 */
int
powerpc_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *hw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	hw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
	KASSERT(hw->phw_pmc == NULL,
	    ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, hw->phw_pmc));

	return (0);
}
int
powerpc_start_pmc(int cpu, int ri)
{
struct pmc *pm;
PMCDBG2(MDP,STA,1,"powerpc-start cpu=%d ri=%d", cpu, ri);
pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
powerpc_set_pmc(cpu, ri, pm->pm_md.pm_powerpc.pm_powerpc_evsel);
return (0);
}
/*
 * Stop a PMC by programming the "no event" selector into its row.
 */
int
powerpc_stop_pmc(int cpu, int ri)
{
	PMCDBG2(MDP,STO,1, "powerpc-stop cpu=%d ri=%d", cpu, ri);
	powerpc_set_pmc(cpu, ri, PMCN_NONE);

	return (0);
}
/*
 * Attach ('pm' != NULL) or detach ('pm' == NULL) a pmc to/from hardware
 * row 'ri' on 'cpu'.
 */
int
powerpc_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *hw;

	PMCDBG3(MDP,CFG,1, "powerpc-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	hw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];

	/* Attaching over an already-configured row is a caller bug. */
	KASSERT(pm == NULL || hw->phw_pmc == NULL,
	    ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, hw->phw_pmc));

	hw->phw_pmc = pm;

	return (0);
}
/*
 * Default PMC read routine: return the current value of hardware PMC
 * 'pmc' (0-based row index) from the corresponding SPR.
 */
pmc_value_t
powerpc_pmcn_read_default(unsigned int pmc)
{
	pmc_value_t val;

	/*
	 * Valid rows are 0 .. ppc_max_pmcs - 1.  The previous '>' test
	 * let pmc == ppc_max_pmcs slip past the check and fall through
	 * the switch, returning an uninitialized value.
	 */
	if (pmc >= ppc_max_pmcs)
		panic("Invalid PMC number: %d\n", pmc);

	switch (pmc) {
	case 0:
		val = mfspr(SPR_PMC1);
		break;
	case 1:
		val = mfspr(SPR_PMC2);
		break;
	case 2:
		val = mfspr(SPR_PMC3);
		break;
	case 3:
		val = mfspr(SPR_PMC4);
		break;
	case 4:
		val = mfspr(SPR_PMC5);
		break;
	case 5:
		val = mfspr(SPR_PMC6);
		break;
	case 6:
		val = mfspr(SPR_PMC7);
		break;
	case 7:
		val = mfspr(SPR_PMC8);
		break;
	default:
		/* Unreachable after the bounds check above. */
		panic("Invalid PMC number: %d\n", pmc);
	}

	return (val);
}
/*
 * Default PMC write routine: store 'val' into hardware PMC 'pmc'
 * (0-based row index) via the corresponding SPR.
 */
void
powerpc_pmcn_write_default(unsigned int pmc, uint32_t val)
{
	/*
	 * Valid rows are 0 .. ppc_max_pmcs - 1.  The previous '>' test
	 * let pmc == ppc_max_pmcs slip past the check, making the write
	 * a silent no-op instead of a panic.
	 */
	if (pmc >= ppc_max_pmcs)
		panic("Invalid PMC number: %d\n", pmc);

	switch (pmc) {
	case 0:
		mtspr(SPR_PMC1, val);
		break;
	case 1:
		mtspr(SPR_PMC2, val);
		break;
	case 2:
		mtspr(SPR_PMC3, val);
		break;
	case 3:
		mtspr(SPR_PMC4, val);
		break;
	case 4:
		mtspr(SPR_PMC5, val);
		break;
	case 5:
		mtspr(SPR_PMC6, val);
		break;
	case 6:
		mtspr(SPR_PMC7, val);
		break;
	case 7:
		mtspr(SPR_PMC8, val);
		break;
	default:
		/* Unreachable after the bounds check above. */
		panic("Invalid PMC number: %d\n", pmc);
	}
}
/*
 * Read PMC 'ri' on 'cpu' into *v.
 *
 * Counting mode: the 32-bit hardware counter is widened using the
 * per-pmc overflow count (PPC_OVERFLOWCNT) so the MI layer sees a
 * monotonically growing value.
 * Sampling mode: *v is the number of events counted since the last
 * reload, derived from the hardware counter and the reload count.
 */
int
powerpc_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t p, r, tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
	KASSERT(pm,
	    ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
	    ri));

	/*
	 * After an interrupt occurs because of a PMC overflow, the PMC value
	 * is not always MAX_PMC_VALUE + 1, but may be a little above it.
	 * This may mess up calculations and frustrate machine independent
	 * layer expectations, such as that no value read should be greater
	 * than reload count in sampling mode.
	 * To avoid these issues, use MAX_PMC_VALUE as an upper limit.
	 */
	p = MIN(powerpc_pmcn_read(ri), POWERPC_MAX_PMC_VALUE);
	r = pm->pm_sc.pm_reloadcount;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
		/*
		 * Special case 1: r is too big
		 * This usually happens when a PMC write fails, the PMC is
		 * stopped and then it is read.
		 *
		 * Special case 2: PMC was reseted or has a value
		 * that should not be possible with current r.
		 *
		 * In the above cases, just return 0 instead of an arbitrary
		 * value.
		 */
		if (r > POWERPC_MAX_PMC_VALUE || p + r <= POWERPC_MAX_PMC_VALUE)
			tmp = 0;
		else
			tmp = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(p);
	} else
		/* Widen to 64 bits using the accumulated overflow count. */
		tmp = p + (POWERPC_MAX_PMC_VALUE + 1) * PPC_OVERFLOWCNT(pm);

	PMCDBG5(MDP,REA,1,"ppc-read cpu=%d ri=%d -> %jx (%jx,%jx)",
	    cpu, ri, (uintmax_t)tmp, (uintmax_t)PPC_OVERFLOWCNT(pm),
	    (uintmax_t)p);
	*v = tmp;
	return (0);
}
/*
 * Write value 'v' to PMC 'ri' on 'cpu'.
 *
 * Counting mode: 'v' is a virtual 64-bit count — the quotient by the
 * counter period becomes the overflow count and the remainder goes into
 * the 32-bit hardware counter.
 * Sampling mode: 'v' is a reload count; it must fit the counter range
 * and is converted to the hardware value that overflows after 'v'
 * events.  Oversized reload counts are rejected with EINVAL.
 */
int
powerpc_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	pmc_value_t vlo;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;

	if (PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) {
		/* Split 'v' into overflow count + hardware counter value. */
		PPC_OVERFLOWCNT(pm) = v / (POWERPC_MAX_PMC_VALUE + 1);
		vlo = v % (POWERPC_MAX_PMC_VALUE + 1);
	} else if (v > POWERPC_MAX_PMC_VALUE) {
		PMCDBG3(MDP,WRI,2,
		    "powerpc-write cpu=%d ri=%d: PMC value is too big: %jx",
		    cpu, ri, (uintmax_t)v);
		return (EINVAL);
	} else
		vlo = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG5(MDP,WRI,1,"powerpc-write cpu=%d ri=%d -> %jx (%jx,%jx)",
	    cpu, ri, (uintmax_t)v, (uintmax_t)PPC_OVERFLOWCNT(pm),
	    (uintmax_t)vlo);

	powerpc_pmcn_write(ri, vlo);
	return (0);
}
/*
 * Common PMC interrupt handler.
 *
 * Scans all hardware rows for overflowed counters, posts samples for
 * running sampling-mode PMCs via pmc_process_interrupt(), and bumps the
 * overflow count for counting-mode PMCs.  Returns nonzero iff at least
 * one interrupting PMC was found; PERF exceptions are re-enabled only
 * in that case (see comment at the bottom).
 */
int
powerpc_pmc_intr(struct trapframe *tf)
{
	struct pmc *pm;
	struct powerpc_cpu *pc;
	int cpu, error, i, retval;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
	PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	retval = 0;
	pc = powerpc_pcpu[cpu];

	/*
	 * Look for a running, sampling PMC which has overflowed
	 * and which has a valid 'struct pmc' association.
	 */
	for (i = 0; i < ppc_max_pmcs; i++) {
		if (!POWERPC_PMC_HAS_OVERFLOWED(i))
			continue;
		retval = 1;	/* Found an interrupting PMC. */

		/*
		 * Always clear the PMC, to make it stop interrupting.
		 * If pm is available and in sampling mode, use reload
		 * count, to make PMC read after stop correct.
		 * Otherwise, just reset the PMC.
		 */
		if ((pm = pc->pc_ppcpmcs[i].phw_pmc) != NULL &&
		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			if (pm->pm_state != PMC_STATE_RUNNING) {
				/* Sampling but not running: reload only. */
				powerpc_write_pmc(cpu, i,
				    pm->pm_sc.pm_reloadcount);
				continue;
			}
		} else {
			if (pm != NULL) { /* !PMC_IS_SAMPLING_MODE */
				/* Counting mode: account for the wrap. */
				PPC_OVERFLOWCNT(pm) = (PPC_OVERFLOWCNT(pm) +
				    1) % PPC_OVERFLOWCNT_MAX;
				PMCDBG3(MDP,INT,2,
				    "cpu=%d ri=%d: overflowcnt=%d",
				    cpu, i, PPC_OVERFLOWCNT(pm));
			}
			powerpc_pmcn_write(i, 0);
			continue;
		}

		/* Running sampling PMC: hand the sample to the MI layer. */
		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error != 0) {
			PMCDBG3(MDP,INT,3,
			    "cpu=%d ri=%d: error %d processing interrupt",
			    cpu, i, error);
			powerpc_stop_pmc(cpu, i);
		}

		/* Reload sampling count */
		powerpc_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
	}

	if (retval)
		counter_u64_add(pmc_stats.pm_intr_processed, 1);
	else
		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

	/*
	 * Re-enable PERF exceptions if we were able to find the interrupt
	 * source and handle it. Otherwise, it's better to disable PERF
	 * interrupts, to avoid the risk of processing the same interrupt
	 * forever.
	 */
	powerpc_resume_pmc(retval != 0);
	if (retval == 0)
		log(LOG_WARNING,
		    "pmc_intr: couldn't find interrupting PMC on cpu %d - "
		    "disabling PERF interrupts\n", cpu);

	return (retval);
}
struct pmc_mdep *
pmc_md_initialize()
{
@ -177,6 +581,12 @@ pmc_md_initialize()
case IBM970MP:
error = pmc_ppc970_initialize(pmc_mdep);
break;
case IBMPOWER8E:
case IBMPOWER8NVL:
case IBMPOWER8:
case IBMPOWER9:
error = pmc_power8_initialize(pmc_mdep);
break;
case FSL_E500v1:
case FSL_E500v2:
case FSL_E500mc:

View File

@ -46,19 +46,69 @@
#define POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (0x80000000-(V))
#define POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (0x80000000-(P))
#define POWERPC_MAX_PMC_VALUE 0x7fffffffUL
#define POWERPC_PMC_HAS_OVERFLOWED(n) (powerpc_pmcn_read(n) & (0x1 << 31))
/*
* PMC value is used with OVERFLOWCNT to simulate a 64-bit counter to the
* machine independent part of hwpmc.
*/
#define PPC_OVERFLOWCNT(pm) (pm)->pm_md.pm_powerpc.pm_powerpc_overflowcnt
#define PPC_OVERFLOWCNT_MAX 0x200000000UL
struct powerpc_cpu {
struct pmc_hw *pc_ppcpmcs;
enum pmc_class pc_class;
};
struct pmc_ppc_event {
enum pmc_event pe_event;
uint32_t pe_flags;
#define PMC_FLAG_PMC1 0x01
#define PMC_FLAG_PMC2 0x02
#define PMC_FLAG_PMC3 0x04
#define PMC_FLAG_PMC4 0x08
#define PMC_FLAG_PMC5 0x10
#define PMC_FLAG_PMC6 0x20
#define PMC_FLAG_PMC7 0x40
#define PMC_FLAG_PMC8 0x80
uint32_t pe_code;
};
extern struct powerpc_cpu **powerpc_pcpu;
extern struct pmc_ppc_event *ppc_event_codes;
extern size_t ppc_event_codes_size;
extern int ppc_event_first;
extern int ppc_event_last;
extern int ppc_max_pmcs;
extern int pmc_e500_initialize(struct pmc_mdep *pmc_mdep);
extern int pmc_mpc7xxx_initialize(struct pmc_mdep *pmc_mdep);
extern int pmc_ppc970_initialize(struct pmc_mdep *pmc_mdep);
extern void (*powerpc_set_pmc)(int cpu, int ri, int config);
extern pmc_value_t (*powerpc_pmcn_read)(unsigned int pmc);
extern void (*powerpc_pmcn_write)(unsigned int pmc, uint32_t val);
extern void (*powerpc_resume_pmc)(bool ie);
int pmc_e500_initialize(struct pmc_mdep *pmc_mdep);
int pmc_mpc7xxx_initialize(struct pmc_mdep *pmc_mdep);
int pmc_ppc970_initialize(struct pmc_mdep *pmc_mdep);
int pmc_power8_initialize(struct pmc_mdep *pmc_mdep);
int powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc);
int powerpc_get_config(int cpu, int ri, struct pmc **ppm);
int powerpc_pcpu_init(struct pmc_mdep *md, int cpu);
int powerpc_pcpu_fini(struct pmc_mdep *md, int cpu);
int powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a);
int powerpc_release_pmc(int cpu, int ri, struct pmc *pmc);
int powerpc_start_pmc(int cpu, int ri);
int powerpc_stop_pmc(int cpu, int ri);
int powerpc_config_pmc(int cpu, int ri, struct pmc *pm);
pmc_value_t powerpc_pmcn_read_default(unsigned int pmc);
void powerpc_pmcn_write_default(unsigned int pmc, uint32_t val);
int powerpc_read_pmc(int cpu, int ri, pmc_value_t *v);
int powerpc_write_pmc(int cpu, int ri, pmc_value_t v);
int powerpc_pmc_intr(struct trapframe *tf);
extern int powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc);
extern int powerpc_get_config(int cpu, int ri, struct pmc **ppm);
#endif /* _KERNEL */
#endif /* _DEV_HWPMC_POWERPC_H_ */

View File

@ -41,7 +41,8 @@ __FBSDID("$FreeBSD$");
#include "hwpmc_powerpc.h"
#define PPC970_MAX_PMCS 8
#define PPC970_MAX_PMCS 8
#define PMC_PPC970_FLAG_PMCS 0x000000ff
/* MMCR0, PMC1 is 8 bytes in, PMC2 is 1 byte in. */
#define PPC970_SET_MMCR0_PMCSEL(r, x, i) \
@ -50,8 +51,6 @@ __FBSDID("$FreeBSD$");
#define PPC970_SET_MMCR1_PMCSEL(r, x, i) \
((r & ~(0x1f << (5 * (7 - i) + 2))) | (x << (5 * (7 - i) + 2)))
#define PPC970_PMC_HAS_OVERFLOWED(x) (ppc970_pmcn_read(x) & (0x1 << 31))
/* How PMC works on PPC970:
*
* Any PMC can count a direct event. Indirect events are handled specially.
@ -90,40 +89,25 @@ __FBSDID("$FreeBSD$");
* Add byte lane for PMC (above), bit 0+4, 1+5, 2+6, 3+7
*/
struct pmc_ppc970_event {
enum pmc_event pe_event;
uint32_t pe_flags;
#define PMC_PPC970_FLAG_PMCS 0x000000ff
#define PMC_PPC970_FLAG_PMC1 0x01
#define PMC_PPC970_FLAG_PMC2 0x02
#define PMC_PPC970_FLAG_PMC3 0x04
#define PMC_PPC970_FLAG_PMC4 0x08
#define PMC_PPC970_FLAG_PMC5 0x10
#define PMC_PPC970_FLAG_PMC6 0x20
#define PMC_PPC970_FLAG_PMC7 0x40
#define PMC_PPC970_FLAG_PMC8 0x80
uint32_t pe_code;
};
static struct pmc_ppc970_event ppc970_event_codes[] = {
static struct pmc_ppc_event ppc970_event_codes[] = {
{PMC_EV_PPC970_INSTR_COMPLETED,
.pe_flags = PMC_PPC970_FLAG_PMCS,
.pe_code = 0x09
},
{PMC_EV_PPC970_MARKED_GROUP_DISPATCH,
.pe_flags = PMC_PPC970_FLAG_PMC1,
.pe_flags = PMC_FLAG_PMC1,
.pe_code = 0x2
},
{PMC_EV_PPC970_MARKED_STORE_COMPLETED,
.pe_flags = PMC_PPC970_FLAG_PMC1,
.pe_flags = PMC_FLAG_PMC1,
.pe_code = 0x03
},
{PMC_EV_PPC970_GCT_EMPTY,
.pe_flags = PMC_PPC970_FLAG_PMC1,
.pe_flags = PMC_FLAG_PMC1,
.pe_code = 0x04
},
{PMC_EV_PPC970_RUN_CYCLES,
.pe_flags = PMC_PPC970_FLAG_PMC1,
.pe_flags = PMC_FLAG_PMC1,
.pe_code = 0x05
},
{PMC_EV_PPC970_OVERFLOW,
@ -135,123 +119,123 @@ static struct pmc_ppc970_event ppc970_event_codes[] = {
.pe_code = 0x0f
},
{PMC_EV_PPC970_THRESHOLD_TIMEOUT,
.pe_flags = PMC_PPC970_FLAG_PMC2,
.pe_flags = PMC_FLAG_PMC2,
.pe_code = 0x3
},
{PMC_EV_PPC970_GROUP_DISPATCH,
.pe_flags = PMC_PPC970_FLAG_PMC2,
.pe_flags = PMC_FLAG_PMC2,
.pe_code = 0x4
},
{PMC_EV_PPC970_BR_MARKED_INSTR_FINISH,
.pe_flags = PMC_PPC970_FLAG_PMC2,
.pe_flags = PMC_FLAG_PMC2,
.pe_code = 0x5
},
{PMC_EV_PPC970_GCT_EMPTY_BY_SRQ_FULL,
.pe_flags = PMC_PPC970_FLAG_PMC2,
.pe_flags = PMC_FLAG_PMC2,
.pe_code = 0xb
},
{PMC_EV_PPC970_STOP_COMPLETION,
.pe_flags = PMC_PPC970_FLAG_PMC3,
.pe_flags = PMC_FLAG_PMC3,
.pe_code = 0x1
},
{PMC_EV_PPC970_LSU_EMPTY,
.pe_flags = PMC_PPC970_FLAG_PMC3,
.pe_flags = PMC_FLAG_PMC3,
.pe_code = 0x2
},
{PMC_EV_PPC970_MARKED_STORE_WITH_INTR,
.pe_flags = PMC_PPC970_FLAG_PMC3,
.pe_flags = PMC_FLAG_PMC3,
.pe_code = 0x3
},
{PMC_EV_PPC970_CYCLES_IN_SUPER,
.pe_flags = PMC_PPC970_FLAG_PMC3,
.pe_flags = PMC_FLAG_PMC3,
.pe_code = 0x4
},
{PMC_EV_PPC970_VPU_MARKED_INSTR_COMPLETED,
.pe_flags = PMC_PPC970_FLAG_PMC3,
.pe_flags = PMC_FLAG_PMC3,
.pe_code = 0x5
},
{PMC_EV_PPC970_FXU0_IDLE_FXU1_BUSY,
.pe_flags = PMC_PPC970_FLAG_PMC4,
.pe_flags = PMC_FLAG_PMC4,
.pe_code = 0x2
},
{PMC_EV_PPC970_SRQ_EMPTY,
.pe_flags = PMC_PPC970_FLAG_PMC4,
.pe_flags = PMC_FLAG_PMC4,
.pe_code = 0x3
},
{PMC_EV_PPC970_MARKED_GROUP_COMPLETED,
.pe_flags = PMC_PPC970_FLAG_PMC4,
.pe_flags = PMC_FLAG_PMC4,
.pe_code = 0x4
},
{PMC_EV_PPC970_CR_MARKED_INSTR_FINISH,
.pe_flags = PMC_PPC970_FLAG_PMC4,
.pe_flags = PMC_FLAG_PMC4,
.pe_code = 0x5
},
{PMC_EV_PPC970_DISPATCH_SUCCESS,
.pe_flags = PMC_PPC970_FLAG_PMC5,
.pe_flags = PMC_FLAG_PMC5,
.pe_code = 0x1
},
{PMC_EV_PPC970_FXU0_IDLE_FXU1_IDLE,
.pe_flags = PMC_PPC970_FLAG_PMC5,
.pe_flags = PMC_FLAG_PMC5,
.pe_code = 0x2
},
{PMC_EV_PPC970_ONE_PLUS_INSTR_COMPLETED,
.pe_flags = PMC_PPC970_FLAG_PMC5,
.pe_flags = PMC_FLAG_PMC5,
.pe_code = 0x3
},
{PMC_EV_PPC970_GROUP_MARKED_IDU,
.pe_flags = PMC_PPC970_FLAG_PMC5,
.pe_flags = PMC_FLAG_PMC5,
.pe_code = 0x4
},
{PMC_EV_PPC970_MARKED_GROUP_COMPLETE_TIMEOUT,
.pe_flags = PMC_PPC970_FLAG_PMC5,
.pe_flags = PMC_FLAG_PMC5,
.pe_code = 0x5
},
{PMC_EV_PPC970_FXU0_BUSY_FXU1_BUSY,
.pe_flags = PMC_PPC970_FLAG_PMC6,
.pe_flags = PMC_FLAG_PMC6,
.pe_code = 0x2
},
{PMC_EV_PPC970_MARKED_STORE_SENT_TO_STS,
.pe_flags = PMC_PPC970_FLAG_PMC6,
.pe_flags = PMC_FLAG_PMC6,
.pe_code = 0x3
},
{PMC_EV_PPC970_FXU_MARKED_INSTR_FINISHED,
.pe_flags = PMC_PPC970_FLAG_PMC6,
.pe_flags = PMC_FLAG_PMC6,
.pe_code = 0x4
},
{PMC_EV_PPC970_MARKED_GROUP_ISSUED,
.pe_flags = PMC_PPC970_FLAG_PMC6,
.pe_flags = PMC_FLAG_PMC6,
.pe_code = 0x5
},
{PMC_EV_PPC970_FXU0_BUSY_FXU1_IDLE,
.pe_flags = PMC_PPC970_FLAG_PMC7,
.pe_flags = PMC_FLAG_PMC7,
.pe_code = 0x2
},
{PMC_EV_PPC970_GROUP_COMPLETED,
.pe_flags = PMC_PPC970_FLAG_PMC7,
.pe_flags = PMC_FLAG_PMC7,
.pe_code = 0x3
},
{PMC_EV_PPC970_FPU_MARKED_INSTR_COMPLETED,
.pe_flags = PMC_PPC970_FLAG_PMC7,
.pe_flags = PMC_FLAG_PMC7,
.pe_code = 0x4
},
{PMC_EV_PPC970_MARKED_INSTR_FINISH_ANY_UNIT,
.pe_flags = PMC_PPC970_FLAG_PMC7,
.pe_flags = PMC_FLAG_PMC7,
.pe_code = 0x5
},
{PMC_EV_PPC970_EXTERNAL_INTERRUPT,
.pe_flags = PMC_PPC970_FLAG_PMC8,
.pe_flags = PMC_FLAG_PMC8,
.pe_code = 0x2
},
{PMC_EV_PPC970_GROUP_DISPATCH_REJECT,
.pe_flags = PMC_PPC970_FLAG_PMC8,
.pe_flags = PMC_FLAG_PMC8,
.pe_code = 0x3
},
{PMC_EV_PPC970_LSU_MARKED_INSTR_FINISH,
.pe_flags = PMC_PPC970_FLAG_PMC8,
.pe_flags = PMC_FLAG_PMC8,
.pe_code = 0x4
},
{PMC_EV_PPC970_TIMEBASE_EVENT,
.pe_flags = PMC_PPC970_FLAG_PMC8,
.pe_flags = PMC_FLAG_PMC8,
.pe_code = 0x5
},
#if 0
@ -269,108 +253,26 @@ static struct pmc_ppc970_event ppc970_event_codes[] = {
};
static size_t ppc970_event_codes_size = nitems(ppc970_event_codes);
/*
 * Read the current value of PPC970 hardware PMC 'pmc' (0-based) from
 * the matching SPR.
 */
static pmc_value_t
ppc970_pmcn_read(unsigned int pmc)
{

	switch (pmc) {
	case 0:
		return (mfspr(SPR_PMC1));
	case 1:
		return (mfspr(SPR_PMC2));
	case 2:
		return (mfspr(SPR_PMC3));
	case 3:
		return (mfspr(SPR_PMC4));
	case 4:
		return (mfspr(SPR_PMC5));
	case 5:
		return (mfspr(SPR_PMC6));
	case 6:
		return (mfspr(SPR_PMC7));
	case 7:
		return (mfspr(SPR_PMC8));
	default:
		panic("Invalid PMC number: %d\n", pmc);
	}
}
/*
 * Write 'val' into PPC970 hardware PMC 'pmc' (0-based) via the matching
 * SPR.
 */
static void
ppc970_pmcn_write(unsigned int pmc, uint32_t val)
{

	if (pmc == 0)
		mtspr(SPR_PMC1, val);
	else if (pmc == 1)
		mtspr(SPR_PMC2, val);
	else if (pmc == 2)
		mtspr(SPR_PMC3, val);
	else if (pmc == 3)
		mtspr(SPR_PMC4, val);
	else if (pmc == 4)
		mtspr(SPR_PMC5, val);
	else if (pmc == 5)
		mtspr(SPR_PMC6, val);
	else if (pmc == 6)
		mtspr(SPR_PMC7, val);
	else if (pmc == 7)
		mtspr(SPR_PMC8, val);
	else
		panic("Invalid PMC number: %d\n", pmc);
}
/*
 * Attach ('pm' != NULL) or detach ('pm' == NULL) a pmc to/from hardware
 * row 'ri' on 'cpu'.
 */
static int
ppc970_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *hw;

	PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	hw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];

	/* Attaching over an already-configured row is a caller bug. */
	KASSERT(pm == NULL || hw->phw_pmc == NULL,
	    ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, hw->phw_pmc));

	hw->phw_pmc = pm;

	return 0;
}
static int
ppc970_set_pmc(int cpu, int ri, int config)
{
struct pmc *pm;
struct pmc_hw *phw;
register_t pmc_mmcr;
int config_mask;
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
pm = phw->phw_pmc;
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
pm = phw->phw_pmc;
if (config == PMCN_NONE)
config = PMC970N_NONE;
/*
* The mask is inverted (enable is 1) compared to the flags in MMCR0,
* which are Freeze flags.
*/
config_mask = ~config & POWERPC_PMC_ENABLE;
config &= ~POWERPC_PMC_ENABLE;
/*
* Disable the PMCs.
@ -393,181 +295,19 @@ ppc970_set_pmc(int cpu, int ri, int config)
mtspr(SPR_MMCR1, pmc_mmcr);
break;
}
return 0;
}
/*
 * Start PPC970 PMC 'ri' on 'cpu': program its event selector, then
 * unfreeze the counters in MMCR0 honoring the pmc's privilege-level
 * enable bits.
 */
static int
ppc970_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct pmc_hw *phw;
	register_t pmc_mmcr;
	uint32_t config;
	int error;

	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
	pm = phw->phw_pmc;
	config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE;

	/*
	 * Propagate any failure from ppc970_set_pmc(); the previous code
	 * stored the result in 'error' and silently discarded it.
	 */
	error = ppc970_set_pmc(cpu, ri, config);
	if (error != 0)
		return (error);

	/* The mask is inverted (enable is 1) compared to the flags in MMCR0, which
	 * are Freeze flags.
	 */
	config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE;

	pmc_mmcr = mfspr(SPR_MMCR0);
	pmc_mmcr &= ~SPR_MMCR0_FC;
	pmc_mmcr |= config;
	mtspr(SPR_MMCR0, pmc_mmcr);

	return 0;
}
/*
 * Stop PPC970 PMC 'ri' by programming the "no event" selector into it.
 */
static int
ppc970_stop_pmc(int cpu, int ri)
{

	return (ppc970_set_pmc(cpu, ri, PMC970N_NONE));
}
/*
 * Read PPC970 PMC 'ri' on 'cpu' into *v.  In sampling mode the hardware
 * value is converted back to an event count relative to the reload
 * point; in counting mode the raw counter is returned.
 */
static int
ppc970_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t ctr;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS,
	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
	KASSERT(pm,
	    ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
	    ri));

	ctr = ppc970_pmcn_read(ri);
	PMCDBG2(MDP,REA,2,"ppc-read id=%d -> %jd", ri, ctr);
	*v = PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) ?
	    POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(ctr) : ctr;

	return 0;
}
/*
 * Write 'v' to PPC970 PMC 'ri' on 'cpu'.  In sampling mode 'v' is a
 * reload count and is converted to the hardware value that overflows
 * after 'v' events.
 */
static int
ppc970_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	pmc_value_t ctr;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
	ctr = PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) ?
	    POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v) : v;

	PMCDBG3(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, ctr);

	ppc970_pmcn_write(ri, ctr);

	return 0;
}
static int
ppc970_intr(struct trapframe *tf)
{
struct pmc *pm;
struct powerpc_cpu *pac;
uint32_t config;
int i, error, retval, cpu;
cpu = curcpu;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
TRAPF_USERMODE(tf));
retval = 0;
pac = powerpc_pcpu[cpu];
/*
* look for all PMCs that have interrupted:
* - look for a running, sampling PMC which has overflowed
* and which has a valid 'struct pmc' association
*
* If found, we call a helper to process the interrupt.
*/
config = mfspr(SPR_MMCR0) & ~SPR_MMCR0_FC;
for (i = 0; i < PPC970_MAX_PMCS; i++) {
if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
continue;
}
if (!PPC970_PMC_HAS_OVERFLOWED(i))
continue;
retval = 1; /* Found an interrupting PMC. */
if (pm->pm_state != PMC_STATE_RUNNING)
continue;
error = pmc_process_interrupt(PMC_HR, pm, tf);
if (error != 0)
ppc970_stop_pmc(cpu, i);
/* reload sampling count. */
ppc970_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
if (config != PMC970N_NONE) {
pmc_mmcr = mfspr(SPR_MMCR0);
pmc_mmcr &= ~SPR_MMCR0_FC;
pmc_mmcr |= config_mask;
mtspr(SPR_MMCR0, pmc_mmcr);
}
if (retval)
counter_u64_add(pmc_stats.pm_intr_processed, 1);
else
counter_u64_add(pmc_stats.pm_intr_ignored, 1);
/* Re-enable PERF exceptions. */
if (retval)
mtspr(SPR_MMCR0, config | SPR_MMCR0_PMXE);
return (retval);
}
static int
ppc970_pcpu_init(struct pmc_mdep *md, int cpu)
{
struct pmc_cpu *pc;
struct powerpc_cpu *pac;
struct pmc_hw *phw;
int first_ri, i;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu);
powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
M_WAITOK|M_ZERO);
pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * PPC970_MAX_PMCS,
M_PMC, M_WAITOK|M_ZERO);
pac->pc_class = PMC_CLASS_PPC970;
pc = pmc_pcpu[cpu];
first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri;
KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
for (i = 0, phw = pac->pc_ppcpmcs; i < PPC970_MAX_PMCS; i++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
phw->phw_pmc = NULL;
pc->pc_hwpmcs[i + first_ri] = phw;
}
powerpc_pcpu_init(md, cpu);
/* Clear the MMCRs, and set FC, to disable all PMCs. */
/* 970 PMC is not counted when set to 0x08 */
@ -576,86 +316,34 @@ ppc970_pcpu_init(struct pmc_mdep *md, int cpu)
SPR_MMCR0_PMC1SEL(0x8) | SPR_MMCR0_PMC2SEL(0x8));
mtspr(SPR_MMCR1, 0x4218420);
return 0;
return (0);
}
static int
ppc970_pcpu_fini(struct pmc_mdep *md, int cpu)
{
register_t mmcr0 = mfspr(SPR_MMCR0);
register_t mmcr0;
mmcr0 |= SPR_MMCR0_FC;
/* Freeze counters, disable interrupts */
mmcr0 = mfspr(SPR_MMCR0);
mmcr0 &= ~SPR_MMCR0_PMXE;
mmcr0 |= SPR_MMCR0_FC;
mtspr(SPR_MMCR0, mmcr0);
free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
free(powerpc_pcpu[cpu], M_PMC);
return 0;
return (powerpc_pcpu_fini(md, cpu));
}
static int
ppc970_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
static void
ppc970_resume_pmc(bool ie)
{
enum pmc_event pe;
uint32_t caps, config = 0, counter = 0;
int i;
register_t mmcr0;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS,
("[powerpc,%d] illegal row index %d", __LINE__, ri));
caps = a->pm_caps;
pe = a->pm_ev;
if (pe < PMC_EV_PPC970_FIRST || pe > PMC_EV_PPC970_LAST)
return (EINVAL);
for (i = 0; i < ppc970_event_codes_size; i++) {
if (ppc970_event_codes[i].pe_event == pe) {
config = ppc970_event_codes[i].pe_code;
counter = ppc970_event_codes[i].pe_flags;
break;
}
}
if (i == ppc970_event_codes_size)
return (EINVAL);
if ((counter & (1 << ri)) == 0)
return (EINVAL);
if (caps & PMC_CAP_SYSTEM)
config |= POWERPC_PMC_KERNEL_ENABLE;
if (caps & PMC_CAP_USER)
config |= POWERPC_PMC_USER_ENABLE;
if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
config |= POWERPC_PMC_ENABLE;
pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
PMCDBG2(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
return 0;
}
static int
ppc970_release_pmc(int cpu, int ri, struct pmc *pmc)
{
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS,
("[powerpc,%d] illegal row-index %d", __LINE__, ri));
phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
KASSERT(phw->phw_pmc == NULL,
("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
return 0;
/* Unfreeze counters and re-enable PERF exceptions if requested. */
mmcr0 = mfspr(SPR_MMCR0);
mmcr0 &= ~(SPR_MMCR0_FC | SPR_MMCR0_PMXE);
if (ie)
mmcr0 |= SPR_MMCR0_PMXE;
mtspr(SPR_MMCR0, mmcr0);
}
int
@ -672,20 +360,31 @@ pmc_ppc970_initialize(struct pmc_mdep *pmc_mdep)
pcd->pcd_ri = pmc_mdep->pmd_npmc;
pcd->pcd_width = 32;
pcd->pcd_allocate_pmc = ppc970_allocate_pmc;
pcd->pcd_config_pmc = ppc970_config_pmc;
pcd->pcd_allocate_pmc = powerpc_allocate_pmc;
pcd->pcd_config_pmc = powerpc_config_pmc;
pcd->pcd_pcpu_fini = ppc970_pcpu_fini;
pcd->pcd_pcpu_init = ppc970_pcpu_init;
pcd->pcd_describe = powerpc_describe;
pcd->pcd_get_config = powerpc_get_config;
pcd->pcd_read_pmc = ppc970_read_pmc;
pcd->pcd_release_pmc = ppc970_release_pmc;
pcd->pcd_start_pmc = ppc970_start_pmc;
pcd->pcd_stop_pmc = ppc970_stop_pmc;
pcd->pcd_write_pmc = ppc970_write_pmc;
pcd->pcd_read_pmc = powerpc_read_pmc;
pcd->pcd_release_pmc = powerpc_release_pmc;
pcd->pcd_start_pmc = powerpc_start_pmc;
pcd->pcd_stop_pmc = powerpc_stop_pmc;
pcd->pcd_write_pmc = powerpc_write_pmc;
pmc_mdep->pmd_npmc += PPC970_MAX_PMCS;
pmc_mdep->pmd_intr = ppc970_intr;
pmc_mdep->pmd_intr = powerpc_pmc_intr;
ppc_event_codes = ppc970_event_codes;
ppc_event_codes_size = ppc970_event_codes_size;
ppc_event_first = PMC_EV_PPC970_FIRST;
ppc_event_last = PMC_EV_PPC970_LAST;
ppc_max_pmcs = PPC970_MAX_PMCS;
powerpc_set_pmc = ppc970_set_pmc;
powerpc_pmcn_read = powerpc_pmcn_read_default;
powerpc_pmcn_write = powerpc_pmcn_write_default;
powerpc_resume_pmc = ppc970_resume_pmc;
return (0);
}

View File

@ -1686,6 +1686,41 @@ __PMC_EV_ALIAS("unhalted-core-cycles", IAP_ARCH_UNH_COR_CYC)
#define PMC_EV_PPC970_FIRST PMC_EV_PPC970_INSTR_COMPLETED
#define PMC_EV_PPC970_LAST PMC_EV_PPC970_ADDER
#define __PMC_EV_POWER8() \
__PMC_EV(POWER8, CYCLES) \
__PMC_EV(POWER8, CYCLES_WITH_INSTRS_COMPLETED) \
__PMC_EV(POWER8, FPU_INSTR_COMPLETED) \
__PMC_EV(POWER8, ERAT_INSTR_MISS) \
__PMC_EV(POWER8, CYCLES_IDLE) \
__PMC_EV(POWER8, CYCLES_WITH_ANY_THREAD_RUNNING) \
__PMC_EV(POWER8, STORE_COMPLETED) \
__PMC_EV(POWER8, INSTR_DISPATCHED) \
__PMC_EV(POWER8, CYCLES_RUNNING) \
__PMC_EV(POWER8, ERAT_DATA_MISS) \
__PMC_EV(POWER8, EXTERNAL_INTERRUPT) \
__PMC_EV(POWER8, BRANCH_TAKEN) \
__PMC_EV(POWER8, L1_INSTR_MISS) \
__PMC_EV(POWER8, L2_LOAD_MISS) \
__PMC_EV(POWER8, STORE_NO_REAL_ADDR) \
__PMC_EV(POWER8, INSTR_COMPLETED_WITH_ALL_THREADS_RUNNING) \
__PMC_EV(POWER8, L1_LOAD_MISS) \
__PMC_EV(POWER8, TIMEBASE_EVENT) \
__PMC_EV(POWER8, L3_INSTR_MISS) \
__PMC_EV(POWER8, TLB_DATA_MISS) \
__PMC_EV(POWER8, L3_LOAD_MISS) \
__PMC_EV(POWER8, LOAD_NO_REAL_ADDR) \
__PMC_EV(POWER8, CYCLES_WITH_INSTRS_DISPATCHED) \
__PMC_EV(POWER8, CYCLES_RUNNING_PURR_INC) \
__PMC_EV(POWER8, BRANCH_MISPREDICTED) \
__PMC_EV(POWER8, PREFETCHED_INSTRS_DISCARDED) \
__PMC_EV(POWER8, INSTR_COMPLETED_RUNNING) \
__PMC_EV(POWER8, TLB_INSTR_MISS) \
__PMC_EV(POWER8, CACHE_LOAD_MISS) \
__PMC_EV(POWER8, INSTR_COMPLETED)
#define PMC_EV_POWER8_FIRST PMC_EV_POWER8_CYCLES
#define PMC_EV_POWER8_LAST PMC_EV_POWER8_INSTR_COMPLETED
#define __PMC_EV_E500() \
__PMC_EV(E500, CYCLES) \
__PMC_EV(E500, INSTR_COMPLETED) \
@ -1871,6 +1906,7 @@ __PMC_EV_ALIAS("unhalted-core-cycles", IAP_ARCH_UNH_COR_CYC)
* 0x11600 0x00FF BERI statcounters
* 0x13000 0x00FF MPC7450 events
* 0x13100 0x00FF IBM PPC970 events
* 0x13200 0x00FF IBM POWER8 events
* 0x13300 0x00FF Freescale e500 events
* 0x14000 0x0100 ARMv7 events
* 0x14100 0x0100 ARMv8 events
@ -1901,6 +1937,8 @@ __PMC_EV_ALIAS("unhalted-core-cycles", IAP_ARCH_UNH_COR_CYC)
__PMC_EV_PPC7450() \
__PMC_EV_BLOCK(PPC970, 0x13100) \
__PMC_EV_PPC970() \
__PMC_EV_BLOCK(POWER8, 0x13200) \
__PMC_EV_POWER8() \
__PMC_EV_BLOCK(E500, 0x13300) \
__PMC_EV_E500() \
__PMC_EV_BLOCK(ARMV7, 0x14000) \

View File

@ -32,7 +32,8 @@ SRCS+= hwpmc_tsc.c hwpmc_x86.c hwpmc_uncore.c
.endif
.if ${MACHINE_CPUARCH} == "powerpc"
SRCS+= hwpmc_powerpc.c hwpmc_e500.c hwpmc_mpc7xxx.c hwpmc_ppc970.c
SRCS+= hwpmc_powerpc.c hwpmc_e500.c hwpmc_mpc7xxx.c hwpmc_ppc970.c \
hwpmc_power8.c
.endif
.include <bsd.kmod.mk>

View File

@ -79,6 +79,7 @@ union pmc_md_op_pmcallocate {
#if _KERNEL
struct pmc_md_powerpc_pmc {
uint64_t pm_powerpc_overflowcnt;
uint32_t pm_powerpc_evsel;
};

View File

@ -119,6 +119,7 @@ extern char pmc_cpuid[PMC_CPUID_LEN];
__PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \
__PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \
__PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \
__PMC_CPU(PPC_POWER8, 0x390, "IBM POWER8") \
__PMC_CPU(GENERIC, 0x400, "Generic") \
__PMC_CPU(ARMV7_CORTEX_A5, 0x500, "ARMv7 Cortex A5") \
__PMC_CPU(ARMV7_CORTEX_A7, 0x501, "ARMv7 Cortex A7") \
@ -164,7 +165,8 @@ enum pmc_cputype {
__PMC_CLASS(ARMV8, 0x11, "ARMv8") \
__PMC_CLASS(MIPS74K, 0x12, "MIPS 74K") \
__PMC_CLASS(E500, 0x13, "Freescale e500 class") \
__PMC_CLASS(BERI, 0x14, "MIPS BERI")
__PMC_CLASS(BERI, 0x14, "MIPS BERI") \
__PMC_CLASS(POWER8, 0x15, "IBM POWER8 class")
enum pmc_class {
#undef __PMC_CLASS
@ -173,7 +175,7 @@ enum pmc_class {
};
#define PMC_CLASS_FIRST PMC_CLASS_TSC
#define PMC_CLASS_LAST PMC_CLASS_E500
#define PMC_CLASS_LAST PMC_CLASS_POWER8
/*
* A PMC can be in the following states: