/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"

#include <sys/types.h>
#include <sys/ctype.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#ifdef HWPMC_HOOKS
FEATURE(hwpmc_hooks, "Kernel support for HW PMC");
#define PMC_KERNEL_VERSION PMC_VERSION
#else
#define PMC_KERNEL_VERSION 0
#endif

MALLOC_DECLARE(M_PMCHOOKS);
MALLOC_DEFINE(M_PMCHOOKS, "pmchooks", "Memory space for PMC hooks");

/* memory pool */
MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");

const int pmc_kernel_version = PMC_KERNEL_VERSION;

/* Hook variable. */
int __read_mostly (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;

/* Interrupt handler */
int __read_mostly (*pmc_intr)(struct trapframe *tf) = NULL;

DPCPU_DEFINE(uint8_t, pmc_sampled);

/*
 * A global count of SS mode PMCs.  When non-zero, this means that
 * we have processes that are sampling the system as a whole.
 */
volatile int pmc_ss_count;

/*
 * Since PMC(4) may not be loaded in the current kernel, the
 * convention followed is that a non-NULL value of 'pmc_hook' implies
 * the presence of this kernel module.
 *
 * This requires us to protect 'pmc_hook' with a shared (sx) lock --
 * thus making the process of calling into PMC(4) somewhat more
 * expensive than a simple 'if' check and indirect call.
 */
struct sx pmc_sx;
SX_SYSINIT(pmcsx, &pmc_sx, "pmc-sx");
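
/*
 * Illustrative sketch (not part of the original file): a call site is
 * expected to take the shared lock, re-check the hook pointer and only
 * then call through it, e.g.:
 *
 *	sx_slock(&pmc_sx);
 *	if (pmc_hook != NULL)
 *		(void) (*pmc_hook)(curthread, function, arg);
 *	sx_sunlock(&pmc_sx);
 *
 * Callers normally use the PMC_CALL_HOOK()/PMC_CALL_HOOK_UNLOCKED()
 * wrappers from <sys/pmckern.h> rather than open-coding this sequence.
 */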

/*
 * Per-CPU trapframes used by soft PMCs.
 */
struct trapframe pmc_tf[MAXCPU];

/*
 * Per-domain list of buffer headers.
 */
__read_mostly struct pmc_domain_buffer_header *pmc_dom_hdrs[MAXMEMDOM];

/*
 * Soft PMCs use a global table to store registered events.
 */

SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");

static int pmc_softevents = 16;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, softevents, CTLFLAG_RDTUN,
    &pmc_softevents, 0, "maximum number of soft events");
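
/*
 * Illustrative note (not from the original file): since the sysctl is
 * CTLFLAG_RDTUN, the table size can only be set at boot time, for
 * example from loader.conf(5):
 *
 *	kern.hwpmc.softevents=32
 *
 * init_hwpmc() below resets the value to PMC_EV_DYN_COUNT if the
 * tunable is out of range.
 */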

int pmc_softs_count;
struct pmc_soft **pmc_softs;

struct mtx pmc_softs_mtx;
MTX_SYSINIT(pmc_soft_mtx, &pmc_softs_mtx, "pmc-softs", MTX_SPIN);

/*
 * Helper functions.
 */

/*
 * A note on the CPU numbering scheme used by the hwpmc(4) driver.
 *
 * CPUs are denoted using numbers in the range 0..[pmc_cpu_max()-1].
 * CPUs could be numbered "sparsely" in this range; the predicate
 * `pmc_cpu_is_present()' is used to test whether a given CPU is
 * physically present.
 *
 * Further, a CPU that is physically present may be administratively
 * disabled or otherwise unavailable for use by hwpmc(4).  The
 * `pmc_cpu_is_active()' predicate tests for CPU usability.  An
 * "active" CPU participates in thread scheduling and can field
 * interrupts raised by PMC hardware.
 *
 * On systems with hyperthreaded CPUs, multiple logical CPUs may share
 * PMC hardware resources.  For such processors one logical CPU is
 * denoted as the primary owner of the in-CPU PMC resources.  The
 * pmc_cpu_is_primary() predicate is used to distinguish this primary
 * CPU from the others.
 */
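
/*
 * Illustrative sketch (not part of the original file): per-CPU code is
 * expected to size its data structures with pmc_cpu_max() and to skip
 * CPUs that fail the predicates above, e.g.:
 *
 *	unsigned int cpu, maxcpu;
 *
 *	maxcpu = pmc_cpu_max();
 *	for (cpu = 0; cpu < maxcpu; cpu++) {
 *		if (!pmc_cpu_is_active(cpu) || !pmc_cpu_is_primary(cpu))
 *			continue;
 *		... operate on this CPU's PMC hardware ...
 *	}
 */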

int
pmc_cpu_is_active(int cpu)
{
#ifdef SMP
	return (pmc_cpu_is_present(cpu) &&
	    !CPU_ISSET(cpu, &hlt_cpus_mask));
#else
	return (1);
#endif
}

/* Deprecated. */
int
pmc_cpu_is_disabled(int cpu)
{
	return (!pmc_cpu_is_active(cpu));
}

int
pmc_cpu_is_present(int cpu)
{
#ifdef SMP
	return (!CPU_ABSENT(cpu));
#else
	return (1);
#endif
}

int
pmc_cpu_is_primary(int cpu)
{
#ifdef SMP
	return (!CPU_ISSET(cpu, &logical_cpus_mask));
#else
	return (1);
#endif
}

/*
 * Return the maximum CPU number supported by the system.  The return
 * value is used for scaling internal data structures and for runtime
 * checks.
 */
unsigned int
pmc_cpu_max(void)
{
#ifdef SMP
	return (mp_maxid+1);
#else
	return (1);
#endif
}

#ifdef INVARIANTS

/*
 * Return the count of CPUs in the `active' state in the system.
 */
int
pmc_cpu_max_active(void)
{
#ifdef SMP
	/*
	 * When support for CPU hot-plugging is added to the kernel,
	 * this function would change to return the current number
	 * of "active" CPUs.
	 */
	return (mp_ncpus);
#else
	return (1);
#endif
}

#endif

/*
 * Clean up an event name:
 * - strip leading, trailing and duplicated '_' characters
 * - convert the remaining characters to upper case
 */
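/*
 * Example (illustrative, not from the original file): the name
 * "__my__event_" is rewritten in place to "MY_EVENT".
 */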
static void
pmc_soft_namecleanup(char *name)
{
	char *p, *q;

	p = q = name;

	for ( ; *p == '_' ; p++)
		;
	for ( ; *p ; p++) {
		if (*p == '_' && (*(p + 1) == '_' || *(p + 1) == '\0'))
			continue;
		else
			*q++ = toupper(*p);
	}
	*q = '\0';
}

void
pmc_soft_ev_register(struct pmc_soft *ps)
{
	static int warned = 0;
	int n;

	ps->ps_running = 0;
	ps->ps_ev.pm_ev_code = 0; /* invalid */
	pmc_soft_namecleanup(ps->ps_ev.pm_ev_name);

	mtx_lock_spin(&pmc_softs_mtx);

	if (pmc_softs_count >= pmc_softevents) {
		/*
		 * XXX Reusing event codes is racy: a newly allocated
		 * event may be mistaken for the old one it replaces.
		 */
		for (n = 0; n < pmc_softevents; n++)
			if (pmc_softs[n] == NULL)
				break;
		if (n == pmc_softevents) {
			mtx_unlock_spin(&pmc_softs_mtx);
			if (!warned) {
				printf("hwpmc: too many soft events, "
				    "increase kern.hwpmc.softevents tunable\n");
				warned = 1;
			}
			return;
		}

		ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + n;
		pmc_softs[n] = ps;
	} else {
		ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + pmc_softs_count;
		pmc_softs[pmc_softs_count++] = ps;
	}

	mtx_unlock_spin(&pmc_softs_mtx);
}

void
pmc_soft_ev_deregister(struct pmc_soft *ps)
{

	KASSERT(ps != NULL, ("pmc_soft_deregister: called with NULL"));

	mtx_lock_spin(&pmc_softs_mtx);

	if (ps->ps_ev.pm_ev_code != 0 &&
	    (ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST) < pmc_softevents) {
		KASSERT((int)ps->ps_ev.pm_ev_code >= PMC_EV_SOFT_FIRST &&
		    (int)ps->ps_ev.pm_ev_code <= PMC_EV_SOFT_LAST,
		    ("pmc_soft_deregister: invalid event value"));
		pmc_softs[ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST] = NULL;
	}

	mtx_unlock_spin(&pmc_softs_mtx);
}

struct pmc_soft *
pmc_soft_ev_acquire(enum pmc_event ev)
{
	struct pmc_soft *ps;

	if (ev == 0 || (ev - PMC_EV_SOFT_FIRST) >= pmc_softevents)
		return NULL;

	KASSERT((int)ev >= PMC_EV_SOFT_FIRST &&
	    (int)ev <= PMC_EV_SOFT_LAST,
	    ("event out of range"));

	mtx_lock_spin(&pmc_softs_mtx);

	ps = pmc_softs[ev - PMC_EV_SOFT_FIRST];
	if (ps == NULL)
		mtx_unlock_spin(&pmc_softs_mtx);

	return ps;
}

void
pmc_soft_ev_release(struct pmc_soft *ps)
{

	mtx_unlock_spin(&pmc_softs_mtx);
}
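
/*
 * Illustrative sketch (not part of the original file; "example_ps" is a
 * hypothetical provider): an event is registered once and samples are
 * later posted through the acquire/release pair, roughly:
 *
 *	static struct pmc_soft example_ps;
 *
 *	strlcpy(example_ps.ps_ev.pm_ev_name, "EXAMPLE_EVENT",
 *	    sizeof(example_ps.ps_ev.pm_ev_name));
 *	pmc_soft_ev_register(&example_ps);
 *	...
 *	ps = pmc_soft_ev_acquire(example_ps.ps_ev.pm_ev_code);
 *	if (ps != NULL) {
 *		if (ps->ps_running)
 *			... emit a sample for this event ...
 *		pmc_soft_ev_release(ps);
 *	}
 *
 * The PMC_SOFT_DEFINE()/PMC_SOFT_CALL() macros in <sys/pmckern.h>
 * normally wrap this pattern.
 */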

#ifdef NUMA
#define NDOMAINS vm_ndomains

static int
getdomain(int cpu)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu);
	return (pc->pc_domain);
}
#else
#define NDOMAINS 1
#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags))
#define getdomain(cpu) 0
#endif

/*
 * Initialise hwpmc.
 */
static void
init_hwpmc(void *dummy __unused)
{
	int domain, cpu;

	if (pmc_softevents <= 0 ||
	    pmc_softevents > PMC_EV_DYN_COUNT) {
		(void) printf("hwpmc: tunable \"softevents\"=%d out of "
		    "range.\n", pmc_softevents);
		pmc_softevents = PMC_EV_DYN_COUNT;
	}
	pmc_softs = malloc(pmc_softevents * sizeof(struct pmc_soft *),
	    M_PMCHOOKS, M_NOWAIT|M_ZERO);
	KASSERT(pmc_softs != NULL, ("cannot allocate soft events table"));
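
	/*
	 * Allocate one buffer-list header per memory domain and count the
	 * CPUs belonging to each domain; these counts are presumably used
	 * later when the per-domain sample buffer lists are sized.
	 */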
	for (domain = 0; domain < NDOMAINS; domain++) {
		pmc_dom_hdrs[domain] = malloc_domain(
		    sizeof(struct pmc_domain_buffer_header), M_PMC, domain,
		    M_WAITOK|M_ZERO);
		mtx_init(&pmc_dom_hdrs[domain]->pdbh_mtx,
		    "pmc_bufferlist_mtx", "pmc-leaf", MTX_SPIN);
		TAILQ_INIT(&pmc_dom_hdrs[domain]->pdbh_head);
	}
	CPU_FOREACH(cpu) {
		domain = getdomain(cpu);
		KASSERT(pmc_dom_hdrs[domain] != NULL,
		    ("no mem allocated for domain: %d", domain));
		pmc_dom_hdrs[domain]->pdbh_ncpus++;
	}
}

SYSINIT(hwpmc, SI_SUB_KDTRACE, SI_ORDER_FIRST, init_hwpmc, NULL);