Bring a working snapshot of hwpmc(4), its associated libraries, userland
utilities and documentation into -CURRENT.

Bump FreeBSD_version.

Reviewed by:	alc, jhb (kernel changes)
Joseph Koshy 2005-04-19 04:01:25 +00:00
parent 5b930f51c9
commit ebccf1e3a6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=145256
51 changed files with 23703 additions and 5 deletions


@ -48,6 +48,7 @@
.ds doc-str-Lb-libkiconv Kernel side iconv library (libkiconv, \-lkiconv)
.ds doc-str-Lb-libmd Message Digest (MD4, MD5, etc.) Support Library (libmd, \-lmd)
.ds doc-str-Lb-libnetgraph Netgraph User Library (libnetgraph, \-lnetgraph)
.ds doc-str-Lb-libpmc Performance monitoring counters API (libpmc, \-lpmc)
.ds doc-str-Lb-librpcsvc RPC Service Library (librpcsvc, \-lrpcsvc)
.ds doc-str-Lb-libsdp Bluetooth Service Discovery Protocol User Library (libsdp, \-lsdp)
.ds doc-str-Lb-libthr 1:1 Threading Library (libthr, \-lthr)


@ -31,7 +31,7 @@ SUBDIR= ${_csu} libcom_err libcrypt libkvm msun libmd libncurses \
${_libio} libipsec \
libipx libkiconv libmagic libmenu ${_libmilter} ${_libmp} \
${_libncp} ${_libngatm} libopie libpam libpanel libpcap \
${_libpthread} ${_libsdp} ${_libsm} ${_libsmb} ${_libsmdb} \
${_libpmc} ${_libpthread} ${_libsdp} ${_libsm} ${_libsmb} ${_libsmdb} \
${_libsmutil} libstand libtelnet ${_libthr} ${_libthread_db} libufs \
libugidfw ${_libusbhid} ${_libvgl} libwrap liby libz ${_bind}
@ -59,6 +59,7 @@ _libsdp= libsdp
.if ${MACHINE_ARCH} == "i386"
_libncp= libncp
_libpmc= libpmc
_libsmb= libsmb
_libvgl= libvgl
.endif
@ -89,6 +90,7 @@ _libmp= libmp
.if ${MACHINE_ARCH} == "amd64"
_libncp= libncp
_libpmc= libpmc
_libsmb= libsmb
.endif

lib/libpmc/Makefile (new file, 43 lines)

@ -0,0 +1,43 @@
# $FreeBSD$
LIB= pmc
SRCS= libpmc.c
INCS= pmc.h
CFLAGS+= -I${.CURDIR} -I${.CURDIR}/../../sys
WARNS?= 6
MAN= pmc.3
MLINKS+= \
pmc.3 pmc_allocate.3 \
pmc.3 pmc_attach.3 \
pmc.3 pmc_configure_logfile.3 \
pmc.3 pmc_cpuinfo.3 \
pmc.3 pmc_detach.3 \
pmc.3 pmc_disable.3 \
pmc.3 pmc_enable.3 \
pmc.3 pmc_event_names_of_class.3 \
pmc.3 pmc_get_driver_stats.3 \
pmc.3 pmc_init.3 \
pmc.3 pmc_name_of_capability.3 \
pmc.3 pmc_name_of_class.3 \
pmc.3 pmc_name_of_cputype.3 \
pmc.3 pmc_name_of_event.3 \
pmc.3 pmc_name_of_mode.3 \
pmc.3 pmc_name_of_state.3 \
pmc.3 pmc_ncpu.3 \
pmc.3 pmc_npmc.3 \
pmc.3 pmc_pmcinfo.3 \
pmc.3 pmc_read.3 \
pmc.3 pmc_release.3 \
pmc.3 pmc_rw.3 \
pmc.3 pmc_set.3 \
pmc.3 pmc_start.3 \
pmc.3 pmc_stop.3 \
pmc.3 pmc_write.3 \
pmc.3 pmc_x86_get_msr.3
.include <bsd.lib.mk>

lib/libpmc/libpmc.c (new file, 2136 lines)

File diff suppressed because it is too large.

lib/libpmc/pmc.3 (new file, 3090 lines)

File diff suppressed because it is too large.

lib/libpmc/pmc.h (new file, 79 lines)

@ -0,0 +1,79 @@
/*-
* Copyright (c) 2003,2004 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PMC_H_
#define _PMC_H_
#include <sys/pmc.h>
/*
* Prototypes
*/
int pmc_allocate(const char *_ctrspec, enum pmc_mode _mode, uint32_t _flags,
int _cpu, pmc_id_t *_pmcid);
int pmc_attach(pmc_id_t _pmcid, pid_t _pid);
int pmc_configure_logfile(int _fd);
int pmc_detach(pmc_id_t _pmcid, pid_t _pid);
int pmc_disable(int _cpu, int _pmc);
int pmc_enable(int _cpu, int _pmc);
int pmc_get_driver_stats(struct pmc_op_getdriverstats *_gms);
int pmc_init(void);
int pmc_read(pmc_id_t _pmc, pmc_value_t *_value);
int pmc_release(pmc_id_t _pmc);
int pmc_rw(pmc_id_t _pmc, pmc_value_t _newvalue, pmc_value_t *_oldvalue);
int pmc_set(pmc_id_t _pmc, pmc_value_t _value);
int pmc_start(pmc_id_t _pmc);
int pmc_stop(pmc_id_t _pmc);
int pmc_write(pmc_id_t _pmc, pmc_value_t _value);
int pmc_ncpu(void);
int pmc_npmc(int _cpu);
int pmc_cpuinfo(const struct pmc_op_getcpuinfo **_cpu_info);
int pmc_pmcinfo(int _cpu, struct pmc_op_getpmcinfo **_pmc_info);
const char *pmc_name_of_capability(uint32_t _c);
const char *pmc_name_of_class(enum pmc_class _pc);
const char *pmc_name_of_cputype(enum pmc_cputype _cp);
const char *pmc_name_of_disposition(enum pmc_disp _pd);
const char *pmc_name_of_event(enum pmc_event _pe);
const char *pmc_name_of_mode(enum pmc_mode _pm);
const char *pmc_name_of_state(enum pmc_state _ps);
int pmc_event_names_of_class(enum pmc_class _cl, const char ***_eventnames,
int *_nevents);
/*
* Architecture specific extensions
*/
#if __i386__ || __amd64__
int pmc_x86_get_msr(pmc_id_t _pmc, uint32_t *_msr);
#endif
#endif
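
The header above only declares the interface; as a rough usage sketch (not part of this commit), a process-private counting measurement using these functions might look like the following. The event-specifier string, the PMC_MODE_TC constant and the error handling shown are assumptions based on pmc(3) and <sys/pmc.h> and may differ in detail:

#include <err.h>
#include <pmc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	pmc_id_t pmc;
	pmc_value_t count;

	if (pmc_init() < 0)			/* attach to the hwpmc driver */
		err(1, "pmc_init");

	/* "k7-retired-instructions" is a placeholder event specifier */
	if (pmc_allocate("k7-retired-instructions", PMC_MODE_TC, 0,
	    PMC_CPU_ANY, &pmc) < 0)
		err(1, "pmc_allocate");

	/* starting an unattached PMC attaches it to this (owner) process */
	if (pmc_start(pmc) < 0)
		err(1, "pmc_start");

	/* ... code to be measured would run here ... */

	if (pmc_stop(pmc) < 0)
		err(1, "pmc_stop");
	if (pmc_read(pmc, &count) < 0)
		err(1, "pmc_read");
	printf("events: %ju\n", (uintmax_t) count);

	pmc_release(pmc);
	return (0);
}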


@ -5,6 +5,7 @@ SUBDIR= beyond4.3 \
contents \
devfs \
diskperf \
hwpmc \
fsinterface \
jail \
kernmalloc \


@ -0,0 +1,8 @@
# $FreeBSD$
VOLUME= papers
DOC= hwpmc
SRCS= hwpmc.ms
MACROS= -ms
.include <bsd.doc.mk>


@ -0,0 +1,34 @@
.\" Copyright (c) 2004 Joseph Koshy.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY JOSEPH KOSHY AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL JOSEPH KOSHY OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.OH '''Using Hardware Performance Monitoring Counters'
.EH 'HWPMC'''
.TL
Using Hardware Performance Monitoring Counters in FreeBSD
.sp
\s-2FreeBSD 5.2.1\s+2
.sp
\fRJuly, 2004\fR
.PP


@ -0,0 +1,8 @@
# $FreeBSD$
Examples illustrating the use of the hwpmc(4) driver and pmc(3)
library interface.
While there is nothing here yet, the source code for pmccontrol(8)
and pmcstat(8) could serve as examples.


@ -92,6 +92,7 @@ MAN= aac.4 \
hifn.4 \
hme.4 \
hptmv.4 \
hwpmc.4 \
ichsmb.4 \
icmp.4 \
icmp6.4 \

share/man/man4/hwpmc.4 (new file, 583 lines)

@ -0,0 +1,583 @@
.\" Copyright (c) 2003-2005 Joseph Koshy
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd April 15, 2005
.Dt HWPMC 4
.Os
.Sh NAME
.Nm hwpmc
.Nd Hardware performance monitoring counter support
.Sh SYNOPSIS
.Cd options HWPMC_HOOKS
.br
.Cd device hwpmc
.Sh DESCRIPTION
The
.Nm
driver virtualizes the hardware performance monitoring facilities in
modern CPUs and provides support for using these facilities from
user level processes.
.Pp
The driver supports multi-processor systems.
.Pp
PMCs are allocated using the
.Ic PMC_OP_PMCALLOCATE
request.
A successful
.Ic PMC_OP_PMCALLOCATE
request will return an integer handle (typically a small integer) to
the requesting process.
Subsequent operations on the allocated PMC use this handle to denote
the specific PMC.
A process that has successfully allocated a PMC is termed an
.Dq "owner process" .
.Pp
PMCs may be allocated to operate in process-private or in system-wide
modes.
.Bl -hang -width "XXXXXXXXXXXXXXX"
.It Em Process-private
In process-private mode, a PMC is active only when a thread belonging
to a process it is attached to is scheduled on a CPU.
.It Em System-wide
In system-wide mode a PMC operates independently of processes and
measures hardware events for the system as a whole.
.El
.Pp
The
.Nm
driver supports the use of hardware PMCs for counting or for
sampling:
.Bl -hang -width "XXXXXXXXX"
.It Em Counting
In counting modes, the PMCs count hardware events.
These counts are retrievable using the
.Ic PMC_OP_PMCRW
system call on all architectures, though some architectures like the
x86 and amd64 offer faster methods of reading these counts.
.It Em Sampling
In sampling modes, PMCs are configured to sample the CPU
instruction pointer after a configurable number of hardware events
have been observed.
These instruction pointer samples are directed to a log file for
subsequent analysis.
.El
.Pp
These modes of operation are orthogonal; a PMC may be configured to
operate in one of four modes:
.Bl -tag -width indent
.It Process-private, counting
These PMCs count hardware events whenever a thread in their attached process is
scheduled on a CPU.
These PMCs normally count from zero, but the initial count may be
set using the
.Ic PMC_OP_SETCOUNT
operation.
Applications can read the value of the PMC anytime using the
.Ic PMC_OP_PMCRW
operation.
.It Process-private, sampling
These PMCs sample the target process's instruction pointer after the
configured number of hardware events have been observed.
The PMCs only count events when a thread belonging to their attached
process is active.
The desired frequency of sampling is set using the
.Ic PMC_OP_SETCOUNT
operation prior to starting the PMC.
Log files are configured using the
.Ic PMC_OP_CONFIGURELOG
operation (a short setup sketch follows this list).
.It System-wide, counting
These PMCs count hardware events independently of which processes
are executing.
The current count on these PMCs can be read using the
.Ic PMC_OP_PMCRW
request.
These PMCs normally count from zero, but the initial count may be
set using the
.Ic PMC_OP_SETCOUNT
operation.
.It System-wide, sampling
These PMCs will periodically sample the instruction pointer of the CPU
they are allocated on, and will write the sample to a log for further
processing.
The desired frequency of sampling is set using the
.Ic PMC_OP_SETCOUNT
operation prior to starting the PMC.
Log files are configured using the
.Ic PMC_OP_CONFIGURELOG
operation.
.Pp
System-wide statistical sampling can only be enabled by a process with
super-user privileges.
.El
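.Pp
For illustration, the sampling-mode setup described above could be
performed with the corresponding
.Xr pmc 3
wrappers; the sampling count and the file descriptor shown are
placeholders:
.Bd -literal -offset indent
/* 'pmc' is a sampling-mode PMC allocated earlier; 'fd' is an
   open file descriptor for the log file */
pmc_set(pmc, 100000);		/* take a sample every 100000 events */
pmc_configure_logfile(fd);
pmc_start(pmc);
.Ed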
.Pp
Processes are allowed to allocate as many PMCs as the hardware and
current operating conditions permit.
Processes may mix allocations of system-wide and process-private
PMCs.
Multiple processes may concurrently use the facilities
of the
.Nm
driver.
.Pp
Allocated PMCs are started using the
.Ic PMC_OP_PMCSTART
operation, and stopped using the
.Ic PMC_OP_PMCSTOP
operation.
Stopping and starting a PMC is permitted at any time the owner process
has a valid handle to the PMC.
.Pp
Process-private PMCs need to be attached to a target process before
they can be used.
Attaching a process to a PMC is done using the
.Ic PMC_OP_PMCATTACH
operation.
An already attached PMC may be detached from its target process
using the converse
.Ic PMC_OP_PMCDETACH
operation.
Issuing an
.Ic PMC_OP_PMCSTART
operation on an as yet unattached PMC will cause it to be attached
to its owner process.
The following rules determine whether a given process may attach
a PMC to another target process:
.Bl -bullet -compact
.It
A non-jailed process with super-user privileges is allowed to attach
to any other process in the system.
.It
Other processes are only allowed to attach to targets that they would
be able to attach to for debugging (as determined by
.Xr p_candebug 9 ) .
.El
.Pp
PMCs are released using
.Ic PMC_OP_PMCRELEASE .
After a successful
.Ic PMC_OP_PMCRELEASE
operation, the handle to the PMC will become invalid.
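.Pp
For illustration, a system-wide counting PMC could be taken through
this life cycle using the
.Xr pmc 3
library wrappers for these operations; the event specifier and the
CPU number shown are placeholders:
.Bd -literal -offset indent
pmc_id_t pmc;
pmc_value_t count;

pmc_allocate("k8-dc-access", PMC_MODE_SC, 0, 0 /* CPU 0 */, &pmc);
pmc_start(pmc);
/* ... let the system run ... */
pmc_read(pmc, &count);
pmc_stop(pmc);
pmc_release(pmc);
.Ed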
.Ss MODIFIER FLAGS
The
.Ic PMC_OP_PMCALLOCATE
operation supports the following flags that modify the behavior
of an allocated PMC:
.Bl -tag -width indent
.It Dv PMC_F_DESCENDANTS
This flag is valid only for a PMC being allocated in process-private
mode.
It signifies that the PMC will track hardware events for its
target process and the target's current and future descendants
(a short sketch follows this list).
.El
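.Pp
For illustration, this flag is passed as the flags argument of
.Fn pmc_allocate ;
the event specifier and mode shown are placeholders:
.Bd -literal -offset indent
pmc_allocate("k8-dc-miss", PMC_MODE_TC, PMC_F_DESCENDANTS,
    PMC_CPU_ANY, &pmc);
pmc_attach(pmc, child_pid);	/* children of child_pid are tracked too */
.Ed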
.Ss SIGNALS
The
.Nm
driver may deliver signals to processes that have allocated PMCs:
.Bl -tag -width indent
.It Bq SIGIO
A
.Ic PMC_OP_PMCRW
operation was attempted on a process-private PMC that does not have
attached target processes.
.It Bq SIGBUS
The
.Nm
driver is being unloaded from the kernel.
.El
.Sh PROGRAMMING API
The recommended way for application programs to use the facilities of
the
.Nm
driver is using the API provided by the library
.Xr pmc 3 .
.Pp
The
.Nm
driver operates using a system call number that is dynamically
allotted to it when it is loaded into the kernel.
.Pp
The
.Nm
driver supports the following operations:
.Bl -tag -width indent
.It Ic PMC_OP_CONFIGURELOG
Configure a log file for sampling mode PMCs.
.It Ic PMC_OP_GETCPUINFO
Retrieve information about the number of CPUs on the system and
the number of hardware performance monitoring counters available per-CPU.
.It Ic PMC_OP_GETDRIVERSTATS
Retrieve module statistics (for analyzing the behavior of
.Nm
itself).
.It Ic PMC_OP_GETMODULEVERSION
Retrieve the version number of the API.
.It Ic PMC_OP_GETPMCINFO
Retrieve information about the current state of the PMCs on a
given CPU.
.It Ic PMC_OP_PMCADMIN
Set the administrative state (i.e., whether enabled or disabled) for
the hardware PMCs managed by the
.Nm
driver.
.It Ic PMC_OP_PMCALLOCATE
Allocate and configure a PMC.
On successful allocation, a handle to the PMC (a small integer)
is returned.
.It Ic PMC_OP_PMCATTACH
Attach a process mode PMC to a target process.
The PMC will be active whenever a thread in the target process is
scheduled on a CPU.
.Pp
If the
.Dv PMC_F_DESCENDANTS
flag had been specified at PMC allocation time, then the PMC is
attached to all current and future descendants of the target process.
.It Ic PMC_OP_PMCDETACH
Detach a PMC from its target process.
.It Ic PMC_OP_PMCRELEASE
Release a PMC.
.It Ic PMC_OP_PMCRW
Read and write a PMC.
This operation is valid only for PMCs configured in counting modes.
.It Ic PMC_OP_SETCOUNT
Set the initial count (for counting mode PMCs) or the desired sampling
rate (for sampling mode PMCs).
.It Ic PMC_OP_PMCSTART
Start a PMC.
.It Ic PMC_OP_PMCSTOP
Stop a PMC.
.It Ic PMC_OP_WRITELOG
Insert a timestamped user record into the log file.
.El
.Ss i386 SPECIFIC API
Some i386 family CPUs support the RDPMC instruction which allows a
user process to read a PMC value without needing to invoke a
.Ic PMC_OP_PMCRW
operation.
On such CPUs, the machine address associated with an allocated PMC is
retrievable using the
.Ic PMC_OP_PMCX86GETMSR
system call.
.Bl -tag -width indent
.It Ic PMC_OP_PMCX86GETMSR
Retrieve the MSR (machine specific register) number associated with
the given PMC handle.
.Pp
This operation is only valid for PMCs allocated in process-private modes.
.El
.Ss amd64 SPECIFIC API
AMD64 CPUs support the RDPMC instruction which allows a
user process to read a PMC value without needing to invoke a
.Ic PMC_OP_PMCRW
operation.
The machine address associated with an allocated PMC is
retrievable using the
.Ic PMC_OP_PMCX86GETMSR
system call.
.Bl -tag -width indent
.It Ic PMC_OP_PMCX86GETMSR
Retrieve the MSR (machine specific register) number associated with
the given PMC handle.
.Pp
This operation is only valid for PMCs allocated in process-private modes.
.El
.Sh SYSCTL TUNABLES
The behavior of
.Nm
is influenced by the following
.Xr sysctl 8
tunables:
.Bl -tag -width indent
.It Va kern.hwpmc.debugflags
(Only available if the
.Nm
driver was compiled with
.Fl DDEBUG ) .
Control the verbosity of debug messages from the
.Nm
driver.
.It Va kern.hwpmc.hashsize
The number of rows in the hash-tables used to keep track of owner and
target processes.
.It Va kern.hwpmc.mtxpoolsize
The size of the spin mutex pool used by the PMC driver.
.It Va kern.hwpmc.pcpubuffersize
The size of the per-cpu hash table used when performing system-wide
statistical profiling.
.It Va security.bsd.unprivileged_syspmcs
If set to non-zero, allow unprivileged processes to allocate system-wide
PMCs.
The default value is 0.
.It Va security.bsd.unprivileged_proc_debug
If set to 0, the
.Nm
driver will only allow privileged processes to attach PMCs to other
processes.
.El
.Pp
These variables may be set in the kernel environment using
.Xr kenv 1
before
.Nm
is loaded.
.Sh SECURITY CONSIDERATIONS
PMCs may be used to monitor the actual behavior of the system on hardware.
In situations where this constitutes an undesirable information leak,
the following options are available:
.Bl -enum
.It
Set the
.Xr sysctl 8
tunable
.Va "security.bsd.unprivileged_syspmcs"
to 0.
.Pp
This ensures that unprivileged processes cannot allocate system-wide
PMCs and thus cannot observe the hardware behavior of the system
as a whole.
.Pp
This tunable may also be set at boot time using
.Xr loader 8 ,
or with
.Xr kenv 1
prior to loading the
.Nm
driver into the kernel.
.It
Set the
.Xr sysctl 8
tunable
.Va "security.bsd.unprivileged_proc_debug"
to 0.
.Pp
This will ensure that an unprivileged process cannot attach a PMC
to any process other than itself and thus cannot observe the hardware
behavior of other processes with the same credentials.
.El
.Pp
System administrators should note that on IA-32 platforms
.Fx
makes the content of the IA-32 TSC counter available to all processes
via the RDTSC instruction.
.Sh IMPLEMENTATION NOTES
.Ss i386 TSC Handling
Historically, on the x86 architecture,
.Fx
has permitted user processes running at a processor CPL of 3 to
read the TSC using the RDTSC instruction.
The
.Nm
driver preserves this semantic.
.Pp
TSCs are treated as shared, read-only counters and hence are
only allowed to be allocated in system-wide counting mode.
.Ss Intel P4/HTT Handling
On CPUs with HTT support, Intel P4 PMCs are capable of qualifying
only a subset of hardware events on a per-logical CPU basis.
Consequently, if HTT is enabled on a system with Intel Pentium 4
PMCs, then the
.Nm
driver will reject allocation requests for process-private PMCs that
request counting of hardware events that cannot be counted separately
for each logical CPU.
.Sh ERRORS
A command issued to the
.Nm
driver may fail with the following errors:
.Bl -tag -width Er
.It Bq Er EBUSY
A
.Ic PMC_OP_CONFIGURELOG
operation was requested while an existing log was active.
.It Bq Er EBUSY
A
.Ic DISABLE
operation was requested using the
.Ic PMC_OP_PMCADMIN
request for a set of hardware resources currently in use for
process-private PMCs.
.It Bq Er EBUSY
A
.Ic PMC_OP_PMCADMIN
operation was requested on an active system mode PMC.
.It Bq Er EBUSY
A
.Ic PMC_OP_PMCATTACH
operation was requested for a target process that already had another
PMC using the same hardware resources attached to it.
.It Bq Er EBUSY
A
.Ic PMC_OP_PMCRW
request writing a new value was issued on a PMC that was active.
.It Bq Er EBUSY
A
.Ic PMC_OP_SETCOUNT
request was issued on a PMC that was active.
.It Bq Er EEXIST
A
.Ic PMC_OP_PMCATTACH
request was reissued for a target process that already is the target
of this PMC.
.It Bq Er EFAULT
A bad address was passed in to the driver.
.It Bq Er EINVAL
A process specified an invalid PMC handle.
.It Bq Er EINVAL
An invalid CPU number was passed in for an
.Ic PMC_OP_GETPMCINFO
operation.
.It Bq Er EINVAL
An invalid CPU number was passed in for an
.Ic PMC_OP_PMCADMIN
operation.
.It Bq Er EINVAL
An invalid operation request was passed in for an
.Ic PMC_OP_PMCADMIN
operation.
.It Bq Er EINVAL
An invalid PMC id was passed in for an
.Ic PMC_OP_PMCADMIN
operation.
.It Bq Er EINVAL
A suitable PMC matching the parameters passed in to a
.Ic PMC_OP_PMCALLOCATE
request could not be allocated.
.It Bq Er EINVAL
An invalid PMC mode was requested during a
.Ic PMC_OP_PMCALLOCATE
request.
.It Bq Er EINVAL
An invalid CPU number was specified during a
.Ic PMC_OP_PMCALLOCATE
request.
.It Bq Er EINVAL
A CPU other than
.Li PMC_CPU_ANY
was specified in a
.Ic PMC_OP_PMCALLOCATE
request for a process-private PMC.
.It Bq Er EINVAL
A CPU number of
.Li PMC_CPU_ANY
was specified in a
.Ic PMC_OP_PMCALLOCATE
request for a system-wide PMC.
.It Bq Er EINVAL
The
.Ar pm_flags
argument to an
.Ic PMC_OP_PMCALLOCATE
request contained unknown flags.
.It Bq Er EINVAL
A PMC allocated for system-wide operation was specified with a
.Ic PMC_OP_PMCATTACH
request.
.It Bq Er EINVAL
The
.Ar pm_pid
argument to a
.Ic PMC_OP_PMCATTACH
request specified an illegal process id.
.It Bq Er EINVAL
A
.Ic PMC_OP_PMCDETACH
request was issued for a PMC not attached to the target process.
.It Bq Er EINVAL
Argument
.Ar pm_flags
to a
.Ic PMC_OP_PMCRW
request contained illegal flags.
.It Bq Er EINVAL
A
.Ic PMC_OP_PMCX86GETMSR
operation was requested for a PMC not in process-private mode.
.It Bq Er EINVAL
(On Intel Pentium 4 CPUs with HTT support) An allocation request for
a process-private PMC was issued for an event that does not support
counting on a per-logical CPU basis.
.It Bq Er ENOMEM
The system was not able to allocate kernel memory.
.It Bq Er ENOSYS
(i386 architectures) A
.Ic PMC_OP_PMCX86GETMSR
operation was requested for hardware that does not support reading
PMCs directly with the RDPMC instruction.
.It Bq Er ENXIO
A
.Ic PMC_OP_GETPMCINFO
operation was requested for a disabled CPU.
.It Bq Er ENXIO
A system-wide PMC on a disabled CPU was requested to be allocated with
.Ic PMC_OP_PMCALLOCATE .
.It Bq Er ENXIO
A
.Ic PMC_OP_PMCSTART
or
.Ic PMC_OP_PMCSTOP
request was issued for a system-wide PMC that was allocated on a
currently disabled CPU.
.It Bq Er EPERM
A
.Ic PMC_OP_PMCADMIN
request was issued by a process without super-user
privilege or by a jailed super-user process.
.It Bq Er EPERM
A
.Ic PMC_OP_PMCATTACH
operation was issued for a target process that the current process
does not have permission to attach to.
.It Bq Er ESRCH
A process issued a PMC operation request without having allocated any PMCs.
.It Bq Er ESRCH
A
.Ic PMC_OP_PMCATTACH
request specified a non-existent process id.
.It Bq Er ESRCH
The target process for a
.Ic PMC_OP_PMCDETACH
operation is not being monitored by the
.Nm
driver.
.El
.Sh BUGS
The kernel driver requires all CPUs in an SMP system to be symmetric
with respect to their performance monitoring counter resources.
.Pp
The driver samples the state of the kernel's logical processor support
at the time of initialization (i.e., at module load time).
On CPUs supporting logical processors, the driver could misbehave if
logical processors are subsequently enabled or disabled while the
driver is active.
.Sh SEE ALSO
.Xr kenv 1 ,
.Xr pmc 3 ,
.Xr kldload 8 ,
.Xr pmccontrol 8 ,
.Xr pmcstat 8 ,
.Xr sysctl 8 ,
.Xr p_candebug 9


@ -112,6 +112,7 @@ MINUSLPAM+= -lypclnt
LIBPANEL?= ${DESTDIR}${LIBDIR}/libpanel.a
LIBPCAP?= ${DESTDIR}${LIBDIR}/libpcap.a
LIBPMC?= ${DESTDIR}${LIBDIR}/libpmc.a
LIBPTHREAD?= ${DESTDIR}${LIBDIR}/libpthread.a
LIBRADIUS?= ${DESTDIR}${LIBDIR}/libradius.a
LIBREADLINE?= ${DESTDIR}${LIBDIR}/libreadline.a


@ -0,0 +1,76 @@
/*-
* Copyright (c) 2003, Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/* Machine dependent interfaces */
#ifndef _MACHINE_PMC_MDEP_H
#define _MACHINE_PMC_MDEP_H 1
#include <sys/pmc.h>
/* AMD K8 PMCs */
#define K8_NPMCS 5 /* 1 TSC + 4 PMCs */
#define K8_PMC_COUNTERMASK 0xFF000000
#define K8_PMC_TO_COUNTER(x) (((x) << 24) & K8_PMC_COUNTERMASK)
#define K8_PMC_INVERT (1 << 23)
#define K8_PMC_ENABLE (1 << 22)
#define K8_PMC_INT (1 << 20)
#define K8_PMC_PC (1 << 19)
#define K8_PMC_EDGE (1 << 18)
#define K8_PMC_OS (1 << 17)
#define K8_PMC_USR (1 << 16)
#define K8_PMC_UNITMASK_M 0x10
#define K8_PMC_UNITMASK_O 0x08
#define K8_PMC_UNITMASK_E 0x04
#define K8_PMC_UNITMASK_S 0x02
#define K8_PMC_UNITMASK_I 0x01
#define K8_PMC_UNITMASK_MOESI 0x1F
#define K8_PMC_UNITMASK 0xFF00
#define K8_PMC_EVENTMASK 0x00FF
#define K8_PMC_TO_UNITMASK(x) (((x) << 8) & K8_PMC_UNITMASK)
#define K8_PMC_TO_EVENTMASK(x) ((x) & 0xFF)
#define K8_VALID_BITS (K8_PMC_COUNTERMASK | K8_PMC_INVERT | \
K8_PMC_ENABLE | K8_PMC_INT | K8_PMC_PC | K8_PMC_EDGE | K8_PMC_OS | \
K8_PMC_USR | K8_PMC_UNITMASK | K8_PMC_EVENTMASK)
#ifdef _KERNEL
/*
* Prototypes
*/
#if defined(__amd64__)
struct pmc_mdep *pmc_amd_initialize(void);
#endif /* defined(__amd64__) */
#endif /* _KERNEL */
#endif /* _MACHINE_PMC_MDEP_H */
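
As a rough illustration (not part of this commit) of how the bit-field macros above compose an event-selector value, the following userland sketch re-declares the relevant K8 constants verbatim from the header and prints one possible EVSEL encoding; the event code 0x41 and the MOESI unit mask are arbitrary example inputs:

#include <stdio.h>
#include <stdint.h>

/* values copied from the pmc_mdep.h header above */
#define K8_PMC_ENABLE		(1 << 22)
#define K8_PMC_OS		(1 << 17)
#define K8_PMC_USR		(1 << 16)
#define K8_PMC_UNITMASK_MOESI	0x1F
#define K8_PMC_UNITMASK		0xFF00
#define K8_PMC_EVENTMASK	0x00FF
#define K8_PMC_TO_UNITMASK(x)	(((x) << 8) & K8_PMC_UNITMASK)
#define K8_PMC_TO_EVENTMASK(x)	((x) & 0xFF)

int
main(void)
{
	uint32_t evsel;

	/* count event 0x41 in user and kernel mode, for all MOESI states */
	evsel = K8_PMC_TO_EVENTMASK(0x41) |
	    K8_PMC_TO_UNITMASK(K8_PMC_UNITMASK_MOESI) |
	    K8_PMC_USR | K8_PMC_OS | K8_PMC_ENABLE;

	printf("EVSEL = %#x\n", evsel);	/* prints 0x431f41 */
	return (0);
}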


@ -420,6 +420,19 @@ options REGRESSION
#
options COMPILING_LINT
#####################################################################
# PERFORMANCE MONITORING OPTIONS
#
# The hwpmc driver allows the use of in-CPU performance monitoring
# counters for performance monitoring. The base kernel needs to be configured
# with the 'options' line, while the hwpmc device can be either compiled
# in or loaded as a loadable kernel module.
#
device hwpmc # Driver (also a loadable module)
options HWPMC_HOOKS # Other necessary kernel hooks
#####################################################################
# NETWORKING OPTIONS


@ -1044,6 +1044,10 @@ gnu/ext2fs/ext2_subr.c optional ext2fs
gnu/ext2fs/ext2_vfsops.c optional ext2fs
gnu/ext2fs/ext2_vnops.c optional ext2fs
#
# Support for hardware performance monitoring counters
#
hwpmc/hwpmc_mod.c optional hwpmc
#
# isdn4bsd device drivers
#
i4b/driver/i4b_trace.c optional i4btrc
@ -1157,6 +1161,7 @@ kern/kern_mtxpool.c standard
kern/kern_mutex.c standard
kern/kern_ntptime.c standard
kern/kern_physio.c standard
kern/kern_pmc.c standard
kern/kern_poll.c optional device_polling
kern/kern_proc.c standard
kern/kern_prot.c standard


@ -170,6 +170,7 @@ geom/geom_bsd.c standard
geom/geom_bsd_enc.c standard
geom/geom_mbr.c standard
geom/geom_mbr_enc.c standard
hwpmc/hwpmc_amd.c optional hwpmc
isa/atkbd_isa.c optional atkbd
isa/atkbdc_isa.c optional atkbdc
isa/psm.c optional psm


@ -216,6 +216,8 @@ geom/geom_bsd_enc.c standard
geom/geom_mbr.c standard
geom/geom_mbr_enc.c standard
dev/acpica/acpi_if.m standard
hwpmc/hwpmc_amd.c optional hwpmc
hwpmc/hwpmc_intel.c optional hwpmc
i386/acpica/OsdEnvironment.c optional acpi
i386/acpica/acpi_machdep.c optional acpi
i386/acpica/acpi_wakeup.c optional acpi


@ -699,3 +699,6 @@ NI4BISPPP opt_i4b.h
# VFS options
LOOKUP_SHARED opt_vfs.h
# HWPMC options
HWPMC_HOOKS opt_global.h

sys/dev/hwpmc/hwpmc_amd.c (new file, 996 lines)

@ -0,0 +1,996 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Support for the AMD K7 and later processors */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
/* AMD K7 and K8 PMCs */
#define AMD_PMC_EVSEL_0 0xC0010000
#define AMD_PMC_EVSEL_1 0xC0010001
#define AMD_PMC_EVSEL_2 0xC0010002
#define AMD_PMC_EVSEL_3 0xC0010003
#define AMD_PMC_PERFCTR_0 0xC0010004
#define AMD_PMC_PERFCTR_1 0xC0010005
#define AMD_PMC_PERFCTR_2 0xC0010006
#define AMD_PMC_PERFCTR_3 0xC0010007
#define K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) || \
((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) || \
((c) >= 0xCD && (c) <= 0xCF))
#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
/* reserved bits include bit 21 and the top two bits of the unit mask */
#define K7_PMC_RESERVED ((1 << 21) | (3 << 13))
#define K8_PMC_RESERVED (1 << 21)
#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
#if __i386__
#define AMD_NPMCS K7_NPMCS
#define AMD_PMC_CLASS PMC_CLASS_K7
#define AMD_PMC_COUNTERMASK K7_PMC_COUNTERMASK
#define AMD_PMC_TO_COUNTER(x) K7_PMC_TO_COUNTER(x)
#define AMD_PMC_INVERT K7_PMC_INVERT
#define AMD_PMC_ENABLE K7_PMC_ENABLE
#define AMD_PMC_INT K7_PMC_INT
#define AMD_PMC_PC K7_PMC_PC
#define AMD_PMC_EDGE K7_PMC_EDGE
#define AMD_PMC_OS K7_PMC_OS
#define AMD_PMC_USR K7_PMC_USR
#define AMD_PMC_UNITMASK_M K7_PMC_UNITMASK_M
#define AMD_PMC_UNITMASK_O K7_PMC_UNITMASK_O
#define AMD_PMC_UNITMASK_E K7_PMC_UNITMASK_E
#define AMD_PMC_UNITMASK_S K7_PMC_UNITMASK_S
#define AMD_PMC_UNITMASK_I K7_PMC_UNITMASK_I
#define AMD_PMC_UNITMASK K7_PMC_UNITMASK
#define AMD_PMC_EVENTMASK K7_PMC_EVENTMASK
#define AMD_PMC_TO_UNITMASK(x) K7_PMC_TO_UNITMASK(x)
#define AMD_PMC_TO_EVENTMASK(x) K7_PMC_TO_EVENTMASK(x)
#define AMD_VALID_BITS K7_VALID_BITS
#define AMD_PMC_CLASS_NAME "K7-"
#elif __amd64__
#define AMD_NPMCS K8_NPMCS
#define AMD_PMC_CLASS PMC_CLASS_K8
#define AMD_PMC_COUNTERMASK K8_PMC_COUNTERMASK
#define AMD_PMC_TO_COUNTER(x) K8_PMC_TO_COUNTER(x)
#define AMD_PMC_INVERT K8_PMC_INVERT
#define AMD_PMC_ENABLE K8_PMC_ENABLE
#define AMD_PMC_INT K8_PMC_INT
#define AMD_PMC_PC K8_PMC_PC
#define AMD_PMC_EDGE K8_PMC_EDGE
#define AMD_PMC_OS K8_PMC_OS
#define AMD_PMC_USR K8_PMC_USR
#define AMD_PMC_UNITMASK_M K8_PMC_UNITMASK_M
#define AMD_PMC_UNITMASK_O K8_PMC_UNITMASK_O
#define AMD_PMC_UNITMASK_E K8_PMC_UNITMASK_E
#define AMD_PMC_UNITMASK_S K8_PMC_UNITMASK_S
#define AMD_PMC_UNITMASK_I K8_PMC_UNITMASK_I
#define AMD_PMC_UNITMASK K8_PMC_UNITMASK
#define AMD_PMC_EVENTMASK K8_PMC_EVENTMASK
#define AMD_PMC_TO_UNITMASK(x) K8_PMC_TO_UNITMASK(x)
#define AMD_PMC_TO_EVENTMASK(x) K8_PMC_TO_EVENTMASK(x)
#define AMD_VALID_BITS K8_VALID_BITS
#define AMD_PMC_CLASS_NAME "K8-"
#else
#error Unsupported architecture.
#endif
/* AMD K7 & K8 PMCs */
struct amd_descr {
struct pmc_descr pm_descr; /* "base class" */
uint32_t pm_evsel; /* address of EVSEL register */
uint32_t pm_perfctr; /* address of PERFCTR register */
};
static const struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
{
.pm_descr =
{
.pd_name = "TSC",
.pd_class = PMC_CLASS_TSC,
.pd_caps = PMC_CAP_READ,
.pd_width = 64
},
.pm_evsel = MSR_TSC,
.pm_perfctr = 0 /* unused */
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "0",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_0,
.pm_perfctr = AMD_PMC_PERFCTR_0
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "1",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_1,
.pm_perfctr = AMD_PMC_PERFCTR_1
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "2",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_2,
.pm_perfctr = AMD_PMC_PERFCTR_2
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "3",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_3,
.pm_perfctr = AMD_PMC_PERFCTR_3
}
};
struct amd_event_code_map {
enum pmc_event pe_ev; /* enum value */
uint8_t pe_code; /* encoded event mask */
uint8_t pe_mask; /* bits allowed in unit mask */
};
const struct amd_event_code_map amd_event_codes[] = {
#if __i386__
{ PMC_EV_K7_DC_ACCESSES, 0x40, 0 },
{ PMC_EV_K7_DC_MISSES, 0x41, 0 },
{ PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, K7_PMC_UNITMASK_MOESI },
{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, K7_PMC_UNITMASK_MOESI },
{ PMC_EV_K7_DC_WRITEBACKS, 0x44, K7_PMC_UNITMASK_MOESI },
{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 },
{ PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 },
{ PMC_EV_K7_IC_FETCHES, 0x80, 0 },
{ PMC_EV_K7_IC_MISSES, 0x81, 0 },
{ PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 },
{ PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 },
{ PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 },
{ PMC_EV_K7_RETIRED_OPS, 0xC1, 0 },
{ PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 },
{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 },
{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 },
{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 },
{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
{ PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 }
#endif
#if __amd64__
{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },
{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
0x21, 0x00 },
{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
{ PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
{ PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },
{ PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
{ PMC_EV_K8_DC_MISS, 0x41, 0x00 },
{ PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
{ PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },
{ PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
{ PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },
{ PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
{ PMC_EV_K8_IC_MISS, 0x81, 0x00 },
{ PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
{ PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },
{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
{ PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
{ PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
{ PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
0xCA, 0x00 },
{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
0xCC, 0x07 },
{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },
{ PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
0xD2, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
0xD5, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
0xD6, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
0xD9, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
0xDA, 0x00 },
{ PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
0xE2, 0x00 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
{ PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
{ PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
#endif
};
const int amd_event_codes_size =
sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
/*
* read a pmc register
*/
static int
amd_read_pmc(int cpu, int ri, pmc_value_t *v)
{
enum pmc_mode mode;
const struct amd_descr *pd;
struct pmc *pm;
const struct pmc_hw *phw;
pmc_value_t tmp;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &amd_pmcdesc[ri];
pm = phw->phw_pmc;
KASSERT(pm != NULL,
("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
cpu, ri));
mode = pm->pm_mode;
PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
/* Reading the TSC is a special case */
if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
KASSERT(PMC_IS_COUNTING_MODE(mode),
("[amd,%d] TSC counter in non-counting mode", __LINE__));
*v = rdtsc();
PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
return 0;
}
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
if (PMC_IS_SAMPLING_MODE(mode))
*v = -tmp;
else
*v = tmp;
PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
return 0;
}
/*
* Write a PMC MSR.
*/
static int
amd_write_pmc(int cpu, int ri, pmc_value_t v)
{
const struct amd_descr *pd;
struct pmc *pm;
const struct pmc_hw *phw;
enum pmc_mode mode;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &amd_pmcdesc[ri];
pm = phw->phw_pmc;
KASSERT(pm != NULL,
("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
cpu, ri));
mode = pm->pm_mode;
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
/* Use the 2's complement of the count for sampling mode PMCs, so the
 * counter overflows (and raises an interrupt) after 'v' events. */
if (PMC_IS_SAMPLING_MODE(mode))
v = -v;
PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
/* write the PMC value */
wrmsr(pd->pm_perfctr, v);
return 0;
}
/*
* configure hardware pmc according to the configuration recorded in
* pmc 'pm'.
*/
static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
KASSERT(pm == NULL || phw->phw_pmc == NULL,
("[amd,%d] hwpmc not unconfigured before re-config", __LINE__));
phw->phw_pmc = pm;
return 0;
}
/*
* Machine dependent actions taken during the context switch in of a
* thread.
*/
static int
amd_switch_in(struct pmc_cpu *pc)
{
(void) pc;
/* enable the RDPMC instruction */
load_cr4(rcr4() | CR4_PCE);
return 0;
}
/*
* Machine dependent actions taken during the context switch out of a
* thread.
*/
static int
amd_switch_out(struct pmc_cpu *pc)
{
(void) pc;
/* disallow RDPMC instruction */
load_cr4(rcr4() & ~CR4_PCE);
return 0;
}
/*
* Check if a given allocation is feasible.
*/
static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
int i;
uint32_t allowed_unitmask, caps, config, unitmask;
enum pmc_event pe;
const struct pmc_descr *pd;
(void) cpu;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row index %d", __LINE__, ri));
pd = &amd_pmcdesc[ri].pm_descr;
/* check class match */
if (pd->pd_class != pm->pm_class)
return EINVAL;
caps = pm->pm_caps;
PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);
if ((pd->pd_caps & caps) != caps)
return EPERM;
if (pd->pd_class == PMC_CLASS_TSC) {
/* TSC's are always allocated in system-wide counting mode */
if (a->pm_ev != PMC_EV_TSC_TSC ||
a->pm_mode != PMC_MODE_SC)
return EINVAL;
return 0;
}
KASSERT(pd->pd_class == AMD_PMC_CLASS,
("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));
pe = a->pm_ev;
/* map ev to the correct event mask code */
config = allowed_unitmask = 0;
for (i = 0; i < amd_event_codes_size; i++)
if (amd_event_codes[i].pe_ev == pe) {
config =
AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
allowed_unitmask =
AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
break;
}
if (i == amd_event_codes_size)
return EINVAL;
unitmask = a->pm_amd_config & AMD_PMC_UNITMASK;
if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
return EINVAL;
if (unitmask && (caps & PMC_CAP_QUALIFIER))
config |= unitmask;
if (caps & PMC_CAP_THRESHOLD)
config |= a->pm_amd_config & AMD_PMC_COUNTERMASK;
/* set at least one of the 'usr' or 'os' caps */
if (caps & PMC_CAP_USER)
config |= AMD_PMC_USR;
if (caps & PMC_CAP_SYSTEM)
config |= AMD_PMC_OS;
if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
config |= (AMD_PMC_USR|AMD_PMC_OS);
if (caps & PMC_CAP_EDGE)
config |= AMD_PMC_EDGE;
if (caps & PMC_CAP_INVERT)
config |= AMD_PMC_INVERT;
if (caps & PMC_CAP_INTERRUPT)
config |= AMD_PMC_INT;
pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */
PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
return 0;
}
/*
* Release machine dependent state associated with a PMC. This is a
* no-op on this architecture.
*
*/
/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#if DEBUG
const struct amd_descr *pd;
#endif
struct pmc_hw *phw;
(void) pmc;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
KASSERT(phw->phw_pmc == NULL,
("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
#if DEBUG
pd = &amd_pmcdesc[ri];
if (pd->pm_descr.pd_class == AMD_PMC_CLASS)
KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif
return 0;
}
/*
* start a PMC.
*/
static int
amd_start_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
const struct amd_descr *pd;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &amd_pmcdesc[ri];
KASSERT(pm != NULL,
("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
cpu, ri));
PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0; /* TSCs are always running */
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
ri, cpu, pd->pm_descr.pd_name));
/* turn on the PMC ENABLE bit */
config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);
wrmsr(pd->pm_evsel, config);
return 0;
}
/*
* Stop a PMC.
*/
static int
amd_stop_pmc(int cpu, int ri)
{
struct pmc *pm;
struct pmc_hw *phw;
const struct amd_descr *pd;
uint64_t config;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &amd_pmcdesc[ri];
KASSERT(pm != NULL,
("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
cpu, ri));
/* can't stop a TSC */
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
__LINE__, ri, cpu, pd->pm_descr.pd_name));
PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);
/* turn off the PMC ENABLE bit */
config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
wrmsr(pd->pm_evsel, config);
return 0;
}
/*
* Interrupt handler. This function needs to return '1' if the
* interrupt was caused by this CPU's PMCs or '0' otherwise. It is not allowed
* to sleep or do anything a 'fast' interrupt handler is not allowed
* to do.
*/
static int
amd_intr(int cpu, uintptr_t eip)
{
int i, retval;
enum pmc_mode mode;
uint32_t perfctr;
struct pmc *pm;
struct pmc_cpu *pc;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] out of range CPU %d", __LINE__, cpu));
retval = 0;
pc = pmc_pcpu[cpu];
/*
* look for all PMCs that have interrupted:
* - skip over the TSC [PMC#0]
* - look for a PMC with a valid 'struct pmc' association
* - look for a PMC in (a) sampling mode and (b) which has
* overflowed. If found, we update the process's
* histogram or send it a profiling signal by calling
* the appropriate helper function.
*/
for (i = 1; i < AMD_NPMCS; i++) {
phw = pc->pc_hwpmcs[i];
perfctr = amd_pmcdesc[i].pm_perfctr;
KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));
if ((pm = phw->phw_pmc) == NULL ||
pm->pm_state != PMC_STATE_RUNNING) {
atomic_add_int(&pmc_stats.pm_intr_ignored, 1);
continue;
}
mode = pm->pm_mode;
if (PMC_IS_SAMPLING_MODE(mode) &&
AMD_PMC_HAS_OVERFLOWED(perfctr)) {
atomic_add_int(&pmc_stats.pm_intr_processed, 1);
if (PMC_IS_SYSTEM_MODE(mode))
pmc_update_histogram(phw, eip);
else if (PMC_IS_VIRTUAL_MODE(mode))
pmc_send_signal(pm);
retval = 1;
}
}
return retval;
}
/*
* describe a PMC
*/
static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
int error;
size_t copied;
const struct amd_descr *pd;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] row-index %d out of range", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &amd_pmcdesc[ri];
if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
PMC_NAME_MAX, &copied)) != 0)
return error;
pi->pm_class = pd->pm_descr.pd_class;
pi->pm_caps = pd->pm_descr.pd_caps;
pi->pm_width = pd->pm_descr.pd_width;
if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
pi->pm_enabled = TRUE;
*ppmc = phw->phw_pmc;
} else {
pi->pm_enabled = FALSE;
*ppmc = NULL;
}
return 0;
}
/*
* i386 specific entry points
*/
/*
* return the MSR address of the given PMC.
*/
static int
amd_get_msr(int ri, uint32_t *msr)
{
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] ri %d out of range", __LINE__, ri));
*msr = amd_pmcdesc[ri].pm_perfctr;
return 0;
}
/*
* processor dependent initialization.
*/
/*
* Per-processor data structure
*
* [common stuff]
* [5 struct pmc_hw pointers]
* [5 struct pmc_hw structures]
*/
struct amd_cpu {
struct pmc_cpu pc_common;
struct pmc_hw *pc_hwpmcs[AMD_NPMCS];
struct pmc_hw pc_amdpmcs[AMD_NPMCS];
};
static int
amd_init(int cpu)
{
int n;
struct amd_cpu *pcs;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] insane cpu number %d", __LINE__, cpu));
PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);
MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
M_WAITOK|M_ZERO);
if (pcs == NULL)
return ENOMEM;
phw = &pcs->pc_amdpmcs[0];
/*
* Initialize the per-cpu mutex and set the content of the
* hardware descriptors to a known state.
*/
for (n = 0; n < AMD_NPMCS; n++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
phw->phw_pmc = NULL;
pcs->pc_hwpmcs[n] = phw;
}
/* Mark the TSC as shareable */
pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
return 0;
}
/*
* processor dependent cleanup prior to the KLD
* being unloaded
*/
static int
amd_cleanup(int cpu)
{
int i;
uint32_t evsel;
struct pmc_cpu *pcs;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
/*
* First, turn off all PMCs on this CPU.
*/
for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
evsel &= ~AMD_PMC_ENABLE;
wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
}
/*
* Next, free up allocated space.
*/
pcs = pmc_pcpu[cpu];
#if DEBUG
/* check the TSC */
KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
for (i = 1; i < AMD_NPMCS; i++) {
KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
}
#endif
KASSERT(pcs != NULL,
("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu));
pmc_pcpu[cpu] = NULL;
FREE(pcs, M_PMC);
return 0;
}
/*
* Initialize ourselves.
*/
struct pmc_mdep *
pmc_amd_initialize(void)
{
struct pmc_mdep *pmc_mdep;
/* The presence of hardware performance counters on AMD
Athlon, Duron or later processors is _not_ indicated by
any of the processor feature flags set by the 'CPUID'
instruction, so we only check the 'instruction family'
field returned by CPUID for instruction family >= 6. This
test needs to be refined. */
if ((cpu_id & 0xF00) < 0x600)
return NULL;
MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
M_PMC, M_WAITOK|M_ZERO);
#if __i386__
pmc_mdep->pmd_cputype = PMC_CPU_AMD_K7;
#elif __amd64__
pmc_mdep->pmd_cputype = PMC_CPU_AMD_K8;
#else
#error Unknown AMD CPU type.
#endif
pmc_mdep->pmd_npmc = AMD_NPMCS;
/* this processor has two classes of usable PMCs */
pmc_mdep->pmd_nclass = 2;
pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
pmc_mdep->pmd_classes[1] = AMD_PMC_CLASS;
pmc_mdep->pmd_nclasspmcs[0] = 1;
pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);
pmc_mdep->pmd_init = amd_init;
pmc_mdep->pmd_cleanup = amd_cleanup;
pmc_mdep->pmd_switch_in = amd_switch_in;
pmc_mdep->pmd_switch_out = amd_switch_out;
pmc_mdep->pmd_read_pmc = amd_read_pmc;
pmc_mdep->pmd_write_pmc = amd_write_pmc;
pmc_mdep->pmd_config_pmc = amd_config_pmc;
pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
pmc_mdep->pmd_release_pmc = amd_release_pmc;
pmc_mdep->pmd_start_pmc = amd_start_pmc;
pmc_mdep->pmd_stop_pmc = amd_stop_pmc;
pmc_mdep->pmd_intr = amd_intr;
pmc_mdep->pmd_describe = amd_describe;
pmc_mdep->pmd_get_msr = amd_get_msr; /* i386 */
PMCDBG(MDP,INI,0,"%s","amd-initialize");
return pmc_mdep;
}

sys/dev/hwpmc/hwpmc_intel.c (new file, 142 lines)

@ -0,0 +1,142 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
struct pmc_mdep *
pmc_intel_initialize(void)
{
struct pmc_mdep *pmc_mdep;
enum pmc_cputype cputype;
int error, model;
KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
("[intel,%d] Initializing non-intel processor", __LINE__));
PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id);
cputype = -1;
switch (cpu_id & 0xF00) {
case 0x500: /* Pentium family processors */
cputype = PMC_CPU_INTEL_P5;
break;
case 0x600: /* Pentium Pro, Celeron, Pentium II & III */
switch ((cpu_id & 0xF0) >> 4) { /* model number field */
case 0x1:
cputype = PMC_CPU_INTEL_P6;
break;
case 0x3: case 0x5:
cputype = PMC_CPU_INTEL_PII;
break;
case 0x6:
cputype = PMC_CPU_INTEL_CL;
break;
case 0x7: case 0x8: case 0xA: case 0xB:
cputype = PMC_CPU_INTEL_PIII;
break;
case 0x9: case 0xD:
cputype = PMC_CPU_INTEL_PM;
break;
}
break;
case 0xF00: /* P4 */
model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
if (model >= 0 && model <= 3) /* known models */
cputype = PMC_CPU_INTEL_PIV;
break;
}
if ((int) cputype == -1) {
printf("pmc: Unknown Intel CPU.\n");
return NULL;
}
MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
M_PMC, M_WAITOK|M_ZERO);
pmc_mdep->pmd_cputype = cputype;
pmc_mdep->pmd_nclass = 2;
pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
pmc_mdep->pmd_nclasspmcs[0] = 1;
error = 0;
switch (cputype) {
/*
* Intel Pentium 4 Processors
*/
case PMC_CPU_INTEL_PIV:
error = pmc_initialize_p4(pmc_mdep);
break;
/*
* P6 Family Processors
*/
case PMC_CPU_INTEL_P6:
case PMC_CPU_INTEL_CL:
case PMC_CPU_INTEL_PII:
case PMC_CPU_INTEL_PIII:
case PMC_CPU_INTEL_PM:
error = pmc_initialize_p6(pmc_mdep);
break;
/*
* Intel Pentium PMCs.
*/
case PMC_CPU_INTEL_P5:
error = pmc_initialize_p5(pmc_mdep);
break;
default:
KASSERT(0,("[intel,%d] Unknown CPU type", __LINE__));
}
if (error) {
FREE(pmc_mdep, M_PMC);
pmc_mdep = NULL;
}
return pmc_mdep;
}
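A minimal standalone sketch of the family/model decoding used by the switch above; the CPUID value is a made-up example (family 6, model 9, which the code above would classify as PMC_CPU_INTEL_PM).

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t cpu_id = 0x0695;	/* hypothetical CPUID.1 EAX value */
	unsigned family = (cpu_id & 0xF00) >> 8;
	unsigned model = (cpu_id & 0xF0) >> 4;
	/* the family 0xF (P4) case also folds in the extended model bits */
	unsigned p4_model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);

	printf("family=%u model=%u p4-style model=%u\n", family, model, p4_model);
	return 0;
}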

3671
sys/dev/hwpmc/hwpmc_mod.c Normal file

File diff suppressed because it is too large Load Diff

51
sys/dev/hwpmc/hwpmc_pentium.c Normal file
View File

@ -0,0 +1,51 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
/*
* Intel Pentium PMCs
*/
int
pmc_initialize_p5(struct pmc_mdep *pmc_mdep)
{
(void) pmc_mdep;
return ENOSYS; /* nothing here yet */
}

1484
sys/dev/hwpmc/hwpmc_piv.c Normal file

File diff suppressed because it is too large Load Diff

742
sys/dev/hwpmc/hwpmc_ppro.c Normal file
View File

@ -0,0 +1,742 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
/*
* PENTIUM PRO SUPPORT
*/
struct p6pmc_descr {
struct pmc_descr pm_descr; /* common information */
uint32_t pm_pmc_msr;
uint32_t pm_evsel_msr;
};
static struct p6pmc_descr p6_pmcdesc[P6_NPMCS] = {
/* TSC */
{
.pm_descr =
{
.pd_name = "TSC",
.pd_class = PMC_CLASS_TSC,
.pd_caps = PMC_CAP_READ,
.pd_width = 64
},
.pm_pmc_msr = 0x10,
.pm_evsel_msr = ~0
},
#define P6_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
/* PMC 0 */
{
.pm_descr =
{
.pd_name ="P6-0",
.pd_class = PMC_CLASS_P6,
.pd_caps = P6_PMC_CAPS,
.pd_width = 40
},
.pm_pmc_msr = P6_MSR_PERFCTR0,
.pm_evsel_msr = P6_MSR_EVSEL0
},
/* PMC 1 */
{
.pm_descr =
{
.pd_name ="P6-1",
.pd_class = PMC_CLASS_P6,
.pd_caps = P6_PMC_CAPS,
.pd_width = 40
},
.pm_pmc_msr = P6_MSR_PERFCTR1,
.pm_evsel_msr = P6_MSR_EVSEL1
}
};
static enum pmc_cputype p6_cputype;
/*
* P6 Event descriptor
*/
struct p6_event_descr {
const enum pmc_event pm_event;
uint32_t pm_evsel;
uint32_t pm_flags;
uint32_t pm_unitmask;
};
static const struct p6_event_descr p6_events[] = {
#define P6_EVDESCR(NAME, EVSEL, FLAGS, UMASK) \
{ \
.pm_event = PMC_EV_P6_##NAME, \
.pm_evsel = (EVSEL), \
.pm_flags = (FLAGS), \
.pm_unitmask = (UMASK) \
}
#define P6F_P6 (1 << PMC_CPU_INTEL_P6)
#define P6F_CL (1 << PMC_CPU_INTEL_CL)
#define P6F_PII (1 << PMC_CPU_INTEL_PII)
#define P6F_PIII (1 << PMC_CPU_INTEL_PIII)
#define P6F_PM (1 << PMC_CPU_INTEL_PM)
#define P6F_CTR0 0x0001
#define P6F_CTR1 0x0002
#define P6F_ALL_CPUS (P6F_P6 | P6F_PII | P6F_CL | P6F_PIII | P6F_PM)
#define P6F_ALL_CTRS (P6F_CTR0 | P6F_CTR1)
#define P6F_ALL (P6F_ALL_CPUS | P6F_ALL_CTRS)
#define P6_EVENT_VALID_FOR_CPU(P,CPU) ((P)->pm_flags & (1 << (CPU)))
#define P6_EVENT_VALID_FOR_CTR(P,CTR) ((P)->pm_flags & (1 << (CTR)))
P6_EVDESCR(DATA_MEM_REFS, 0x43, P6F_ALL, 0x00),
P6_EVDESCR(DCU_LINES_IN, 0x45, P6F_ALL, 0x00),
P6_EVDESCR(DCU_M_LINES_IN, 0x46, P6F_ALL, 0x00),
P6_EVDESCR(DCU_M_LINES_OUT, 0x47, P6F_ALL, 0x00),
P6_EVDESCR(DCU_MISS_OUTSTANDING, 0x47, P6F_ALL, 0x00),
P6_EVDESCR(IFU_FETCH, 0x80, P6F_ALL, 0x00),
P6_EVDESCR(IFU_FETCH_MISS, 0x81, P6F_ALL, 0x00),
P6_EVDESCR(ITLB_MISS, 0x85, P6F_ALL, 0x00),
P6_EVDESCR(IFU_MEM_STALL, 0x86, P6F_ALL, 0x00),
P6_EVDESCR(ILD_STALL, 0x87, P6F_ALL, 0x00),
P6_EVDESCR(L2_IFETCH, 0x28, P6F_ALL, 0x0F),
P6_EVDESCR(L2_LD, 0x29, P6F_ALL, 0x0F),
P6_EVDESCR(L2_ST, 0x2A, P6F_ALL, 0x0F),
P6_EVDESCR(L2_LINES_IN, 0x24, P6F_ALL, 0x0F),
P6_EVDESCR(L2_LINES_OUT, 0x26, P6F_ALL, 0x0F),
P6_EVDESCR(L2_M_LINES_INM, 0x25, P6F_ALL, 0x00),
P6_EVDESCR(L2_M_LINES_OUTM, 0x27, P6F_ALL, 0x0F),
P6_EVDESCR(L2_RQSTS, 0x2E, P6F_ALL, 0x0F),
P6_EVDESCR(L2_ADS, 0x21, P6F_ALL, 0x00),
P6_EVDESCR(L2_DBUS_BUSY, 0x22, P6F_ALL, 0x00),
P6_EVDESCR(L2_DBUS_BUSY_RD, 0x23, P6F_ALL, 0x00),
P6_EVDESCR(BUS_DRDY_CLOCKS, 0x62, P6F_ALL, 0x20),
P6_EVDESCR(BUS_LOCK_CLOCKS, 0x63, P6F_ALL, 0x20),
P6_EVDESCR(BUS_REQ_OUTSTANDING, 0x60, P6F_ALL, 0x00),
P6_EVDESCR(BUS_TRAN_BRD, 0x65, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_RFO, 0x66, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRANS_WB, 0x67, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_IFETCH, 0x68, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_INVAL, 0x69, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_PWR, 0x6A, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRANS_P, 0x6B, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRANS_IO, 0x6C, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_DEF, 0x6D, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_BURST, 0x6E, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_ANY, 0x70, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_MEM, 0x6F, P6F_ALL, 0x20),
P6_EVDESCR(BUS_DATA_RCV, 0x64, P6F_ALL, 0x00),
P6_EVDESCR(BUS_BNR_DRV, 0x61, P6F_ALL, 0x00),
P6_EVDESCR(BUS_HIT_DRV, 0x7A, P6F_ALL, 0x00),
P6_EVDESCR(BUS_HITM_DRV, 0x7B, P6F_ALL, 0x00),
P6_EVDESCR(BUS_SNOOP_STALL, 0x7E, P6F_ALL, 0x00),
P6_EVDESCR(FLOPS, 0xC1, P6F_ALL_CPUS | P6F_CTR0, 0x00),
P6_EVDESCR(FP_COMPS_OPS_EXE, 0x10, P6F_ALL_CPUS | P6F_CTR0, 0x00),
P6_EVDESCR(FP_ASSIST, 0x11, P6F_ALL_CPUS | P6F_CTR1, 0x00),
P6_EVDESCR(MUL, 0x12, P6F_ALL_CPUS | P6F_CTR1, 0x00),
P6_EVDESCR(DIV, 0x13, P6F_ALL_CPUS | P6F_CTR1, 0x00),
P6_EVDESCR(CYCLES_DIV_BUSY, 0x14, P6F_ALL_CPUS | P6F_CTR0, 0x00),
P6_EVDESCR(LD_BLOCKS, 0x03, P6F_ALL, 0x00),
P6_EVDESCR(SB_DRAINS, 0x04, P6F_ALL, 0x00),
P6_EVDESCR(MISALIGN_MEM_REF, 0x05, P6F_ALL, 0x00),
P6_EVDESCR(EMON_KNI_PREF_DISPATCHED, 0x07, P6F_PIII | P6F_ALL_CTRS, 0x03),
P6_EVDESCR(EMON_KNI_PREF_MISS, 0x4B, P6F_PIII | P6F_ALL_CTRS, 0x03),
P6_EVDESCR(INST_RETIRED, 0xC0, P6F_ALL, 0x00),
P6_EVDESCR(UOPS_RETIRED, 0xC2, P6F_ALL, 0x00),
P6_EVDESCR(INST_DECODED, 0xD0, P6F_ALL, 0x00),
P6_EVDESCR(EMON_KNI_INST_RETIRED, 0xD8, P6F_PIII | P6F_ALL_CTRS, 0x01),
P6_EVDESCR(EMON_KNI_COMP_INST_RET, 0xD9, P6F_PIII | P6F_ALL_CTRS, 0x01),
P6_EVDESCR(HW_INT_RX, 0xC8, P6F_ALL, 0x00),
P6_EVDESCR(CYCLES_INT_MASKED, 0xC6, P6F_ALL, 0x00),
P6_EVDESCR(CYCLES_INT_PENDING_AND_MASKED, 0xC7, P6F_ALL, 0x00),
P6_EVDESCR(BR_INST_RETIRED, 0xC4, P6F_ALL, 0x00),
P6_EVDESCR(BR_MISS_PRED_RETIRED, 0xC5, P6F_ALL, 0x00),
P6_EVDESCR(BR_TAKEN_RETIRED, 0xC9, P6F_ALL, 0x00),
P6_EVDESCR(BR_MISS_PRED_TAKEN_RET, 0xCA, P6F_ALL, 0x00),
P6_EVDESCR(BR_INST_DECODED, 0xE0, P6F_ALL, 0x00),
P6_EVDESCR(BTB_MISSES, 0xE2, P6F_ALL, 0x00),
P6_EVDESCR(BR_BOGUS, 0xE4, P6F_ALL, 0x00),
P6_EVDESCR(BACLEARS, 0xE6, P6F_ALL, 0x00),
P6_EVDESCR(RESOURCE_STALLS, 0xA2, P6F_ALL, 0x00),
P6_EVDESCR(PARTIAL_RAT_STALLS, 0xD2, P6F_ALL, 0x00),
P6_EVDESCR(SEGMENT_REG_LOADS, 0x06, P6F_ALL, 0x00),
P6_EVDESCR(CPU_CLK_UNHALTED, 0x79, P6F_ALL, 0x00),
P6_EVDESCR(MMX_INSTR_EXEC, 0xB0,
P6F_ALL_CTRS | P6F_CL | P6F_PII, 0x00),
P6_EVDESCR(MMX_SAT_INSTR_EXEC, 0xB1,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
P6_EVDESCR(MMX_UOPS_EXEC, 0xB2,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
P6_EVDESCR(MMX_INSTR_TYPE_EXEC, 0xB3,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x3F),
P6_EVDESCR(FP_MMX_TRANS, 0xCC,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x01),
P6_EVDESCR(MMX_ASSIST, 0xCD,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
P6_EVDESCR(MMX_INSTR_RET, 0xCE, P6F_ALL_CTRS | P6F_PII, 0x00),
P6_EVDESCR(SEG_RENAME_STALLS, 0xD4,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
P6_EVDESCR(SEG_REG_RENAMES, 0xD5,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
P6_EVDESCR(RET_SEG_RENAMES, 0xD6,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
P6_EVDESCR(EMON_EST_TRANS, 0x58, P6F_ALL_CTRS | P6F_PM, 0x02),
P6_EVDESCR(EMON_THERMAL_TRIP, 0x59, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_INST_EXEC, 0x88, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_MISSP_EXEC, 0x89, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_BAC_MISSP_EXEC, 0x8A, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CND_EXEC, 0x8B, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CND_MISSP_EXEC, 0x8C, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_IND_EXEC, 0x8D, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_IND_MISSP_EXEC, 0x8E, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_RET_EXEC, 0x8F, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_RET_MISSP_EXEC, 0x90, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_RET_BAC_MISSP_EXEC, 0x91, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CALL_EXEC, 0x92, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CALL_MISSP_EXEC, 0x93, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_IND_CALL_EXEC, 0x94, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_SIMD_INSTR_RETIRED, 0xCE, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_SYNCH_UOPS, 0xD3, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_ESP_UOPS, 0xD7, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_FUSED_UOPS_RET, 0xDA, P6F_ALL_CTRS | P6F_PM, 0x03),
P6_EVDESCR(EMON_UNFUSION, 0xDB, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_PREF_RQSTS_UP, 0xF0, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_PREF_RQSTS_DN, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_SSE_SSE2_INST_RETIRED, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x03),
P6_EVDESCR(EMON_SSE_SSE2_COMP_INST_RETIRED, 0xD9, P6F_ALL_CTRS | P6F_PM, 0x03)
#undef P6_EVDESCR
};
#define P6_NEVENTS (PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1)
static const struct p6_event_descr *
p6_find_event(enum pmc_event ev)
{
int n;
for (n = 0; n < P6_NEVENTS; n++)
if (p6_events[n].pm_event == ev)
break;
if (n == P6_NEVENTS)
return NULL;
return &p6_events[n];
}
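Each event descriptor carries both per-CPU and per-counter validity bits in pm_flags, and the P6_EVENT_VALID_FOR_* macros above test them with simple shifts. A toy illustration of those checks; the counter bits mirror P6F_CTR0/P6F_CTR1, while the CPU bit position is a placeholder because the numeric values of enum pmc_cputype are defined outside this listing.

#include <stdio.h>

#define T_CTR0		0x0001
#define T_CTR1		0x0002
#define T_CPU_PIII	(1 << 4)	/* placeholder bit position */

#define VALID_FOR_CPU(flags, cpubit)	((flags) & (cpubit))
#define VALID_FOR_CTR(flags, ctr)	((flags) & (1 << (ctr)))

int
main(void)
{
	/* FLOPS above is tagged P6F_ALL_CPUS | P6F_CTR0: any CPU, counter 0 only */
	unsigned flops = T_CPU_PIII | T_CTR0;

	printf("FLOPS on a PIII: %s\n", VALID_FOR_CPU(flops, T_CPU_PIII) ? "ok" : "no");
	printf("FLOPS on ctr0:   %s\n", VALID_FOR_CTR(flops, 0) ? "ok" : "no");
	printf("FLOPS on ctr1:   %s\n", VALID_FOR_CTR(flops, 1) ? "ok" : "no");
	return 0;
}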
/*
* Per-CPU data structure for P6 class CPUs
*
* [common stuff]
* [3 struct pmc_hw pointers]
* [3 struct pmc_hw structures]
*/
struct p6_cpu {
struct pmc_cpu pc_common;
struct pmc_hw *pc_hwpmcs[P6_NPMCS];
struct pmc_hw pc_p6pmcs[P6_NPMCS];
};
static int
p6_init(int cpu)
{
int n;
struct p6_cpu *pcs;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] bad cpu %d", __LINE__, cpu));
PMCDBG(MDP,INI,0,"p6-init cpu=%d", cpu);
MALLOC(pcs, struct p6_cpu *, sizeof(struct p6_cpu), M_PMC,
M_WAITOK|M_ZERO);
if (pcs == NULL)
return ENOMEM;
phw = pcs->pc_p6pmcs;
for (n = 0; n < P6_NPMCS; n++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
phw->phw_pmc = NULL;
pcs->pc_hwpmcs[n] = phw;
}
/* Mark the TSC as shareable */
pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
return 0;
}
static int
p6_cleanup(int cpu)
{
struct pmc_cpu *pcs;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] bad cpu %d", __LINE__, cpu));
PMCDBG(MDP,INI,0,"p6-cleanup cpu=%d", cpu);
if ((pcs = pmc_pcpu[cpu]) != NULL)
FREE(pcs, M_PMC);
pmc_pcpu[cpu] = NULL;
return 0;
}
static int
p6_switch_in(struct pmc_cpu *pc)
{
(void) pc;
return 0;
}
static int
p6_switch_out(struct pmc_cpu *pc)
{
(void) pc;
return 0;
}
static int
p6_read_pmc(int cpu, int ri, pmc_value_t *v)
{
struct pmc_hw *phw;
struct pmc *pm;
struct p6pmc_descr *pd;
pmc_value_t tmp;
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
tmp = rdmsr(pd->pm_pmc_msr) & P6_PERFCTR_MASK;
if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
*v = -tmp;
else
*v = tmp;
PMCDBG(MDP,REA,1, "p6-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
pd->pm_pmc_msr, *v);
return 0;
}
static int
p6_write_pmc(int cpu, int ri, pmc_value_t v)
{
struct pmc_hw *phw;
struct pmc *pm;
struct p6pmc_descr *pd;
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
PMCDBG(MDP,WRI,1, "p6-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
pd->pm_pmc_msr, v);
if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
v = -v;
wrmsr(pd->pm_pmc_msr, v & P6_PERFCTR_MASK);
return 0;
}
static int
p6_config_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
PMCDBG(MDP,CFG,1, "p6-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
phw->phw_pmc = pm;
return 0;
}
/*
* A pmc may be allocated to a given row index if:
* - the event is valid for this CPU
* - the event is valid for this counter index
*/
static int
p6_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
uint32_t allowed_unitmask, caps, config, unitmask;
const struct p6pmc_descr *pd;
const struct p6_event_descr *pevent;
enum pmc_event ev;
(void) cpu;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] illegal CPU %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d] illegal row-index value %d", __LINE__, ri));
pd = &p6_pmcdesc[ri];
PMCDBG(MDP,ALL,1, "p6-allocate ri=%d class=%d pmccaps=0x%x "
"reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
pm->pm_caps);
/* check class */
if (pd->pm_descr.pd_class != pm->pm_class)
return EINVAL;
/* check requested capabilities */
caps = a->pm_caps;
if ((pd->pm_descr.pd_caps & caps) != caps)
return EPERM;
if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
/* TSC's are always allocated in system-wide counting mode */
if (a->pm_ev != PMC_EV_TSC_TSC ||
a->pm_mode != PMC_MODE_SC)
return EINVAL;
return 0;
}
/*
* P6 class events
*/
ev = pm->pm_event;
if (ev < PMC_EV_P6_FIRST || ev > PMC_EV_P6_LAST)
return EINVAL;
if ((pevent = p6_find_event(ev)) == NULL)
return ESRCH;
if (!P6_EVENT_VALID_FOR_CPU(pevent, p6_cputype) ||
!P6_EVENT_VALID_FOR_CTR(pevent, (ri-1)))
return EINVAL;
/* For certain events, Pentium M differs from the stock P6 */
allowed_unitmask = 0;
if (p6_cputype == PMC_CPU_INTEL_PM) {
if (ev == PMC_EV_P6_L2_LD || ev == PMC_EV_P6_L2_LINES_IN ||
ev == PMC_EV_P6_L2_LINES_OUT)
allowed_unitmask = P6_EVSEL_TO_UMASK(0x3F);
else if (ev == PMC_EV_P6_L2_M_LINES_OUTM)
allowed_unitmask = P6_EVSEL_TO_UMASK(0x30);
} else
allowed_unitmask = P6_EVSEL_TO_UMASK(pevent->pm_unitmask);
unitmask = a->pm_p6_config & P6_EVSEL_UMASK_MASK;
if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
return EINVAL;
if (ev == PMC_EV_P6_MMX_UOPS_EXEC) /* hardcoded mask */
unitmask = P6_EVSEL_TO_UMASK(0x0F);
config = 0;
config |= P6_EVSEL_EVENT_SELECT(pevent->pm_evsel);
if (unitmask & (caps & PMC_CAP_QUALIFIER))
config |= unitmask;
if (caps & PMC_CAP_THRESHOLD)
config |= a->pm_p6_config & P6_EVSEL_CMASK_MASK;
/* set at least one of the 'usr' or 'os' caps */
if (caps & PMC_CAP_USER)
config |= P6_EVSEL_USR;
if (caps & PMC_CAP_SYSTEM)
config |= P6_EVSEL_OS;
if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
config |= (P6_EVSEL_USR|P6_EVSEL_OS);
if (caps & PMC_CAP_EDGE)
config |= P6_EVSEL_E;
if (caps & PMC_CAP_INVERT)
config |= P6_EVSEL_INV;
if (caps & PMC_CAP_INTERRUPT)
config |= P6_EVSEL_INT;
pm->pm_md.pm_p6.pm_p6_evsel = config;
PMCDBG(MDP,ALL,2, "p6-allocate config=0x%x", config);
return 0;
}
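p6_allocate_pmc() folds the requested capabilities into the event-select word that p6_start_pmc() later writes to the EVSEL MSR. A rough standalone sketch of that composition; the bit positions are assumptions taken from Intel's published P6 PerfEvtSel layout, since the driver's real P6_EVSEL_* macros live in the machine/pmc_mdep.h headers that are not part of this listing.

#include <stdio.h>
#include <stdint.h>

#define EVSEL_EVENT(x)	((uint32_t)(x) & 0xFF)
#define EVSEL_USR	(1U << 16)
#define EVSEL_OS	(1U << 17)
#define EVSEL_E		(1U << 18)
#define EVSEL_EN	(1U << 22)

int
main(void)
{
	/* INST_RETIRED (event 0xC0 above), user+kernel, edge detection */
	uint32_t config = EVSEL_EVENT(0xC0) | EVSEL_USR | EVSEL_OS | EVSEL_E;

	printf("allocated evsel = 0x%08x\n", config);
	/* p6_start_pmc() would OR in the enable bit before the wrmsr */
	printf("as started      = 0x%08x\n", config | EVSEL_EN);
	return 0;
}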
static int
p6_release_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
(void) pm;
PMCDBG(MDP,REL,1, "p6-release cpu=%d ri=%d pm=%p", cpu, ri, pm);
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
KASSERT(phw->phw_pmc == NULL,
("[p6,%d] PHW pmc %p != pmc %p", __LINE__, phw->phw_pmc, pm));
return 0;
}
static int
p6_start_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
const struct p6pmc_descr *pd;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] starting cpu%d,ri%d with no pmc configured",
__LINE__, cpu, ri));
PMCDBG(MDP,STA,1, "p6-start cpu=%d ri=%d", cpu, ri);
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0; /* TSC are always running */
KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
("[p6,%d] unknown PMC class %d", __LINE__,
pd->pm_descr.pd_class));
config = pm->pm_md.pm_p6.pm_p6_evsel;
PMCDBG(MDP,STA,2, "p6-start/2 cpu=%d ri=%d evselmsr=0x%x config=0x%x",
cpu, ri, pd->pm_evsel_msr, config);
if (pd->pm_evsel_msr == P6_MSR_EVSEL0) /* CTR 0 */
wrmsr(pd->pm_evsel_msr, config | P6_EVSEL_EN);
else { /* CTR1 shares the enable bit CTR 0 */
wrmsr(pd->pm_evsel_msr, config);
wrmsr(P6_MSR_EVSEL0, rdmsr(P6_MSR_EVSEL0) | P6_EVSEL_EN);
}
return 0;
}
static int
p6_stop_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
struct p6pmc_descr *pd;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] illegal cpu value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d] illegal row index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
cpu, ri));
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
("[p6,%d] unknown PMC class %d", __LINE__,
pd->pm_descr.pd_class));
PMCDBG(MDP,STO,1, "p6-stop cpu=%d ri=%d", cpu, ri);
/*
* If CTR0 is being turned off but CTR1 is active, we need to
* leave CTR0's EN field set. If CTR1 is being stopped, it
* suffices to zero its EVSEL register.
*/
if (ri == 1 &&
pmc_pcpu[cpu]->pc_hwpmcs[2]->phw_pmc != NULL)
config = P6_EVSEL_EN;
else
config = 0;
wrmsr(pd->pm_evsel_msr, config);
PMCDBG(MDP,STO,2, "p6-stop/2 cpu=%d ri=%d config=0x%x", cpu, ri,
config);
return 0;
}
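As the comment above notes, the two P6 counters share a single enable bit carried in EVSEL0. The toy model below replays the start/stop rules from p6_start_pmc() and p6_stop_pmc() against two fake registers (the enable-bit position is again an assumption from Intel's documentation), showing that stopping CTR0 while CTR1 is live leaves EVSEL0's enable bit set.

#include <stdio.h>
#include <stdint.h>

#define EN	(1U << 22)	/* assumed position of the shared enable bit */

static uint32_t evsel0, evsel1;	/* stand-ins for the two EVSEL MSRs */

/* mirrors p6_start_pmc(): only EVSEL0 carries the enable bit */
static void
start(int ctr, uint32_t config)
{
	if (ctr == 0)
		evsel0 = config | EN;
	else {
		evsel1 = config;
		evsel0 |= EN;
	}
}

/* mirrors p6_stop_pmc(): keep EN set if the other counter is still live */
static void
stop(int ctr, int other_running)
{
	if (ctr == 0)
		evsel0 = other_running ? EN : 0;
	else
		evsel1 = 0;
}

int
main(void)
{
	start(0, 0xC0);		/* CTR0: INST_RETIRED */
	start(1, 0x79);		/* CTR1: CPU_CLK_UNHALTED */
	stop(0, 1);		/* stop CTR0 while CTR1 keeps counting */
	printf("evsel0=0x%x evsel1=0x%x\n", evsel0, evsel1);
	return 0;
}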
static int
p6_intr(int cpu, uintptr_t eip)
{
(void) cpu;
(void) eip;
return 0;
}
static int
p6_describe(int cpu, int ri, struct pmc_info *pi,
struct pmc **ppmc)
{
int error;
size_t copied;
struct pmc_hw *phw;
struct p6pmc_descr *pd;
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &p6_pmcdesc[ri];
if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
PMC_NAME_MAX, &copied)) != 0)
return error;
pi->pm_class = pd->pm_descr.pd_class;
pi->pm_caps = pd->pm_descr.pd_caps;
pi->pm_width = pd->pm_descr.pd_width;
if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
pi->pm_enabled = TRUE;
*ppmc = phw->phw_pmc;
} else {
pi->pm_enabled = FALSE;
*ppmc = NULL;
}
return 0;
}
static int
p6_get_msr(int ri, uint32_t *msr)
{
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d ri %d out of range", __LINE__, ri));
*msr = p6_pmcdesc[ri].pm_pmc_msr;
return 0;
}
int
pmc_initialize_p6(struct pmc_mdep *pmc_mdep)
{
KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
("[p6,%d] Initializing non-intel processor", __LINE__));
PMCDBG(MDP,INI,1, "%s", "p6-initialize");
switch (pmc_mdep->pmd_cputype) {
/*
* P6 Family Processors
*/
case PMC_CPU_INTEL_P6:
case PMC_CPU_INTEL_CL:
case PMC_CPU_INTEL_PII:
case PMC_CPU_INTEL_PIII:
case PMC_CPU_INTEL_PM:
p6_cputype = pmc_mdep->pmd_cputype;
pmc_mdep->pmd_npmc = P6_NPMCS;
pmc_mdep->pmd_classes[1] = PMC_CLASS_P6;
pmc_mdep->pmd_nclasspmcs[1] = 2;
pmc_mdep->pmd_init = p6_init;
pmc_mdep->pmd_cleanup = p6_cleanup;
pmc_mdep->pmd_switch_in = p6_switch_in;
pmc_mdep->pmd_switch_out = p6_switch_out;
pmc_mdep->pmd_read_pmc = p6_read_pmc;
pmc_mdep->pmd_write_pmc = p6_write_pmc;
pmc_mdep->pmd_config_pmc = p6_config_pmc;
pmc_mdep->pmd_allocate_pmc = p6_allocate_pmc;
pmc_mdep->pmd_release_pmc = p6_release_pmc;
pmc_mdep->pmd_start_pmc = p6_start_pmc;
pmc_mdep->pmd_stop_pmc = p6_stop_pmc;
pmc_mdep->pmd_intr = p6_intr;
pmc_mdep->pmd_describe = p6_describe;
pmc_mdep->pmd_get_msr = p6_get_msr; /* i386 */
break;
default:
KASSERT(0,("[p6,%d] Unknown CPU type", __LINE__));
return ENOSYS;
}
return 0;
}

996
sys/hwpmc/hwpmc_amd.c Normal file
View File

@ -0,0 +1,996 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Support for the AMD K7 and later processors */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
/* AMD K7 and K8 PMCs */
#define AMD_PMC_EVSEL_0 0xC0010000
#define AMD_PMC_EVSEL_1 0xC0010001
#define AMD_PMC_EVSEL_2 0xC0010002
#define AMD_PMC_EVSEL_3 0xC0010003
#define AMD_PMC_PERFCTR_0 0xC0010004
#define AMD_PMC_PERFCTR_1 0xC0010005
#define AMD_PMC_PERFCTR_2 0xC0010006
#define AMD_PMC_PERFCTR_3 0xC0010007
#define K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) || \
((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) || \
((c) >= 0xCD && (c) <= 0xCF))
#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
/* reserved bits include bit 21 and the top two bits of the unit mask */
#define K7_PMC_RESERVED ((1 << 21) | (3 << 13))
#define K8_PMC_RESERVED (1 << 21)
#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
#if __i386__
#define AMD_NPMCS K7_NPMCS
#define AMD_PMC_CLASS PMC_CLASS_K7
#define AMD_PMC_COUNTERMASK K7_PMC_COUNTERMASK
#define AMD_PMC_TO_COUNTER(x) K7_PMC_TO_COUNTER(x)
#define AMD_PMC_INVERT K7_PMC_INVERT
#define AMD_PMC_ENABLE K7_PMC_ENABLE
#define AMD_PMC_INT K7_PMC_INT
#define AMD_PMC_PC K7_PMC_PC
#define AMD_PMC_EDGE K7_PMC_EDGE
#define AMD_PMC_OS K7_PMC_OS
#define AMD_PMC_USR K7_PMC_USR
#define AMD_PMC_UNITMASK_M K7_PMC_UNITMASK_M
#define AMD_PMC_UNITMASK_O K7_PMC_UNITMASK_O
#define AMD_PMC_UNITMASK_E K7_PMC_UNITMASK_E
#define AMD_PMC_UNITMASK_S K7_PMC_UNITMASK_S
#define AMD_PMC_UNITMASK_I K7_PMC_UNITMASK_I
#define AMD_PMC_UNITMASK K7_PMC_UNITMASK
#define AMD_PMC_EVENTMASK K7_PMC_EVENTMASK
#define AMD_PMC_TO_UNITMASK(x) K7_PMC_TO_UNITMASK(x)
#define AMD_PMC_TO_EVENTMASK(x) K7_PMC_TO_EVENTMASK(x)
#define AMD_VALID_BITS K7_VALID_BITS
#define AMD_PMC_CLASS_NAME "K7-"
#elif __amd64__
#define AMD_NPMCS K8_NPMCS
#define AMD_PMC_CLASS PMC_CLASS_K8
#define AMD_PMC_COUNTERMASK K8_PMC_COUNTERMASK
#define AMD_PMC_TO_COUNTER(x) K8_PMC_TO_COUNTER(x)
#define AMD_PMC_INVERT K8_PMC_INVERT
#define AMD_PMC_ENABLE K8_PMC_ENABLE
#define AMD_PMC_INT K8_PMC_INT
#define AMD_PMC_PC K8_PMC_PC
#define AMD_PMC_EDGE K8_PMC_EDGE
#define AMD_PMC_OS K8_PMC_OS
#define AMD_PMC_USR K8_PMC_USR
#define AMD_PMC_UNITMASK_M K8_PMC_UNITMASK_M
#define AMD_PMC_UNITMASK_O K8_PMC_UNITMASK_O
#define AMD_PMC_UNITMASK_E K8_PMC_UNITMASK_E
#define AMD_PMC_UNITMASK_S K8_PMC_UNITMASK_S
#define AMD_PMC_UNITMASK_I K8_PMC_UNITMASK_I
#define AMD_PMC_UNITMASK K8_PMC_UNITMASK
#define AMD_PMC_EVENTMASK K8_PMC_EVENTMASK
#define AMD_PMC_TO_UNITMASK(x) K8_PMC_TO_UNITMASK(x)
#define AMD_PMC_TO_EVENTMASK(x) K8_PMC_TO_EVENTMASK(x)
#define AMD_VALID_BITS K8_VALID_BITS
#define AMD_PMC_CLASS_NAME "K8-"
#else
#error Unsupported architecture.
#endif
/* AMD K7 & K8 PMCs */
struct amd_descr {
struct pmc_descr pm_descr; /* "base class" */
uint32_t pm_evsel; /* address of EVSEL register */
uint32_t pm_perfctr; /* address of PERFCTR register */
};
static const struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
{
.pm_descr =
{
.pd_name = "TSC",
.pd_class = PMC_CLASS_TSC,
.pd_caps = PMC_CAP_READ,
.pd_width = 64
},
.pm_evsel = MSR_TSC,
.pm_perfctr = 0 /* unused */
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "0",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_0,
.pm_perfctr = AMD_PMC_PERFCTR_0
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "1",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_1,
.pm_perfctr = AMD_PMC_PERFCTR_1
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "2",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_2,
.pm_perfctr = AMD_PMC_PERFCTR_2
},
{
.pm_descr =
{
.pd_name = AMD_PMC_CLASS_NAME "3",
.pd_class = AMD_PMC_CLASS,
.pd_caps = AMD_PMC_CAPS,
.pd_width = 48
},
.pm_evsel = AMD_PMC_EVSEL_3,
.pm_perfctr = AMD_PMC_PERFCTR_3
}
};
struct amd_event_code_map {
enum pmc_event pe_ev; /* enum value */
uint8_t pe_code; /* encoded event mask */
uint8_t pe_mask; /* bits allowed in unit mask */
};
const struct amd_event_code_map amd_event_codes[] = {
#if __i386__
{ PMC_EV_K7_DC_ACCESSES, 0x40, 0 },
{ PMC_EV_K7_DC_MISSES, 0x41, 0 },
{ PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, K7_PMC_UNITMASK_MOESI },
{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, K7_PMC_UNITMASK_MOESI },
{ PMC_EV_K7_DC_WRITEBACKS, 0x44, K7_PMC_UNITMASK_MOESI },
{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 },
{ PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 },
{ PMC_EV_K7_IC_FETCHES, 0x80, 0 },
{ PMC_EV_K7_IC_MISSES, 0x81, 0 },
{ PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 },
{ PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 },
{ PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 },
{ PMC_EV_K7_RETIRED_OPS, 0xC1, 0 },
{ PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 },
{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 },
{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 },
{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 },
{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
{ PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 }
#endif
#if __amd64__
{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },
{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
0x21, 0x00 },
{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
{ PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
{ PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },
{ PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
{ PMC_EV_K8_DC_MISS, 0x41, 0x00 },
{ PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
{ PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },
{ PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
{ PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },
{ PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
{ PMC_EV_K8_IC_MISS, 0x81, 0x00 },
{ PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
{ PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },
{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
{ PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
{ PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
{ PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
0xCA, 0x00 },
{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
0xCC, 0x07 },
{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },
{ PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
0xD2, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
0xD5, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
0xD6, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
0xD9, 0x00 },
{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
0xDA, 0x00 },
{ PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
0xE2, 0x00 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
{ PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
{ PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
#endif
};
const int amd_event_codes_size =
sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
/*
* read a pmc register
*/
static int
amd_read_pmc(int cpu, int ri, pmc_value_t *v)
{
enum pmc_mode mode;
const struct amd_descr *pd;
struct pmc *pm;
const struct pmc_hw *phw;
pmc_value_t tmp;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &amd_pmcdesc[ri];
pm = phw->phw_pmc;
KASSERT(pm != NULL,
("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
cpu, ri));
mode = pm->pm_mode;
PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
/* Reading the TSC is a special case */
if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
KASSERT(PMC_IS_COUNTING_MODE(mode),
("[amd,%d] TSC counter in non-counting mode", __LINE__));
*v = rdtsc();
PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
return 0;
}
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
if (PMC_IS_SAMPLING_MODE(mode))
*v = -tmp;
else
*v = tmp;
PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
return 0;
}
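For sampling PMCs the driver programs the two's complement of the reload count (see amd_write_pmc() below) and amd_intr() detects overflow with AMD_PMC_HAS_OVERFLOWED, i.e. bit 47 going clear. A standalone worked example of that arithmetic, assuming the 48-bit counter width given in amd_pmcdesc[].

#include <stdio.h>
#include <stdint.h>

#define CTRMASK		((1ULL << 48) - 1)		/* 48-bit counter, per pd_width above */
#define OVERFLOWED(v)	(((v) & (1ULL << 47)) == 0)	/* mirrors AMD_PMC_HAS_OVERFLOWED */

int
main(void)
{
	uint64_t reload = 1000;			/* take a sample every 1000 events */
	uint64_t ctr = (0 - reload) & CTRMASK;	/* what amd_write_pmc() programs */

	printf("programmed  0x%012jx overflowed=%d\n", (uintmax_t)ctr, OVERFLOWED(ctr));
	ctr = (ctr + 999) & CTRMASK;		/* 999 events later: bit 47 still set */
	printf("after 999   0x%012jx overflowed=%d\n", (uintmax_t)ctr, OVERFLOWED(ctr));
	ctr = (ctr + 1) & CTRMASK;		/* the 1000th event wraps the counter */
	printf("after 1000  0x%012jx overflowed=%d\n", (uintmax_t)ctr, OVERFLOWED(ctr));
	/* amd_intr() keys on exactly this bit-47 test to decide the PMC fired */
	return 0;
}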
/*
* Write a PMC MSR.
*/
static int
amd_write_pmc(int cpu, int ri, pmc_value_t v)
{
const struct amd_descr *pd;
struct pmc *pm;
const struct pmc_hw *phw;
enum pmc_mode mode;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &amd_pmcdesc[ri];
pm = phw->phw_pmc;
KASSERT(pm != NULL,
("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
cpu, ri));
mode = pm->pm_mode;
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
/* use 2's complement of the count for sampling mode PMCs */
if (PMC_IS_SAMPLING_MODE(mode))
v = -v;
PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
/* write the PMC value */
wrmsr(pd->pm_perfctr, v);
return 0;
}
/*
* configure hardware pmc according to the configuration recorded in
* pmc 'pm'.
*/
static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
KASSERT(pm == NULL || phw->phw_pmc == NULL,
("[amd,%d] hwpmc not unconfigured before re-config", __LINE__));
phw->phw_pmc = pm;
return 0;
}
/*
* Machine dependent actions taken during the context switch in of a
* thread.
*/
static int
amd_switch_in(struct pmc_cpu *pc)
{
(void) pc;
/* enable the RDPMC instruction */
load_cr4(rcr4() | CR4_PCE);
return 0;
}
/*
* Machine dependent actions taken during the context switch out of a
* thread.
*/
static int
amd_switch_out(struct pmc_cpu *pc)
{
(void) pc;
/* disallow RDPMC instruction */
load_cr4(rcr4() & ~CR4_PCE);
return 0;
}
/*
* Check if a given allocation is feasible.
*/
static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
int i;
uint32_t allowed_unitmask, caps, config, unitmask;
enum pmc_event pe;
const struct pmc_descr *pd;
(void) cpu;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row index %d", __LINE__, ri));
pd = &amd_pmcdesc[ri].pm_descr;
/* check class match */
if (pd->pd_class != pm->pm_class)
return EINVAL;
caps = pm->pm_caps;
PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);
if ((pd->pd_caps & caps) != caps)
return EPERM;
if (pd->pd_class == PMC_CLASS_TSC) {
/* TSC's are always allocated in system-wide counting mode */
if (a->pm_ev != PMC_EV_TSC_TSC ||
a->pm_mode != PMC_MODE_SC)
return EINVAL;
return 0;
}
KASSERT(pd->pd_class == AMD_PMC_CLASS,
("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));
pe = a->pm_ev;
/* map ev to the correct event mask code */
config = allowed_unitmask = 0;
for (i = 0; i < amd_event_codes_size; i++)
if (amd_event_codes[i].pe_ev == pe) {
config =
AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
allowed_unitmask =
AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
break;
}
if (i == amd_event_codes_size)
return EINVAL;
unitmask = a->pm_amd_config & AMD_PMC_UNITMASK;
if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
return EINVAL;
if (unitmask && (caps & PMC_CAP_QUALIFIER))
config |= unitmask;
if (caps & PMC_CAP_THRESHOLD)
config |= a->pm_amd_config & AMD_PMC_COUNTERMASK;
/* set at least one of the 'usr' or 'os' caps */
if (caps & PMC_CAP_USER)
config |= AMD_PMC_USR;
if (caps & PMC_CAP_SYSTEM)
config |= AMD_PMC_OS;
if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
config |= (AMD_PMC_USR|AMD_PMC_OS);
if (caps & PMC_CAP_EDGE)
config |= AMD_PMC_EDGE;
if (caps & PMC_CAP_INVERT)
config |= AMD_PMC_INVERT;
if (caps & PMC_CAP_INTERRUPT)
config |= AMD_PMC_INT;
pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */
PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
return 0;
}
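The allocation path above is a linear scan of amd_event_codes[] followed by a unit-mask validity check. A compressed standalone version of the same lookup, with a two-entry toy table standing in for the real one:

#include <stdio.h>
#include <stdint.h>

struct toy_code { int ev; uint8_t code; uint8_t mask; };

static const struct toy_code codes[] = {
	{ 1 /* e.g. DC_ACCESS */,         0x40, 0x00 },
	{ 2 /* e.g. DC_REFILL_FROM_L2 */, 0x42, 0x1F },
};

int
main(void)
{
	int ev = 2;
	uint8_t unitmask = 0x10;	/* requested qualifier bits */
	size_t i, n = sizeof(codes) / sizeof(codes[0]);

	for (i = 0; i < n; i++)
		if (codes[i].ev == ev)
			break;
	if (i == n)
		return 1;		/* the driver returns EINVAL here */
	if (unitmask & ~codes[i].mask)
		return 1;		/* reserved unit mask bits requested */
	printf("event code 0x%02x unitmask 0x%02x accepted\n", codes[i].code, unitmask);
	return 0;
}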
/*
* Release machine dependent state associated with a PMC. This is a
* no-op on this architecture.
*
*/
/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#if DEBUG
const struct amd_descr *pd;
#endif
struct pmc_hw *phw;
(void) pmc;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
KASSERT(phw->phw_pmc == NULL,
("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
#if DEBUG
pd = &amd_pmcdesc[ri];
if (pd->pm_descr.pd_class == AMD_PMC_CLASS)
KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif
return 0;
}
/*
* start a PMC.
*/
static int
amd_start_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
const struct amd_descr *pd;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &amd_pmcdesc[ri];
KASSERT(pm != NULL,
("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
cpu, ri));
PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0; /* TSCs are always running */
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
ri, cpu, pd->pm_descr.pd_name));
/* turn on the PMC ENABLE bit */
config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);
wrmsr(pd->pm_evsel, config);
return 0;
}
/*
* Stop a PMC.
*/
static int
amd_stop_pmc(int cpu, int ri)
{
struct pmc *pm;
struct pmc_hw *phw;
const struct amd_descr *pd;
uint64_t config;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &amd_pmcdesc[ri];
KASSERT(pm != NULL,
("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
cpu, ri));
/* can't stop a TSC */
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
("[amd,%d] unknown PMC class (%d)", __LINE__,
pd->pm_descr.pd_class));
KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
__LINE__, ri, cpu, pd->pm_descr.pd_name));
PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);
/* turn off the PMC ENABLE bit */
config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
wrmsr(pd->pm_evsel, config);
return 0;
}
/*
* Interrupt handler. This function needs to return '1' if the
* interrupt was caused by one of this CPU's PMCs or '0' otherwise. It is not allowed
* to sleep or do anything a 'fast' interrupt handler is not allowed
* to do.
*/
static int
amd_intr(int cpu, uintptr_t eip)
{
int i, retval;
enum pmc_mode mode;
uint32_t perfctr;
struct pmc *pm;
struct pmc_cpu *pc;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] out of range CPU %d", __LINE__, cpu));
retval = 0;
pc = pmc_pcpu[cpu];
/*
* look for all PMCs that have interrupted:
* - skip over the TSC [PMC#0]
* - look for a PMC with a valid 'struct pmc' association
* - look for a PMC in (a) sampling mode and (b) which has
* overflowed. If found, we update the process's
* histogram or send it a profiling signal by calling
* the appropriate helper function.
*/
for (i = 1; i < AMD_NPMCS; i++) {
phw = pc->pc_hwpmcs[i];
perfctr = amd_pmcdesc[i].pm_perfctr;
KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));
if ((pm = phw->phw_pmc) == NULL ||
pm->pm_state != PMC_STATE_RUNNING) {
atomic_add_int(&pmc_stats.pm_intr_ignored, 1);
continue;
}
mode = pm->pm_mode;
if (PMC_IS_SAMPLING_MODE(mode) &&
AMD_PMC_HAS_OVERFLOWED(perfctr)) {
atomic_add_int(&pmc_stats.pm_intr_processed, 1);
if (PMC_IS_SYSTEM_MODE(mode))
pmc_update_histogram(phw, eip);
else if (PMC_IS_VIRTUAL_MODE(mode))
pmc_send_signal(pm);
retval = 1;
}
}
return retval;
}
/*
* describe a PMC
*/
static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
int error;
size_t copied;
const struct amd_descr *pd;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] illegal CPU %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] row-index %d out of range", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &amd_pmcdesc[ri];
if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
PMC_NAME_MAX, &copied)) != 0)
return error;
pi->pm_class = pd->pm_descr.pd_class;
pi->pm_caps = pd->pm_descr.pd_caps;
pi->pm_width = pd->pm_descr.pd_width;
if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
pi->pm_enabled = TRUE;
*ppmc = phw->phw_pmc;
} else {
pi->pm_enabled = FALSE;
*ppmc = NULL;
}
return 0;
}
/*
* i386 specific entry points
*/
/*
* return the MSR address of the given PMC.
*/
static int
amd_get_msr(int ri, uint32_t *msr)
{
KASSERT(ri >= 0 && ri < AMD_NPMCS,
("[amd,%d] ri %d out of range", __LINE__, ri));
*msr = amd_pmcdesc[ri].pm_perfctr;
return 0;
}
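amd_get_msr() simply reports the PERFCTR MSR address for a row index; separately, amd_switch_in() above sets CR4.PCE so that a thread owning a process-mode PMC can read its counter directly with RDPMC. A hedged userland sketch of such a direct read; note that RDPMC takes a counter index (0..3 on K7/K8), not an MSR address, and it faults unless CR4.PCE is set and the counter has been configured and started.

#include <stdio.h>
#include <stdint.h>

static inline uint64_t
rdpmc(uint32_t ctr)
{
	uint32_t lo, hi;

	__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (ctr));
	return ((uint64_t)hi << 32) | lo;
}

int
main(void)
{
	/* hypothetical: assumes PMC 0 has already been set up and started */
	printf("pmc0 = %ju\n", (uintmax_t)rdpmc(0));
	return 0;
}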
/*
* processor dependent initialization.
*/
/*
* Per-processor data structure
*
* [common stuff]
* [5 struct pmc_hw pointers]
* [5 struct pmc_hw structures]
*/
struct amd_cpu {
struct pmc_cpu pc_common;
struct pmc_hw *pc_hwpmcs[AMD_NPMCS];
struct pmc_hw pc_amdpmcs[AMD_NPMCS];
};
static int
amd_init(int cpu)
{
int n;
struct amd_cpu *pcs;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] insane cpu number %d", __LINE__, cpu));
PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);
MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
M_WAITOK|M_ZERO);
if (pcs == NULL)
return ENOMEM;
phw = &pcs->pc_amdpmcs[0];
/*
* Initialize the per-cpu mutex and set the content of the
* hardware descriptors to a known state.
*/
for (n = 0; n < AMD_NPMCS; n++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
phw->phw_pmc = NULL;
pcs->pc_hwpmcs[n] = phw;
}
/* Mark the TSC as shareable */
pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
return 0;
}
/*
* processor dependent cleanup prior to the KLD
* being unloaded
*/
static int
amd_cleanup(int cpu)
{
int i;
uint32_t evsel;
struct pmc_cpu *pcs;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
/*
* First, turn off all PMCs on this CPU.
*/
for (i = 0; i < 4; i++) { /* XXX this loop is no longer needed */
evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
evsel &= ~AMD_PMC_ENABLE;
wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
}
/*
* Next, free up allocated space.
*/
pcs = pmc_pcpu[cpu];
#if DEBUG
/* check the TSC */
KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
for (i = 1; i < AMD_NPMCS; i++) {
KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
}
#endif
KASSERT(pcs != NULL,
("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu));
pmc_pcpu[cpu] = NULL;
FREE(pcs, M_PMC);
return 0;
}
/*
* Initialize ourselves.
*/
struct pmc_mdep *
pmc_amd_initialize(void)
{
struct pmc_mdep *pmc_mdep;
/* The presence of hardware performance counters on the AMD
Athlon, Duron or later processors is _not_ indicated by
any of the processor feature flags set by the 'CPUID'
instruction, so we only check that the 'instruction family'
field returned by CPUID is >= 6. This test needs to be
refined. */
if ((cpu_id & 0xF00) < 0x600)
return NULL;
MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
M_PMC, M_WAITOK|M_ZERO);
#if __i386__
pmc_mdep->pmd_cputype = PMC_CPU_AMD_K7;
#elif __amd64__
pmc_mdep->pmd_cputype = PMC_CPU_AMD_K8;
#else
#error Unknown AMD CPU type.
#endif
pmc_mdep->pmd_npmc = AMD_NPMCS;
/* this processor has two classes of usable PMCs */
pmc_mdep->pmd_nclass = 2;
pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
pmc_mdep->pmd_classes[1] = AMD_PMC_CLASS;
pmc_mdep->pmd_nclasspmcs[0] = 1;
pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);
pmc_mdep->pmd_init = amd_init;
pmc_mdep->pmd_cleanup = amd_cleanup;
pmc_mdep->pmd_switch_in = amd_switch_in;
pmc_mdep->pmd_switch_out = amd_switch_out;
pmc_mdep->pmd_read_pmc = amd_read_pmc;
pmc_mdep->pmd_write_pmc = amd_write_pmc;
pmc_mdep->pmd_config_pmc = amd_config_pmc;
pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
pmc_mdep->pmd_release_pmc = amd_release_pmc;
pmc_mdep->pmd_start_pmc = amd_start_pmc;
pmc_mdep->pmd_stop_pmc = amd_stop_pmc;
pmc_mdep->pmd_intr = amd_intr;
pmc_mdep->pmd_describe = amd_describe;
pmc_mdep->pmd_get_msr = amd_get_msr; /* i386 */
PMCDBG(MDP,INI,0,"%s","amd-initialize");
return pmc_mdep;
}

142
sys/hwpmc/hwpmc_intel.c Normal file
View File

@ -0,0 +1,142 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
struct pmc_mdep *
pmc_intel_initialize(void)
{
struct pmc_mdep *pmc_mdep;
enum pmc_cputype cputype;
int error, model;
KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
("[intel,%d] Initializing non-intel processor", __LINE__));
PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id);
cputype = -1;
switch (cpu_id & 0xF00) {
case 0x500: /* Pentium family processors */
cputype = PMC_CPU_INTEL_P5;
break;
case 0x600: /* Pentium Pro, Celeron, Pentium II & III */
switch ((cpu_id & 0xF0) >> 4) { /* model number field */
case 0x1:
cputype = PMC_CPU_INTEL_P6;
break;
case 0x3: case 0x5:
cputype = PMC_CPU_INTEL_PII;
break;
case 0x6:
cputype = PMC_CPU_INTEL_CL;
break;
case 0x7: case 0x8: case 0xA: case 0xB:
cputype = PMC_CPU_INTEL_PIII;
break;
case 0x9: case 0xD:
cputype = PMC_CPU_INTEL_PM;
break;
}
break;
case 0xF00: /* P4 */
model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
if (model >= 0 && model <= 3) /* known models */
cputype = PMC_CPU_INTEL_PIV;
break;
}
if ((int) cputype == -1) {
printf("pmc: Unknown Intel CPU.\n");
return NULL;
}
MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
M_PMC, M_WAITOK|M_ZERO);
pmc_mdep->pmd_cputype = cputype;
pmc_mdep->pmd_nclass = 2;
pmc_mdep->pmd_classes[0] = PMC_CLASS_TSC;
pmc_mdep->pmd_nclasspmcs[0] = 1;
error = 0;
switch (cputype) {
/*
* Intel Pentium 4 Processors
*/
case PMC_CPU_INTEL_PIV:
error = pmc_initialize_p4(pmc_mdep);
break;
/*
* P6 Family Processors
*/
case PMC_CPU_INTEL_P6:
case PMC_CPU_INTEL_CL:
case PMC_CPU_INTEL_PII:
case PMC_CPU_INTEL_PIII:
case PMC_CPU_INTEL_PM:
error = pmc_initialize_p6(pmc_mdep);
break;
/*
* Intel Pentium PMCs.
*/
case PMC_CPU_INTEL_P5:
error = pmc_initialize_p5(pmc_mdep);
break;
default:
KASSERT(0,("[intel,%d] Unknown CPU type", __LINE__));
}
if (error) {
FREE(pmc_mdep, M_PMC);
pmc_mdep = NULL;
}
return pmc_mdep;
}

3671
sys/hwpmc/hwpmc_mod.c Normal file

File diff suppressed because it is too large Load Diff

51
sys/hwpmc/hwpmc_pentium.c Normal file
View File

@ -0,0 +1,51 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
/*
* Intel Pentium PMCs
*/
int
pmc_initialize_p5(struct pmc_mdep *pmc_mdep)
{
(void) pmc_mdep;
return ENOSYS; /* nothing here yet */
}

1484
sys/hwpmc/hwpmc_piv.c Normal file

File diff suppressed because it is too large Load Diff

742
sys/hwpmc/hwpmc_ppro.c Normal file
View File

@ -0,0 +1,742 @@
/*-
* Copyright (c) 2003-2005 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>
/*
* PENTIUM PRO SUPPORT
*/
struct p6pmc_descr {
struct pmc_descr pm_descr; /* common information */
uint32_t pm_pmc_msr;
uint32_t pm_evsel_msr;
};
static struct p6pmc_descr p6_pmcdesc[P6_NPMCS] = {
/* TSC */
{
.pm_descr =
{
.pd_name = "TSC",
.pd_class = PMC_CLASS_TSC,
.pd_caps = PMC_CAP_READ,
.pd_width = 64
},
.pm_pmc_msr = 0x10,
.pm_evsel_msr = ~0
},
#define P6_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
/* PMC 0 */
{
.pm_descr =
{
.pd_name ="P6-0",
.pd_class = PMC_CLASS_P6,
.pd_caps = P6_PMC_CAPS,
.pd_width = 40
},
.pm_pmc_msr = P6_MSR_PERFCTR0,
.pm_evsel_msr = P6_MSR_EVSEL0
},
/* PMC 1 */
{
.pm_descr =
{
.pd_name ="P6-1",
.pd_class = PMC_CLASS_P6,
.pd_caps = P6_PMC_CAPS,
.pd_width = 40
},
.pm_pmc_msr = P6_MSR_PERFCTR1,
.pm_evsel_msr = P6_MSR_EVSEL1
}
};
static enum pmc_cputype p6_cputype;
/*
* P6 Event descriptor
*/
struct p6_event_descr {
const enum pmc_event pm_event;
uint32_t pm_evsel;
uint32_t pm_flags;
uint32_t pm_unitmask;
};
static const struct p6_event_descr p6_events[] = {
#define P6_EVDESCR(NAME, EVSEL, FLAGS, UMASK) \
{ \
.pm_event = PMC_EV_P6_##NAME, \
.pm_evsel = (EVSEL), \
.pm_flags = (FLAGS), \
.pm_unitmask = (UMASK) \
}
#define P6F_P6 (1 << PMC_CPU_INTEL_P6)
#define P6F_CL (1 << PMC_CPU_INTEL_CL)
#define P6F_PII (1 << PMC_CPU_INTEL_PII)
#define P6F_PIII (1 << PMC_CPU_INTEL_PIII)
#define P6F_PM (1 << PMC_CPU_INTEL_PM)
#define P6F_CTR0 0x0001
#define P6F_CTR1 0x0002
#define P6F_ALL_CPUS (P6F_P6 | P6F_PII | P6F_CL | P6F_PIII | P6F_PM)
#define P6F_ALL_CTRS (P6F_CTR0 | P6F_CTR1)
#define P6F_ALL (P6F_ALL_CPUS | P6F_ALL_CTRS)
#define P6_EVENT_VALID_FOR_CPU(P,CPU) ((P)->pm_flags & (1 << (CPU)))
#define P6_EVENT_VALID_FOR_CTR(P,CTR) ((P)->pm_flags & (1 << (CTR)))
P6_EVDESCR(DATA_MEM_REFS, 0x43, P6F_ALL, 0x00),
P6_EVDESCR(DCU_LINES_IN, 0x45, P6F_ALL, 0x00),
P6_EVDESCR(DCU_M_LINES_IN, 0x46, P6F_ALL, 0x00),
P6_EVDESCR(DCU_M_LINES_OUT, 0x47, P6F_ALL, 0x00),
P6_EVDESCR(DCU_MISS_OUTSTANDING, 0x48, P6F_ALL, 0x00),
P6_EVDESCR(IFU_FETCH, 0x80, P6F_ALL, 0x00),
P6_EVDESCR(IFU_FETCH_MISS, 0x81, P6F_ALL, 0x00),
P6_EVDESCR(ITLB_MISS, 0x85, P6F_ALL, 0x00),
P6_EVDESCR(IFU_MEM_STALL, 0x86, P6F_ALL, 0x00),
P6_EVDESCR(ILD_STALL, 0x87, P6F_ALL, 0x00),
P6_EVDESCR(L2_IFETCH, 0x28, P6F_ALL, 0x0F),
P6_EVDESCR(L2_LD, 0x29, P6F_ALL, 0x0F),
P6_EVDESCR(L2_ST, 0x2A, P6F_ALL, 0x0F),
P6_EVDESCR(L2_LINES_IN, 0x24, P6F_ALL, 0x0F),
P6_EVDESCR(L2_LINES_OUT, 0x26, P6F_ALL, 0x0F),
P6_EVDESCR(L2_M_LINES_INM, 0x25, P6F_ALL, 0x00),
P6_EVDESCR(L2_M_LINES_OUTM, 0x27, P6F_ALL, 0x0F),
P6_EVDESCR(L2_RQSTS, 0x2E, P6F_ALL, 0x0F),
P6_EVDESCR(L2_ADS, 0x21, P6F_ALL, 0x00),
P6_EVDESCR(L2_DBUS_BUSY, 0x22, P6F_ALL, 0x00),
P6_EVDESCR(L2_DBUS_BUSY_RD, 0x23, P6F_ALL, 0x00),
P6_EVDESCR(BUS_DRDY_CLOCKS, 0x62, P6F_ALL, 0x20),
P6_EVDESCR(BUS_LOCK_CLOCKS, 0x63, P6F_ALL, 0x20),
P6_EVDESCR(BUS_REQ_OUTSTANDING, 0x60, P6F_ALL, 0x00),
P6_EVDESCR(BUS_TRAN_BRD, 0x65, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_RFO, 0x66, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRANS_WB, 0x67, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_IFETCH, 0x68, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_INVAL, 0x69, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_PWR, 0x6A, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRANS_P, 0x6B, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRANS_IO, 0x6C, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_DEF, 0x6D, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_BURST, 0x6E, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_ANY, 0x70, P6F_ALL, 0x20),
P6_EVDESCR(BUS_TRAN_MEM, 0x6F, P6F_ALL, 0x20),
P6_EVDESCR(BUS_DATA_RCV, 0x64, P6F_ALL, 0x00),
P6_EVDESCR(BUS_BNR_DRV, 0x61, P6F_ALL, 0x00),
P6_EVDESCR(BUS_HIT_DRV, 0x7A, P6F_ALL, 0x00),
P6_EVDESCR(BUS_HITM_DRV, 0x7B, P6F_ALL, 0x00),
P6_EVDESCR(BUS_SNOOP_STALL, 0x7E, P6F_ALL, 0x00),
P6_EVDESCR(FLOPS, 0xC1, P6F_ALL_CPUS | P6F_CTR0, 0x00),
P6_EVDESCR(FP_COMPS_OPS_EXE, 0x10, P6F_ALL_CPUS | P6F_CTR0, 0x00),
P6_EVDESCR(FP_ASSIST, 0x11, P6F_ALL_CPUS | P6F_CTR1, 0x00),
P6_EVDESCR(MUL, 0x12, P6F_ALL_CPUS | P6F_CTR1, 0x00),
P6_EVDESCR(DIV, 0x13, P6F_ALL_CPUS | P6F_CTR1, 0x00),
P6_EVDESCR(CYCLES_DIV_BUSY, 0x14, P6F_ALL_CPUS | P6F_CTR0, 0x00),
P6_EVDESCR(LD_BLOCKS, 0x03, P6F_ALL, 0x00),
P6_EVDESCR(SB_DRAINS, 0x04, P6F_ALL, 0x00),
P6_EVDESCR(MISALIGN_MEM_REF, 0x05, P6F_ALL, 0x00),
P6_EVDESCR(EMON_KNI_PREF_DISPATCHED, 0x07, P6F_PIII | P6F_ALL_CTRS, 0x03),
P6_EVDESCR(EMON_KNI_PREF_MISS, 0x4B, P6F_PIII | P6F_ALL_CTRS, 0x03),
P6_EVDESCR(INST_RETIRED, 0xC0, P6F_ALL, 0x00),
P6_EVDESCR(UOPS_RETIRED, 0xC2, P6F_ALL, 0x00),
P6_EVDESCR(INST_DECODED, 0xD0, P6F_ALL, 0x00),
P6_EVDESCR(EMON_KNI_INST_RETIRED, 0xD8, P6F_PIII | P6F_ALL_CTRS, 0x01),
P6_EVDESCR(EMON_KNI_COMP_INST_RET, 0xD9, P6F_PIII | P6F_ALL_CTRS, 0x01),
P6_EVDESCR(HW_INT_RX, 0xC8, P6F_ALL, 0x00),
P6_EVDESCR(CYCLES_INT_MASKED, 0xC6, P6F_ALL, 0x00),
P6_EVDESCR(CYCLES_INT_PENDING_AND_MASKED, 0xC7, P6F_ALL, 0x00),
P6_EVDESCR(BR_INST_RETIRED, 0xC4, P6F_ALL, 0x00),
P6_EVDESCR(BR_MISS_PRED_RETIRED, 0xC5, P6F_ALL, 0x00),
P6_EVDESCR(BR_TAKEN_RETIRED, 0xC9, P6F_ALL, 0x00),
P6_EVDESCR(BR_MISS_PRED_TAKEN_RET, 0xCA, P6F_ALL, 0x00),
P6_EVDESCR(BR_INST_DECODED, 0xE0, P6F_ALL, 0x00),
P6_EVDESCR(BTB_MISSES, 0xE2, P6F_ALL, 0x00),
P6_EVDESCR(BR_BOGUS, 0xE4, P6F_ALL, 0x00),
P6_EVDESCR(BACLEARS, 0xE6, P6F_ALL, 0x00),
P6_EVDESCR(RESOURCE_STALLS, 0xA2, P6F_ALL, 0x00),
P6_EVDESCR(PARTIAL_RAT_STALLS, 0xD2, P6F_ALL, 0x00),
P6_EVDESCR(SEGMENT_REG_LOADS, 0x06, P6F_ALL, 0x00),
P6_EVDESCR(CPU_CLK_UNHALTED, 0x79, P6F_ALL, 0x00),
P6_EVDESCR(MMX_INSTR_EXEC, 0xB0,
P6F_ALL_CTRS | P6F_CL | P6F_PII, 0x00),
P6_EVDESCR(MMX_SAT_INSTR_EXEC, 0xB1,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
P6_EVDESCR(MMX_UOPS_EXEC, 0xB2,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
P6_EVDESCR(MMX_INSTR_TYPE_EXEC, 0xB3,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x3F),
P6_EVDESCR(FP_MMX_TRANS, 0xCC,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x01),
P6_EVDESCR(MMX_ASSIST, 0xCD,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
P6_EVDESCR(MMX_INSTR_RET, 0xCE, P6F_ALL_CTRS | P6F_PII, 0x00),
P6_EVDESCR(SEG_RENAME_STALLS, 0xD4,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
P6_EVDESCR(SEG_REG_RENAMES, 0xD5,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
P6_EVDESCR(RET_SEG_RENAMES, 0xD6,
P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
P6_EVDESCR(EMON_EST_TRANS, 0x58, P6F_ALL_CTRS | P6F_PM, 0x02),
P6_EVDESCR(EMON_THERMAL_TRIP, 0x59, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_INST_EXEC, 0x88, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_MISSP_EXEC, 0x89, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_BAC_MISSP_EXEC, 0x8A, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CND_EXEC, 0x8B, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CND_MISSP_EXEC, 0x8C, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_IND_EXEC, 0x8D, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_IND_MISSP_EXEC, 0x8E, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_RET_EXEC, 0x8F, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_RET_MISSP_EXEC, 0x90, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_RET_BAC_MISSP_EXEC, 0x91, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CALL_EXEC, 0x92, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_CALL_MISSP_EXEC, 0x93, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(BR_IND_CALL_EXEC, 0x94, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_SIMD_INSTR_RETIRED, 0xCE, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_SYNCH_UOPS, 0xD3, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_ESP_UOPS, 0xD7, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_FUSED_UOPS_RET, 0xDA, P6F_ALL_CTRS | P6F_PM, 0x03),
P6_EVDESCR(EMON_UNFUSION, 0xDB, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_PREF_RQSTS_UP, 0xF0, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_PREF_RQSTS_DN, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x00),
P6_EVDESCR(EMON_SSE_SSE2_INST_RETIRED, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x03),
P6_EVDESCR(EMON_SSE_SSE2_COMP_INST_RETIRED, 0xD9, P6F_ALL_CTRS | P6F_PM, 0x03)
#undef P6_EVDESCR
};
#define P6_NEVENTS (PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1)
static const struct p6_event_descr *
p6_find_event(enum pmc_event ev)
{
int n;
for (n = 0; n < P6_NEVENTS; n++)
if (p6_events[n].pm_event == ev)
break;
if (n == P6_NEVENTS)
return NULL;
return &p6_events[n];
}
/*
* Per-CPU data structure for P6 class CPUs
*
* [common stuff]
* [3 struct pmc_hw pointers]
* [3 struct pmc_hw structures]
*/
struct p6_cpu {
struct pmc_cpu pc_common;
struct pmc_hw *pc_hwpmcs[P6_NPMCS];
struct pmc_hw pc_p6pmcs[P6_NPMCS];
};
static int
p6_init(int cpu)
{
int n;
struct p6_cpu *pcs;
struct pmc_hw *phw;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] bad cpu %d", __LINE__, cpu));
PMCDBG(MDP,INI,0,"p6-init cpu=%d", cpu);
MALLOC(pcs, struct p6_cpu *, sizeof(struct p6_cpu), M_PMC,
M_WAITOK|M_ZERO);
if (pcs == NULL)
return ENOMEM;
phw = pcs->pc_p6pmcs;
for (n = 0; n < P6_NPMCS; n++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
phw->phw_pmc = NULL;
pcs->pc_hwpmcs[n] = phw;
}
/* Mark the TSC as shareable */
pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
return 0;
}
static int
p6_cleanup(int cpu)
{
struct pmc_cpu *pcs;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] bad cpu %d", __LINE__, cpu));
PMCDBG(MDP,INI,0,"p6-cleanup cpu=%d", cpu);
if ((pcs = pmc_pcpu[cpu]) != NULL)
FREE(pcs, M_PMC);
pmc_pcpu[cpu] = NULL;
return 0;
}
static int
p6_switch_in(struct pmc_cpu *pc)
{
(void) pc;
return 0;
}
static int
p6_switch_out(struct pmc_cpu *pc)
{
(void) pc;
return 0;
}
static int
p6_read_pmc(int cpu, int ri, pmc_value_t *v)
{
struct pmc_hw *phw;
struct pmc *pm;
struct p6pmc_descr *pd;
pmc_value_t tmp;
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
tmp = rdmsr(pd->pm_pmc_msr) & P6_PERFCTR_MASK;
if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
*v = -tmp;
else
*v = tmp;
PMCDBG(MDP,REA,1, "p6-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
pd->pm_pmc_msr, *v);
return 0;
}
static int
p6_write_pmc(int cpu, int ri, pmc_value_t v)
{
struct pmc_hw *phw;
struct pmc *pm;
struct p6pmc_descr *pd;
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
PMCDBG(MDP,WRI,1, "p6-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
pd->pm_pmc_msr, v);
if (PMC_IS_SAMPLING_MODE(pm->pm_mode))
v = -v;
wrmsr(pd->pm_pmc_msr, v & P6_PERFCTR_MASK);
return 0;
}
static int
p6_config_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
PMCDBG(MDP,CFG,1, "p6-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
phw->phw_pmc = pm;
return 0;
}
/*
* A pmc may be allocated to a given row index if:
* - the event is valid for this CPU
* - the event is valid for this counter index
*/
static int
p6_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
uint32_t allowed_unitmask, caps, config, unitmask;
const struct p6pmc_descr *pd;
const struct p6_event_descr *pevent;
enum pmc_event ev;
(void) cpu;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p4,%d] illegal CPU %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p4,%d] illegal row-index value %d", __LINE__, ri));
pd = &p6_pmcdesc[ri];
PMCDBG(MDP,ALL,1, "p6-allocate ri=%d class=%d pmccaps=0x%x "
"reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
pm->pm_caps);
/* check class */
if (pd->pm_descr.pd_class != pm->pm_class)
return EINVAL;
/* check requested capabilities */
caps = a->pm_caps;
if ((pd->pm_descr.pd_caps & caps) != caps)
return EPERM;
if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
/* TSC's are always allocated in system-wide counting mode */
if (a->pm_ev != PMC_EV_TSC_TSC ||
a->pm_mode != PMC_MODE_SC)
return EINVAL;
return 0;
}
/*
* P6 class events
*/
ev = pm->pm_event;
if (ev < PMC_EV_P6_FIRST || ev > PMC_EV_P6_LAST)
return EINVAL;
if ((pevent = p6_find_event(ev)) == NULL)
return ESRCH;
if (!P6_EVENT_VALID_FOR_CPU(pevent, p6_cputype) ||
!P6_EVENT_VALID_FOR_CTR(pevent, (ri-1)))
return EINVAL;
/* For certain events, Pentium M differs from the stock P6 */
allowed_unitmask = 0;
if (p6_cputype == PMC_CPU_INTEL_PM) {
if (ev == PMC_EV_P6_L2_LD || ev == PMC_EV_P6_L2_LINES_IN ||
ev == PMC_EV_P6_L2_LINES_OUT)
allowed_unitmask = P6_EVSEL_TO_UMASK(0x3F);
else if (ev == PMC_EV_P6_L2_M_LINES_OUTM)
allowed_unitmask = P6_EVSEL_TO_UMASK(0x30);
} else
allowed_unitmask = P6_EVSEL_TO_UMASK(pevent->pm_unitmask);
unitmask = a->pm_p6_config & P6_EVSEL_UMASK_MASK;
if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
return EINVAL;
if (ev == PMC_EV_P6_MMX_UOPS_EXEC) /* hardcoded mask */
unitmask = P6_EVSEL_TO_UMASK(0x0F);
config = 0;
config |= P6_EVSEL_EVENT_SELECT(pevent->pm_evsel);
if (unitmask & (caps & PMC_CAP_QUALIFIER))
config |= unitmask;
if (caps & PMC_CAP_THRESHOLD)
config |= a->pm_p6_config & P6_EVSEL_CMASK_MASK;
/* set at least one of the 'usr' or 'os' caps */
if (caps & PMC_CAP_USER)
config |= P6_EVSEL_USR;
if (caps & PMC_CAP_SYSTEM)
config |= P6_EVSEL_OS;
if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
config |= (P6_EVSEL_USR|P6_EVSEL_OS);
if (caps & PMC_CAP_EDGE)
config |= P6_EVSEL_E;
if (caps & PMC_CAP_INVERT)
config |= P6_EVSEL_INV;
if (caps & PMC_CAP_INTERRUPT)
config |= P6_EVSEL_INT;
pm->pm_md.pm_p6.pm_p6_evsel = config;
PMCDBG(MDP,ALL,2, "p6-allocate config=0x%x", config);
return 0;
}
static int
p6_release_pmc(int cpu, int ri, struct pmc *pm)
{
struct pmc_hw *phw;
(void) pm;
PMCDBG(MDP,REL,1, "p6-release cpu=%d ri=%d pm=%p", cpu, ri, pm);
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
KASSERT(phw->phw_pmc == NULL,
("[p6,%d] PHW pmc %p != pmc %p", __LINE__, phw->phw_pmc, pm));
return 0;
}
static int
p6_start_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
const struct p6pmc_descr *pd;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d] illegal row-index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] starting cpu%d,ri%d with no pmc configured",
__LINE__, cpu, ri));
PMCDBG(MDP,STA,1, "p6-start cpu=%d ri=%d", cpu, ri);
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0; /* TSC are always running */
KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
("[p6,%d] unknown PMC class %d", __LINE__,
pd->pm_descr.pd_class));
config = pm->pm_md.pm_p6.pm_p6_evsel;
PMCDBG(MDP,STA,2, "p6-start/2 cpu=%d ri=%d evselmsr=0x%x config=0x%x",
cpu, ri, pd->pm_evsel_msr, config);
if (pd->pm_evsel_msr == P6_MSR_EVSEL0) /* CTR 0 */
wrmsr(pd->pm_evsel_msr, config | P6_EVSEL_EN);
else { /* CTR1 shares the enable bit CTR 0 */
wrmsr(pd->pm_evsel_msr, config);
wrmsr(P6_MSR_EVSEL0, rdmsr(P6_MSR_EVSEL0) | P6_EVSEL_EN);
}
return 0;
}
static int
p6_stop_pmc(int cpu, int ri)
{
uint32_t config;
struct pmc *pm;
struct pmc_hw *phw;
struct p6pmc_descr *pd;
KASSERT(cpu >= 0 && cpu < mp_ncpus,
("[p6,%d] illegal cpu value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d] illegal row index %d", __LINE__, ri));
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pm = phw->phw_pmc;
pd = &p6_pmcdesc[ri];
KASSERT(pm,
("[p6,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
cpu, ri));
if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
return 0;
KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P6,
("[p6,%d] unknown PMC class %d", __LINE__,
pd->pm_descr.pd_class));
PMCDBG(MDP,STO,1, "p6-stop cpu=%d ri=%d", cpu, ri);
/*
* If CTR0 is being turned off but CTR1 is active, we need to
* leave CTR0's EN field set. If CTR1 is being stopped, it
* suffices to zero its EVSEL register.
*/
if (ri == 1 &&
pmc_pcpu[cpu]->pc_hwpmcs[2]->phw_pmc != NULL)
config = P6_EVSEL_EN;
else
config = 0;
wrmsr(pd->pm_evsel_msr, config);
PMCDBG(MDP,STO,2, "p6-stop/2 cpu=%d ri=%d config=0x%x", cpu, ri,
config);
return 0;
}
static int
p6_intr(int cpu, uintptr_t eip)
{
(void) cpu;
(void) eip;
return 0;
}
static int
p6_describe(int cpu, int ri, struct pmc_info *pi,
struct pmc **ppmc)
{
int error;
size_t copied;
struct pmc_hw *phw;
struct p6pmc_descr *pd;
phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
pd = &p6_pmcdesc[ri];
if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
PMC_NAME_MAX, &copied)) != 0)
return error;
pi->pm_class = pd->pm_descr.pd_class;
pi->pm_caps = pd->pm_descr.pd_caps;
pi->pm_width = pd->pm_descr.pd_width;
if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
pi->pm_enabled = TRUE;
*ppmc = phw->phw_pmc;
} else {
pi->pm_enabled = FALSE;
*ppmc = NULL;
}
return 0;
}
static int
p6_get_msr(int ri, uint32_t *msr)
{
KASSERT(ri >= 0 && ri < P6_NPMCS,
("[p6,%d ri %d out of range", __LINE__, ri));
*msr = p6_pmcdesc[ri].pm_pmc_msr;
return 0;
}
int
pmc_initialize_p6(struct pmc_mdep *pmc_mdep)
{
KASSERT(strcmp(cpu_vendor, "GenuineIntel") == 0,
("[p6,%d] Initializing non-intel processor", __LINE__));
PMCDBG(MDP,INI,1, "%s", "p6-initialize");
switch (pmc_mdep->pmd_cputype) {
/*
* P6 Family Processors
*/
case PMC_CPU_INTEL_P6:
case PMC_CPU_INTEL_CL:
case PMC_CPU_INTEL_PII:
case PMC_CPU_INTEL_PIII:
case PMC_CPU_INTEL_PM:
p6_cputype = pmc_mdep->pmd_cputype;
pmc_mdep->pmd_npmc = P6_NPMCS;
pmc_mdep->pmd_classes[1] = PMC_CLASS_P6;
pmc_mdep->pmd_nclasspmcs[1] = 2;
pmc_mdep->pmd_init = p6_init;
pmc_mdep->pmd_cleanup = p6_cleanup;
pmc_mdep->pmd_switch_in = p6_switch_in;
pmc_mdep->pmd_switch_out = p6_switch_out;
pmc_mdep->pmd_read_pmc = p6_read_pmc;
pmc_mdep->pmd_write_pmc = p6_write_pmc;
pmc_mdep->pmd_config_pmc = p6_config_pmc;
pmc_mdep->pmd_allocate_pmc = p6_allocate_pmc;
pmc_mdep->pmd_release_pmc = p6_release_pmc;
pmc_mdep->pmd_start_pmc = p6_start_pmc;
pmc_mdep->pmd_stop_pmc = p6_stop_pmc;
pmc_mdep->pmd_intr = p6_intr;
pmc_mdep->pmd_describe = p6_describe;
pmc_mdep->pmd_get_msr = p6_get_msr; /* i386 */
break;
default:
KASSERT(0,("[p6,%d] Unknown CPU type", __LINE__));
return ENOSYS;
}
return 0;
}
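The machine-dependent driver above is reached from userland through the pmc(3) library. As a rough sketch only, not part of this change, the fragment below shows how a system-wide counting PMC might be allocated, started and read; the event-specifier string "p6-inst-retired" is a placeholder, since the event names actually accepted are defined by libpmc and documented in pmc(3).

/*
 * Sketch only: allocate, start and read a system-wide counting PMC
 * through pmc(3).  The event name "p6-inst-retired" is a placeholder.
 */
#include <sys/types.h>

#include <err.h>
#include <pmc.h>
#include <stdint.h>
#include <stdio.h>
#include <sysexits.h>

int
count_retired_instructions(void)
{
	pmc_id_t pmcid;
	pmc_value_t v;

	if (pmc_init() < 0)
		err(EX_UNAVAILABLE, "pmc_init");
	if (pmc_allocate("p6-inst-retired", PMC_MODE_SC, 0, 0, &pmcid) < 0)
		err(EX_OSERR, "pmc_allocate");
	if (pmc_start(pmcid) < 0)
		err(EX_OSERR, "pmc_start");
	/* ... run the workload being measured ... */
	if (pmc_read(pmcid, &v) < 0)
		err(EX_OSERR, "pmc_read");
	(void) printf("instructions retired: %ju\n", (uintmax_t) v);
	return (pmc_release(pmcid));
}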


@@ -108,7 +108,7 @@ static struct lvt lvts[LVT_MAX + 1] = {
{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* LINT1: NMI */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT }, /* Timer */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, 0 }, /* PMC */
{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* PMC */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT }, /* Thermal */
};
@@ -304,6 +304,11 @@ lapic_setup(void)
/* Program LINT[01] LVT entries. */
lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);
#ifdef HWPMC_HOOKS
/* Program the PMC LVT entry if present. */
if (maxlvt >= LVT_PMC)
lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);
#endif
/* Program timer LVT and setup handler. */
lapic->lvt_timer = lvt_mode(la, LVT_TIMER, lapic->lvt_timer);

184
sys/i386/include/pmc_mdep.h Normal file

@@ -0,0 +1,184 @@
/*-
* Copyright (c) 2003, Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/* Machine dependent interfaces */
#ifndef _MACHINE_PMC_MDEP_H
#define _MACHINE_PMC_MDEP_H 1
#include <sys/pmc.h>
/* AMD K7 PMCs */
#define K7_NPMCS 5 /* 1 TSC + 4 PMCs */
#define K7_PMC_COUNTERMASK 0xFF000000
#define K7_PMC_TO_COUNTER(x) (((x) << 24) & K7_PMC_COUNTERMASK)
#define K7_PMC_INVERT (1 << 23)
#define K7_PMC_ENABLE (1 << 22)
#define K7_PMC_INT (1 << 20)
#define K7_PMC_PC (1 << 19)
#define K7_PMC_EDGE (1 << 18)
#define K7_PMC_OS (1 << 17)
#define K7_PMC_USR (1 << 16)
#define K7_PMC_UNITMASK_M 0x10
#define K7_PMC_UNITMASK_O 0x08
#define K7_PMC_UNITMASK_E 0x04
#define K7_PMC_UNITMASK_S 0x02
#define K7_PMC_UNITMASK_I 0x01
#define K7_PMC_UNITMASK_MOESI 0x1F
#define K7_PMC_UNITMASK 0xFF00
#define K7_PMC_EVENTMASK 0x00FF
#define K7_PMC_TO_UNITMASK(x) (((x) << 8) & K7_PMC_UNITMASK)
#define K7_PMC_TO_EVENTMASK(x) ((x) & 0xFF)
#define K7_VALID_BITS (K7_PMC_COUNTERMASK | K7_PMC_INVERT | \
K7_PMC_ENABLE | K7_PMC_INT | K7_PMC_PC | K7_PMC_EDGE | K7_PMC_OS | \
K7_PMC_USR | K7_PMC_UNITMASK | K7_PMC_EVENTMASK)
/* Intel P4 PMCs */
#define P4_NPMCS 19 /* 1 TSC + 18 PMCS */
#define P4_NESCR 45
#define P4_INVALID_PMC_INDEX -1
#define P4_MAX_ESCR_PER_EVENT 2
#define P4_MAX_PMC_PER_ESCR 3
#define P4_CCCR_OVF (1 << 31)
#define P4_CCCR_CASCADE (1 << 30)
#define P4_CCCR_OVF_PMI_T1 (1 << 27)
#define P4_CCCR_OVF_PMI_T0 (1 << 26)
#define P4_CCCR_FORCE_OVF (1 << 25)
#define P4_CCCR_EDGE (1 << 24)
#define P4_CCCR_THRESHOLD_SHIFT 20
#define P4_CCCR_THRESHOLD_MASK 0x00F00000
#define P4_CCCR_TO_THRESHOLD(C) (((C) << P4_CCCR_THRESHOLD_SHIFT) & \
P4_CCCR_THRESHOLD_MASK)
#define P4_CCCR_COMPLEMENT (1 << 19)
#define P4_CCCR_COMPARE (1 << 18)
#define P4_CCCR_ACTIVE_THREAD_SHIFT 16
#define P4_CCCR_ACTIVE_THREAD_MASK 0x00030000
#define P4_CCCR_TO_ACTIVE_THREAD(T) (((T) << P4_CCCR_ACTIVE_THREAD_SHIFT) & \
P4_CCCR_ACTIVE_THREAD_MASK)
#define P4_CCCR_ESCR_SELECT_SHIFT 13
#define P4_CCCR_ESCR_SELECT_MASK 0x0000E000
#define P4_CCCR_TO_ESCR_SELECT(E) (((E) << P4_CCCR_ESCR_SELECT_SHIFT) & \
P4_CCCR_ESCR_SELECT_MASK)
#define P4_CCCR_ENABLE (1 << 12)
#define P4_CCCR_VALID_BITS (P4_CCCR_OVF | P4_CCCR_CASCADE | \
P4_CCCR_OVF_PMI_T1 | P4_CCCR_OVF_PMI_T0 | P4_CCCR_FORCE_OVF | \
P4_CCCR_EDGE | P4_CCCR_THRESHOLD_MASK | P4_CCCR_COMPLEMENT | \
P4_CCCR_COMPARE | P4_CCCR_ESCR_SELECT_MASK | P4_CCCR_ENABLE)
#define P4_ESCR_EVENT_SELECT_SHIFT 25
#define P4_ESCR_EVENT_SELECT_MASK 0x7E000000
#define P4_ESCR_TO_EVENT_SELECT(E) (((E) << P4_ESCR_EVENT_SELECT_SHIFT) & \
P4_ESCR_EVENT_SELECT_MASK)
#define P4_ESCR_EVENT_MASK_SHIFT 9
#define P4_ESCR_EVENT_MASK_MASK 0x01FFFE00
#define P4_ESCR_TO_EVENT_MASK(M) (((M) << P4_ESCR_EVENT_MASK_SHIFT) & \
P4_ESCR_EVENT_MASK_MASK)
#define P4_ESCR_TAG_VALUE_SHIFT 5
#define P4_ESCR_TAG_VALUE_MASK 0x000001E0
#define P4_ESCR_TO_TAG_VALUE(T) (((T) << P4_ESCR_TAG_VALUE_SHIFT) & \
P4_ESCR_TAG_VALUE_MASK)
#define P4_ESCR_TAG_ENABLE 0x00000010
#define P4_ESCR_T0_OS 0x00000008
#define P4_ESCR_T0_USR 0x00000004
#define P4_ESCR_T1_OS 0x00000002
#define P4_ESCR_T1_USR 0x00000001
#define P4_ESCR_OS P4_ESCR_T0_OS
#define P4_ESCR_USR P4_ESCR_T0_USR
#define P4_ESCR_VALID_BITS (P4_ESCR_EVENT_SELECT_MASK | \
P4_ESCR_EVENT_MASK_MASK | P4_ESCR_TAG_VALUE_MASK | \
P4_ESCR_TAG_ENABLE | P4_ESCR_T0_OS | P4_ESCR_T0_USR | P4_ESCR_T1_OS | \
P4_ESCR_T1_USR)
#define P4_PERFCTR_MASK 0xFFFFFFFFFFLL /* 40 bits */
/* Intel PPro, Celeron, P-II, P-III, Pentium-M PMCS */
#define P6_NPMCS 3 /* 1 TSC + 2 PMCs */
#define P6_EVSEL_CMASK_MASK 0xFF000000
#define P6_EVSEL_TO_CMASK(C) (((C) & 0xFF) << 24)
#define P6_EVSEL_INV (1 << 23)
#define P6_EVSEL_EN (1 << 22)
#define P6_EVSEL_INT (1 << 20)
#define P6_EVSEL_PC (1 << 19)
#define P6_EVSEL_E (1 << 18)
#define P6_EVSEL_OS (1 << 17)
#define P6_EVSEL_USR (1 << 16)
#define P6_EVSEL_UMASK_MASK 0x0000FF00
#define P6_EVSEL_TO_UMASK(U) (((U) & 0xFF) << 8)
#define P6_EVSEL_EVENT_SELECT(ES) ((ES) & 0xFF)
#define P6_EVSEL_RESERVED (1 << 21)
#define P6_MSR_EVSEL0 0x0186
#define P6_MSR_EVSEL1 0x0187
#define P6_MSR_PERFCTR0 0x00C1
#define P6_MSR_PERFCTR1 0x00C2
#define P6_PERFCTR_MASK 0xFFFFFFFFFFLL /* 40 bits */
/* Intel Pentium PMCs */
#define PENTIUM_NPMCS 3 /* 1 TSC + 2 PMCs */
#define PENTIUM_CESR_PC1 (1 << 25)
#define PENTIUM_CESR_CC1_MASK 0x01C00000
#define PENTIUM_CESR_TO_CC1(C) (((C) & 0x07) << 22)
#define PENTIUM_CESR_ES1_MASK 0x003F0000
#define PENTIUM_CESR_TO_ES1(E) (((E) & 0x3F) << 16)
#define PENTIUM_CESR_PC0 (1 << 9)
#define PENTIUM_CESR_CC0_MASK 0x000001C0
#define PENTIUM_CESR_TO_CC0(C) (((C) & 0x07) << 6)
#define PENTIUM_CESR_ES0_MASK 0x0000003F
#define PENTIUM_CESR_TO_ES0(E) ((E) & 0x3F)
#define PENTIUM_CESR_RESERVED 0xFC00FC00
#define PENTIUM_MSR_CESR 0x11
#define PENTIUM_MSR_CTR0 0x12
#define PENTIUM_MSR_CTR1 0x13
#ifdef _KERNEL
/*
* Prototypes
*/
#if defined(__i386__)
struct pmc_mdep *pmc_amd_initialize(void); /* AMD K7/K8 PMCs */
struct pmc_mdep *pmc_intel_initialize(void); /* Intel PMCs */
int pmc_initialize_p4(struct pmc_mdep *); /* Pentium IV PMCs */
int pmc_initialize_p5(struct pmc_mdep *); /* Pentium PMCs */
int pmc_initialize_p6(struct pmc_mdep *); /* Pentium Pro PMCs */
#endif /* defined(__i386__) */
#endif /* _KERNEL */
#endif /* _MACHINE_PMC_MDEP_H */
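Taken together, the P6_EVSEL_* macros above describe the layout of the event-select MSRs programmed by hwpmc_ppro.c. A minimal kernel-context sketch, not part of this change, of how such a value is composed; event code 0xC0 and the zero unit mask are example values only.

/*
 * Sketch: compose a P6 event selector the way p6_allocate_pmc() and
 * p6_start_pmc() do.  Event code 0xC0 and the zero unit mask are
 * arbitrary example values.
 */
static void
p6_evsel_sketch(void)
{
	uint32_t evsel;

	evsel  = P6_EVSEL_EVENT_SELECT(0xC0);	/* event code */
	evsel |= P6_EVSEL_TO_UMASK(0x00);	/* unit-mask qualifier */
	evsel |= P6_EVSEL_USR | P6_EVSEL_OS;	/* count user and kernel mode */
	evsel |= P6_EVSEL_EN;			/* enable; CTR1 shares CTR0's bit */
	wrmsr(P6_MSR_EVSEL0, evsel);		/* program MSR 0x186 */
}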


@@ -72,6 +72,10 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <machine/reg.h>
MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
@@ -662,7 +666,25 @@ do_execve(td, args, mac_p)
p->p_args = newargs;
newargs = NULL;
}
#ifdef HWPMC_HOOKS
/*
* Check if the process is using PMCs and if so do exec() time
* processing. This processing needs to happen AFTER the
* P_INEXEC flag is cleared.
*
* The proc lock needs to be released before taking the PMC
* SX.
*/
if (PMC_PROC_IS_USING_PMCS(p)) {
PROC_UNLOCK(p);
PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC,
(void *) &credential_changing);
} else
PROC_UNLOCK(p);
#else /* !HWPMC_HOOKS */
PROC_UNLOCK(p);
#endif
/* Set values passed into the program in registers. */
if (p->p_sysent->sv_setregs)

82
sys/kern/kern_pmc.c Normal file

@@ -0,0 +1,82 @@
/*-
* Copyright (c) 2003 Joseph Koshy
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#include <sys/smp.h>
struct sx pmc_sx;
/* Hook variable. */
int (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
/* Interrupt handler */
int (*pmc_intr)(int cpu, uintptr_t pc) = NULL;
/*
* Since PMC(4) may not be loaded in the current kernel, the
* convention followed is that a non-NULL value of 'pmc_hook' implies
* the presence of this kernel module.
*
* This requires us to protect 'pmc_hook' with a
* shared (sx) lock -- thus making the process of calling into PMC(4)
* somewhat more expensive than a simple 'if' check and indirect call.
*/
SX_SYSINIT(pmc, &pmc_sx, "pmc shared lock");
/*
* pmc_cpu_is_disabled
*
* return TRUE if the cpu specified has been disabled.
*/
int
pmc_cpu_is_disabled(int cpu)
{
#ifdef SMP
return ((hlt_cpus_mask & (1 << cpu)) != 0);
#else
return 0;
#endif
}
int
pmc_cpu_is_logical(int cpu)
{
#ifdef SMP
return ((logical_cpus_mask & (1 << cpu)) != 0);
#else
return 0;
#endif
}
#endif /* HWPMC_HOOKS */
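The comment above makes a non-NULL pmc_hook the signal that the driver is loaded, with pmc_sx guarding the transition. Below is a sketch, not part of this change, of how a module could publish and retract the hooks under that convention; the handler names are hypothetical, and the real registration is performed by hwpmc_mod.c (not shown here).

/*
 * Sketch: publish/retract the hooks under the pmc_sx convention.
 * Handler names are hypothetical.
 */
static int
example_hook(struct thread *td, int function, void *arg)
{
	(void) td; (void) function; (void) arg;
	return (0);
}

static int
example_intr(int cpu, uintptr_t pc)
{
	(void) cpu; (void) pc;
	return (0);
}

static void
example_register(void)
{
	sx_xlock(&pmc_sx);
	pmc_hook = example_hook;	/* non-NULL signals "driver loaded" */
	pmc_intr = example_intr;
	sx_xunlock(&pmc_sx);
}

static void
example_deregister(void)
{
	sx_xlock(&pmc_sx);
	pmc_hook = NULL;		/* base kernel stops calling in */
	pmc_intr = NULL;
	sx_xunlock(&pmc_sx);
}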


@@ -53,6 +53,10 @@ __FBSDID("$FreeBSD$");
#include <sys/turnstile.h>
#include <machine/smp.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
/*
* INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
* the range 100-256 Hz (approximately).
@@ -959,8 +963,18 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
newtd = choosethread();
}
if (td != newtd)
if (td != newtd) {
#ifdef HWPMC_HOOKS
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
cpu_switch(td, newtd);
#ifdef HWPMC_HOOKS
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
}
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
}
@@ -1283,6 +1297,13 @@ sched_unbind(struct thread* td)
td->td_kse->ke_flags &= ~KEF_BOUND;
}
int
sched_is_bound(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
return (td->td_kse->ke_flags & KEF_BOUND);
}
int
sched_load(void)
{


@@ -53,6 +53,10 @@ __FBSDID("$FreeBSD$");
#include <sys/ktrace.h>
#endif
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
#include <machine/cpu.h>
#include <machine/smp.h>
@@ -1391,8 +1395,18 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
kseq_load_add(KSEQ_SELF(), newtd->td_kse);
} else
newtd = choosethread();
if (td != newtd)
if (td != newtd) {
#ifdef HWPMC_HOOKS
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
cpu_switch(td, newtd);
#ifdef HWPMC_HOOKS
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
}
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
@@ -1951,6 +1965,13 @@ sched_unbind(struct thread *td)
td->td_kse->ke_flags &= ~KEF_BOUND;
}
int
sched_is_bound(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
return (td->td_kse->ke_flags & KEF_BOUND);
}
int
sched_load(void)
{


@@ -91,6 +91,7 @@ SUBDIR= ${_3dfx} \
hifn \
hme \
${_hptmv} \
hwpmc \
${_i2c} \
${_ibcs2} \
${_ichwd} \


@@ -0,0 +1,21 @@
#
# $FreeBSD$
#
.PATH: ${.CURDIR}/../../hwpmc
KMOD= hwpmc
SRCS= hwpmc_mod.c
WARNS?= 2
.if ${MACHINE_ARCH} == "i386"
SRCS+= hwpmc_amd.c hwpmc_intel.c hwpmc_piv.c hwpmc_ppro.c hwpmc_pentium.c
.endif
.if ${MACHINE_ARCH} == "amd64"
SRCS+= hwpmc_amd.c
.endif
.include <bsd.kmod.mk>


@@ -57,7 +57,7 @@
* is created, otherwise 1.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 600023 /* Master, propagated to newvers */
#define __FreeBSD_version 600024 /* Master, propagated to newvers */
#ifndef LOCORE
#include <sys/types.h>

1418
sys/sys/pmc.h Normal file

File diff suppressed because it is too large

93
sys/sys/pmckern.h Normal file

@@ -0,0 +1,93 @@
/*-
* Copyright (c) 2003, Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* PMC interface used by the base kernel.
*/
#ifndef _SYS_PMCKERN_H_
#define _SYS_PMCKERN_H_
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sx.h>
#define PMC_FN_PROCESS_EXIT 1
#define PMC_FN_PROCESS_EXEC 2
#define PMC_FN_PROCESS_FORK 3
#define PMC_FN_CSW_IN 4
#define PMC_FN_CSW_OUT 5
/* hook */
extern int (*pmc_hook)(struct thread *_td, int _function, void *_arg);
extern int (*pmc_intr)(int cpu, uintptr_t pc);
/* SX lock protecting the hook */
extern struct sx pmc_sx;
/* hook invocation; for use within the kernel */
#define PMC_CALL_HOOK(t, cmd, arg) \
do { \
sx_slock(&pmc_sx); \
if (pmc_hook != NULL) \
(pmc_hook)((t), (cmd), (arg)); \
sx_sunlock(&pmc_sx); \
} while (0)
/* hook invocation that needs an exclusive lock */
#define PMC_CALL_HOOK_X(t, cmd, arg) \
do { \
sx_xlock(&pmc_sx); \
if (pmc_hook != NULL) \
(pmc_hook)((t), (cmd), (arg)); \
sx_xunlock(&pmc_sx); \
} while (0)
/* context switches cannot take locks */
#define PMC_SWITCH_CONTEXT(t, cmd) \
do { \
if (pmc_hook != NULL) \
(pmc_hook)((t), (cmd), NULL); \
} while (0)
/*
* check if a process is using HWPMCs.
*/
#define PMC_PROC_IS_USING_PMCS(p) \
(__predict_false(atomic_load_acq_int(&(p)->p_flag) & \
P_HWPMC))
/* helper functions */
int pmc_cpu_is_disabled(int _cpu);
int pmc_cpu_is_logical(int _cpu);
#endif /* _SYS_PMCKERN_H_ */
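For reference, a sketch, not part of this change, of a typical base-kernel call site built from the macros above: the P_HWPMC test is cheap, and the shared-lock variant is used when no exclusive access is required. The surrounding function is hypothetical; the call sites actually added by this commit are in kern_exec.c and the two schedulers.

/*
 * Sketch: a base-kernel call site using the macros above.  The
 * function itself is hypothetical.
 */
static void
example_exit_hook(struct proc *p)
{
#ifdef HWPMC_HOOKS
	if (PMC_PROC_IS_USING_PMCS(p))
		PMC_CALL_HOOK(curthread, PMC_FN_PROCESS_EXIT, (void *) p);
#endif
}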


@@ -634,6 +634,8 @@ struct proc {
#define P_PROTECTED 0x100000 /* Do not kill on memory overcommit. */
#define P_SIGEVENT 0x200000 /* Process pending signals changed. */
#define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */
#define P_HWPMC 0x800000 /* Process is using HWPMCs */
#define P_JAILED 0x1000000 /* Process is in jail. */
#define P_INEXEC 0x4000000 /* Process is in execve(). */


@@ -87,6 +87,7 @@ void sched_bind(struct thread *td, int cpu);
static __inline void sched_pin(void);
void sched_unbind(struct thread *td);
static __inline void sched_unpin(void);
int sched_is_bound(struct thread *td);
/*
* These procedures tell the process data structure allocation code how


@@ -112,6 +112,8 @@ SUBDIR= ac \
${_pcvt} \
periodic \
pkg_install \
pmccontrol \
pmcstat \
${_pnpinfo} \
powerd \
ppp \


@@ -0,0 +1,17 @@
#
# $FreeBSD$
#
PROG= pmccontrol
MAN= pmccontrol.8
DPADD= ${LIBPMC}
LDADD= -lpmc
WARNS= 6
CFLAGS+= -I${.CURDIR}/../../sys -I${.CURDIR}/../../lib/libpmc
SRCS= pmccontrol.c
.include <bsd.prog.mk>


@@ -0,0 +1,132 @@
.\" Copyright (c) 2003 Joseph Koshy. All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" This software is provided by Joseph Koshy ``as is'' and
.\" any express or implied warranties, including, but not limited to, the
.\" implied warranties of merchantability and fitness for a particular purpose
.\" are disclaimed. in no event shall Joseph Koshy be liable
.\" for any direct, indirect, incidental, special, exemplary, or consequential
.\" damages (including, but not limited to, procurement of substitute goods
.\" or services; loss of use, data, or profits; or business interruption)
.\" however caused and on any theory of liability, whether in contract, strict
.\" liability, or tort (including negligence or otherwise) arising in any way
.\" out of the use of this software, even if advised of the possibility of
.\" such damage.
.\"
.\" $FreeBSD$
.\"
.Dd Dec 15, 2003
.Os
.Dt PMCCONTROL 8
.Sh NAME
.Nm pmccontrol
.Nd control hardware performance monitoring counters
.Sh SYNOPSIS
.Nm
.Oo
.Op Fl c Ar cpu
.Op Fl e Ar pmc
.Op Fl d Ar pmc
.Oc Ns ...
.Nm
.Op Fl lL
.Nm
.Op Fl s
.Sh DESCRIPTION
The
.Nm
utility controls the operation of the system's hardware performance
monitoring counters.
.Sh OPTIONS
The
.Nm
utility processes options in command line order, so later options modify
the effect of earlier ones.
The following options are available:
.Bl -tag -width indent
.It Fl c Ar cpu
Subsequent enable and disable options affect the CPU
denoted by
.Ar cpu .
The argument
.Ar cpu
is either a number denoting a CPU number in the system, or the string
.Dq Li \&* ,
denoting all CPUs in the system.
.It Fl d Ar pmc
Disable PMC number
.Ar pmc
on the CPU specified by
.Fl c ,
preventing it from being used till subsequently re-enabled.
The argument
.Ar pmc
is either a number denoting a specified PMC, or the string
.Dq Li \&*
denoting all the PMCs on the specified CPU.
.Pp
Only idle PMCs may be disabled.
.\" XXX this probably needs to be fixed.
.It Fl e Ar pmc
Enable PMC number
.Ar pmc ,
on the CPU specified by
.Fl c ,
allowing it to be used in the future.
The argument
.Ar pmc
is either a number denoting a specified PMC, or the string
.Dq Li \&*
denoting all the PMCs on the specified CPU.
If PMC
.Ar pmc
is already enabled, this option has no effect.
.It Fl l
List available hardware performance counters and their current
disposition.
.It Fl L
List available hardware performance counter classes and their
supported event names.
.It Fl s
Print driver statistics maintained by
.Xr hwpmc 4 .
.El
.Sh EXAMPLES
To disable all PMCs on all CPUs, use the command:
.Dl pmccontrol -d\&*
.Pp
To enable all PMCs on all CPUs, use:
.Dl pmccontrol -e\&*
.Pp
To disable PMCs 0 and 1 on CPU 2, use:
.Dl pmccontrol -c2 -d0 -d1
.Pp
To disable PMC 0 of CPU 0 only, and enable all other PMCS on all other
CPUs, use:
.Dl pmccontrol -c\&* -e\&* -c0 -d0
.Sh DIAGNOSTICS
.Ex -std pmccontrol
.Sh HISTORY
The
.Nm
utility is proposed to be integrated into
.Fx
sometime after
.Fx 5.2 .
.Nm
.Bt
.Sh AUTHORS
.An Joseph Koshy Aq jkoshy@FreeBSD.org
.Sh SEE ALSO
.Xr pmc 3 ,
.Xr hwpmc 4 ,
.Xr pmcstat 8 ,
.Xr sysctl 8


@@ -0,0 +1,476 @@
/*-
* Copyright (c) 2003,2004 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <pmc.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
/* Compile time defaults */
#define PMCC_PRINT_USAGE 0
#define PMCC_PRINT_EVENTS 1
#define PMCC_LIST_STATE 2
#define PMCC_ENABLE_DISABLE 3
#define PMCC_SHOW_STATISTICS 4
#define PMCC_CPU_ALL -1
#define PMCC_CPU_WILDCARD '*'
#define PMCC_PMC_ALL -1
#define PMCC_PMC_WILDCARD '*'
#define PMCC_OP_IGNORE 0
#define PMCC_OP_DISABLE 1
#define PMCC_OP_ENABLE 2
#define PMCC_PROGRAM_NAME "pmccontrol"
STAILQ_HEAD(pmcc_op_list, pmcc_op) head = STAILQ_HEAD_INITIALIZER(head);
struct pmcc_op {
char op_cpu;
char op_pmc;
char op_op;
STAILQ_ENTRY(pmcc_op) op_next;
};
/* Function Prototypes */
#if DEBUG
static void pmcc_init_debug(void);
#endif
static int pmcc_do_list_state(void);
static int pmcc_do_enable_disable(struct pmcc_op_list *);
static int pmcc_do_list_events(void);
/* Globals */
static char usage_message[] =
"Usage:\n"
" " PMCC_PROGRAM_NAME " -l\n"
" " PMCC_PROGRAM_NAME " -s\n"
" " PMCC_PROGRAM_NAME " [-e pmc | -d pmc | -c cpu] ...";
#if DEBUG
FILE *debug_stream = NULL;
#endif
#if DEBUG
#define DEBUG_MSG(...) \
(void) fprintf(debug_stream, "[pmccontrol] " __VA_ARGS__);
#else
#define DEBUG_MSG(m) /* */
#endif /* !DEBUG */
int pmc_syscall = -1;
#define PMC_CALL(cmd, params) \
if ((error = syscall(pmc_syscall, PMC_OP_##cmd, (params))) != 0) \
{ \
DEBUG_MSG("ERROR: syscall [" #cmd "]"); \
exit(EX_OSERR); \
}
#if DEBUG
/* log debug messages to a separate file */
static void
pmcc_init_debug(void)
{
char *fn;
fn = getenv("PMCCONTROL_DEBUG");
if (fn != NULL)
{
debug_stream = fopen(fn, "w");
if (debug_stream == NULL)
debug_stream = stderr;
} else
debug_stream = stderr;
}
#endif
static int
pmcc_do_enable_disable(struct pmcc_op_list *op_list)
{
unsigned char op;
int c, error, i, j, ncpu, npmc, t;
int cpu, pmc;
struct pmcc_op *np;
unsigned char *map;
if ((ncpu = pmc_ncpu()) < 0)
err(EX_OSERR, "Unable to determine the number of cpus");
/* determine the maximum number of PMCs in any CPU */
npmc = 0;
for (c = 0; c < ncpu; c++) {
if ((t = pmc_npmc(c)) < 0)
err(EX_OSERR, "Unable to determine the number of PMCs in "
"CPU %d", c);
npmc = t > npmc ? t : npmc;
}
if (npmc == 0)
errx(EX_CONFIG, "No PMCs found");
if ((map = malloc(npmc * ncpu)) == NULL)
err(EX_SOFTWARE, "Out of memory");
(void) memset(map, PMCC_OP_IGNORE, npmc*ncpu);
error = 0;
STAILQ_FOREACH(np, op_list, op_next) {
cpu = np->op_cpu;
pmc = np->op_pmc;
op = np->op_op;
if (cpu >= ncpu)
errx(EX_DATAERR, "CPU id too large: \"%d\"", cpu);
if (pmc >= npmc)
errx(EX_DATAERR, "PMC id too large: \"%d\"", pmc);
#define MARKMAP(M,C,P,V) do { \
*((M) + (C)*npmc + (P)) = (V); \
} while (0)
#define SET_PMCS(C,P,V) do { \
if ((P) == PMCC_PMC_ALL) { \
for (j = 0; j < npmc; j++) \
MARKMAP(map, (C), j, (V)); \
} else \
MARKMAP(map, (C), (P), (V)); \
} while (0)
#define MAP(M,C,P) (*((M) + (C)*npmc + (P)))
if (cpu == PMCC_CPU_ALL)
for (i = 0; i < ncpu; i++)
SET_PMCS(i, pmc, op);
else
SET_PMCS(cpu, pmc, op);
}
/* Configure PMCS */
for (i = 0; i < ncpu; i++)
for (j = 0; j < npmc; j++) {
unsigned char b;
b = MAP(map, i, j);
error = 0;
if (b == PMCC_OP_ENABLE)
error = pmc_enable(i, j);
else if (b == PMCC_OP_DISABLE)
error = pmc_disable(i, j);
if (error < 0)
err(EX_OSERR, "%s of PMC %d on CPU %d failed",
b == PMCC_OP_ENABLE ? "Enable" :
"Disable", j, i);
}
return error;
}
static int
pmcc_do_list_state(void)
{
size_t dummy;
int c, cpu, n, npmc, ncpu;
unsigned int logical_cpus_mask;
struct pmc_info *pd;
struct pmc_op_getpmcinfo *pi;
const struct pmc_op_getcpuinfo *pc;
if (pmc_cpuinfo(&pc) != 0)
err(EX_OSERR, "Unable to determine CPU information");
dummy = sizeof(logical_cpus_mask);
if (sysctlbyname("machdep.logical_cpus_mask", &logical_cpus_mask,
&dummy, NULL, 0) < 0)
logical_cpus_mask = 0;
ncpu = pc->pm_ncpu;
for (c = cpu = 0; cpu < ncpu; cpu++) {
#if i386
if (pc->pm_cputype == PMC_CPU_INTEL_PIV &&
(logical_cpus_mask & (1 << cpu)))
continue; /* skip P4-style 'logical' cpus */
#endif
if (pmc_pmcinfo(cpu, &pi) < 0)
err(EX_OSERR, "Unable to get PMC status for CPU %d",
cpu);
printf("#CPU %d:\n", c++);
npmc = pmc_npmc(cpu);
printf("#N NAME CLASS STATE ROW-DISP\n");
for (n = 0; n < npmc; n++) {
pd = &pi->pm_pmcs[n];
printf(" %-2d %-16s %-6s %-8s %-10s",
n,
pd->pm_name,
pmc_name_of_class(pd->pm_class),
pd->pm_enabled ? "ENABLED" : "DISABLED",
pmc_name_of_disposition(pd->pm_rowdisp));
if (pd->pm_ownerpid != -1) {
printf(" (pid %d)", pd->pm_ownerpid);
printf(" %-32s",
pmc_name_of_event(pd->pm_event));
if (PMC_IS_SAMPLING_MODE(pd->pm_mode))
printf(" (reload count %jd)",
pd->pm_reloadcount);
}
printf("\n");
}
free(pi);
}
return 0;
}
static int
pmcc_do_list_events(void)
{
enum pmc_class c;
unsigned int i, j, nevents;
const char **eventnamelist;
const struct pmc_op_getcpuinfo *ci;
if (pmc_cpuinfo(&ci) != 0)
err(EX_OSERR, "Unable to determine CPU information");
eventnamelist = NULL;
for (i = 0; i < ci->pm_nclass; i++) {
c = ci->pm_classes[i];
printf("%s\n", pmc_name_of_class(c));
if (pmc_event_names_of_class(c, &eventnamelist, &nevents) < 0)
err(EX_OSERR, "ERROR: Cannot find information for "
"event class \"%s\"", pmc_name_of_class(c));
for (j = 0; j < nevents; j++)
printf("\t%s\n", eventnamelist[j]);
free(eventnamelist);
}
return 0;
}
static int
pmcc_show_statistics(void)
{
struct pmc_op_getdriverstats gms;
if (pmc_get_driver_stats(&gms) < 0)
err(EX_OSERR, "ERROR: cannot retrieve driver statistics");
/*
* Print statistics.
*/
#define PRINT(N,V) (void) printf("%20s %d\n", (N), gms.pm_##V)
PRINT("interrupts-processed", intr_processed);
PRINT("interrupts-ignored", intr_ignored);
PRINT("system-calls", syscalls);
PRINT("system-calls-with-errors", syscall_errors);
return 0;
}
/*
* Main
*/
int
main(int argc, char **argv)
{
int error, command, currentcpu, option, pmc;
char *dummy;
struct pmcc_op *p;
#if DEBUG
pmcc_init_debug();
#endif
/* parse args */
currentcpu = PMCC_CPU_ALL;
command = PMCC_PRINT_USAGE;
error = 0;
STAILQ_INIT(&head);
while ((option = getopt(argc, argv, ":c:d:e:lLs")) != -1)
switch (option) {
case 'L':
if (command != PMCC_PRINT_USAGE) {
error = 1;
break;
}
command = PMCC_PRINT_EVENTS;
break;
case 'c':
if (command != PMCC_PRINT_USAGE &&
command != PMCC_ENABLE_DISABLE) {
error = 1;
break;
}
command = PMCC_ENABLE_DISABLE;
if (*optarg == PMCC_CPU_WILDCARD)
currentcpu = PMCC_CPU_ALL;
else {
currentcpu = strtoul(optarg, &dummy, 0);
if (*dummy != '\0' || currentcpu < 0)
errx(EX_DATAERR,
"\"%s\" is not a valid CPU id",
optarg);
}
break;
case 'd':
case 'e':
if (command != PMCC_PRINT_USAGE &&
command != PMCC_ENABLE_DISABLE) {
error = 1;
break;
}
command = PMCC_ENABLE_DISABLE;
if (*optarg == PMCC_PMC_WILDCARD)
pmc = PMCC_PMC_ALL;
else {
pmc = strtoul(optarg, &dummy, 0);
if (*dummy != '\0' || pmc < 0)
errx(EX_DATAERR,
"\"%s\" is not a valid PMC id",
optarg);
}
if ((p = malloc(sizeof(*p))) == NULL)
err(EX_SOFTWARE, "Out of memory");
p->op_cpu = currentcpu;
p->op_pmc = pmc;
p->op_op = option == 'd' ? PMCC_OP_DISABLE :
PMCC_OP_ENABLE;
STAILQ_INSERT_TAIL(&head, p, op_next);
break;
case 'l':
if (command != PMCC_PRINT_USAGE) {
error = 1;
break;
}
command = PMCC_LIST_STATE;
break;
case 's':
if (command != PMCC_PRINT_USAGE) {
error = 1;
break;
}
command = PMCC_SHOW_STATISTICS;
break;
case ':':
errx(EX_USAGE,
"Missing argument to option '-%c'", optopt);
break;
case '?':
warnx("Unrecognized option \"-%c\"", optopt);
errx(EX_USAGE, usage_message);
break;
default:
error = 1;
break;
}
if (command == PMCC_PRINT_USAGE)
(void) errx(EX_USAGE, usage_message);
if (error)
exit(EX_USAGE);
if (pmc_init() < 0)
err(EX_UNAVAILABLE,
"Initialization of the pmc(3) library failed");
switch (command) {
case PMCC_LIST_STATE:
error = pmcc_do_list_state();
break;
case PMCC_PRINT_EVENTS:
error = pmcc_do_list_events();
break;
case PMCC_SHOW_STATISTICS:
error = pmcc_show_statistics();
break;
case PMCC_ENABLE_DISABLE:
if (STAILQ_EMPTY(&head))
errx(EX_USAGE, "No PMCs specified to enable or disable");
error = pmcc_do_enable_disable(&head);
break;
default:
assert(0);
}
if (error != 0)
err(EX_OSERR, "Command failed");
exit(0);
}

17
usr.sbin/pmcstat/Makefile Normal file

@@ -0,0 +1,17 @@
#
# $FreeBSD$
#
PROG= pmcstat
MAN= pmcstat.8
DPADD= ${LIBPMC}
LDADD= -lpmc -lm
WARNS= 6
CFLAGS+= -I${.CURDIR}/../../sys -I${.CURDIR}/../../lib/libpmc
SRCS= pmcstat.c
.include <bsd.prog.mk>

196
usr.sbin/pmcstat/pmcstat.8 Normal file

@@ -0,0 +1,196 @@
.\" Copyright (c) 2003 Joseph Koshy. All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" This software is provided by Joseph Koshy ``as is'' and
.\" any express or implied warranties, including, but not limited to, the
.\" implied warranties of merchantability and fitness for a particular purpose
.\" are disclaimed. in no event shall Joseph Koshy be liable
.\" for any direct, indirect, incidental, special, exemplary, or consequential
.\" damages (including, but not limited to, procurement of substitute goods
.\" or services; loss of use, data, or profits; or business interruption)
.\" however caused and on any theory of liability, whether in contract, strict
.\" liability, or tort (including negligence or otherwise) arising in any way
.\" out of the use of this software, even if advised of the possibility of
.\" such damage.
.\"
.\" $FreeBSD$
.\"
.Dd Dec 15, 2003
.Os
.Dt PMCSTAT 8
.Sh NAME
.Nm pmcstat
.Nd performance measurement with performance monitoring hardware
.Sh SYNOPSIS
.Nm
.Op Fl C
.Op Fl O Ar logfilename
.Op Fl P Ar event-spec
.Op Fl S Ar event-spec
.Op Fl c Ar cpu
.Op Fl d
.Op Fl n Ar count
.Op Fl o Ar outputfile
.Op Fl p Ar event-spec
.Op Fl s Ar event-spec
.Op Fl t Ar pid
.Op Fl w Ar interval
.Op command Op args
.Sh DESCRIPTION
The
.Nm
utility measures system performance using the facilities provided by
.Xr hwpmc 4 .
.Pp
The
.Nm
utility can measure both hardware events seen by the system as a
whole, and those seen when a specified process is executing on the
system's CPUs.
If a specific process is being targeted (for example,
if the
.Fl t Ar pid
option is specified, or if a command line is specified using
.Ar command ) ,
then measurement occurs till the target process exits or
the
.Nm
utility is interrupted by the user.
If a specific process is not targeted for measurement, then
.Nm
will perform system-wide measurements till interrupted by the
user.
.Pp
A given invocation of
.Nm
can mix allocations of system-mode and process-mode PMCs, of both
counting and sampling flavors.
The values of all counting PMCs are printed in human readable form
at regular intervals by
.Nm .
The output of sampling PMCs is configured to go to a log file, for later
analysis by tools like
.Xr pmcreport 8 .
.Pp
Hardware events to measure are specified to
.Nm
using event specifier strings
.Ar event-spec .
The syntax of these event specifiers is machine dependent and is
documented in
.Xr pmc 3 .
.Pp
A process-mode PMC may be configured to be inheritable by the target
process' current and future children.
.Sh OPTIONS
The following options are available:
.Bl -tag -width indent
.It Fl C
Toggle between showing cumulative and incremental counts for
subsequent counting mode PMCs specified on the command line.
The default is to show incremental counts.
.It Fl O Ar logfilename
Send the output of sampling mode PMCs to
.Ar logfilename .
The default file name is
.Pa pmcstat.out ,
in the current directory.
.It Fl P Ar event-spec
Allocate a process mode sampling PMC measuring hardware events
specified in
.Ar event-spec .
.It Fl S Ar event-spec
Allocate a system mode sampling PMC measuring hardware events
specified in
.Ar event-spec .
.It Fl c Ar cpu
Set the cpu for subsequent system mode PMCs specified on the
command line to
.Ar cpu .
The default is to allocate system mode PMCs on CPU zero.
.It Fl d
Toggle between process mode PMCs measuring events for the target
process' current and future children or only measuring events for
the attached process.
The default is to measure events for the target process alone.
.It Fl n Ar rate
Set the default sampling rate for subsequent sampling mode
PMCs specified on the command line.
The default is to configure PMCs to sample the CPU's instruction
pointer every 65536 events.
.It Fl o Ar outputfile
Send the periodic counter output of
.Nm
to file
.Ar outputfile .
The default is to send output to
.Pa stderr .
.It Fl p Ar event-spec
Allocate a process mode counting PMC measuring hardware events
specified in
.Ar event-spec .
.It Fl s Ar event-spec
Allocate a system mode counting PMC measuring hardware events
specified in
.Ar event-spec .
.It Fl t Ar pid
Attach all process mode PMCs allocated to the process with PID
.Ar pid .
The option is not allowed in conjunction with specifying a
command using
.Ar command .
.It Fl w Ar secs
Print the values of all counting mode PMCs every
.Ar secs
seconds.
The argument
.Ar secs
may be a fractional value.
The default interval is 5 seconds.
.El
.Pp
If
.Ar command
is specified, it is executed using
.Xr execvp 3 .
.Sh EXAMPLES
To perform system-wide statistical sampling on an AMD Athlon CPU with
samples taken every 32768 instruction retirals and data being sampled
to file
.Dq sample.stat ,
use:
.Dl pmcstat -O sample.stat -n 32768 -S k7-retired-instructions
.Pp
To execute
.Dq mozilla
and measure the number of data cache misses suffered
by it and its children every 12 seconds on an AMD Athlon, use:
.Dl pmcstat -d -w 12 -p k7-dc-misses mozilla
.Sh DIAGNOSTICS
.Ex -std pmcstat
.Sh HISTORY
The
.Nm
utility is proposed to be integrated into
.Fx
sometime after
.Fx 5.2 .
.Nm
.Bt
.Sh AUTHORS
.An Joseph Koshy Aq jkoshy@FreeBSD.org
.Sh SEE ALSO
.Xr execvp 3 ,
.Xr pmc 3 ,
.Xr hwpmc 4 ,
.Xr pmccontrol 8 ,
.Xr pmcreport 8 ,
.Xr sysctl 8

728
usr.sbin/pmcstat/pmcstat.c Normal file

@@ -0,0 +1,728 @@
/*-
* Copyright (c) 2003,2004 Joseph Koshy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#include <sys/wait.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <math.h>
#include <pmc.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
/* Operation modes */
#define FLAG_HAS_PID 0x00000001
#define FLAG_HAS_WAIT_INTERVAL 0x00000002
#define FLAG_HAS_LOG_FILE 0x00000004
#define FLAG_HAS_PROCESS 0x00000008
#define FLAG_USING_SAMPLING 0x00000010
#define FLAG_USING_COUNTING 0x00000020
#define FLAG_USING_PROCESS_PMC 0x00000040
#define DEFAULT_SAMPLE_COUNT 65536
#define DEFAULT_WAIT_INTERVAL 5.0
#define DEFAULT_DISPLAY_HEIGHT 23
#define DEFAULT_LOGFILE_NAME "pmcstat.out"
#define PRINT_HEADER_PREFIX "# "
#define READPIPEFD 0
#define WRITEPIPEFD 1
#define NPIPEFD 2
struct pmcstat_ev {
STAILQ_ENTRY(pmcstat_ev) ev_next;
char *ev_spec; /* event specification */
char *ev_name; /* (derived) event name */
enum pmc_mode ev_mode; /* desired mode */
int ev_count; /* associated count if in sampling mode */
int ev_cpu; /* specific cpu if requested */
int ev_descendants; /* attach to descendants */
int ev_cumulative; /* show cumulative counts */
int ev_fieldwidth; /* print width */
int ev_fieldskip; /* #leading spaces */
pmc_value_t ev_saved; /* saved value for incremental counts */
pmc_id_t ev_pmcid; /* allocated ID */
};
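/* Parsed command line arguments and output state. */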
struct pmcstat_args {
int pa_flags;
pid_t pa_pid;
FILE *pa_outputfile;
FILE *pa_logfile;
double pa_interval;
int pa_argc;
char **pa_argv;
STAILQ_HEAD(, pmcstat_ev) pa_head;
} args;
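/* Global state: interrupt flag, display height, parent/child pipe, kqueue. */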
int pmcstat_interrupt = 0;
int pmcstat_displayheight = DEFAULT_DISPLAY_HEIGHT;
int pmcstat_pipefd[NPIPEFD];
int pmcstat_kq;
/* Function prototypes */
void pmcstat_cleanup(struct pmcstat_args *_a);
void pmcstat_print_counters(struct pmcstat_args *_a);
void pmcstat_print_headers(struct pmcstat_args *_a);
void pmcstat_print_pmcs(struct pmcstat_args *_a);
void pmcstat_setup_process(struct pmcstat_args *_a);
void pmcstat_show_usage(void);
void pmcstat_start_pmcs(struct pmcstat_args *_a);
void pmcstat_start_process(struct pmcstat_args *_a);
/*
* cleanup
*/
void
pmcstat_cleanup(struct pmcstat_args *a)
{
struct pmcstat_ev *ev, *tmp;
/* de-configure the log file if present. */
if (a->pa_flags & FLAG_USING_SAMPLING) {
(void) pmc_configure_logfile(-1);
(void) fclose(a->pa_logfile);
}
/* release allocated PMCs. */
STAILQ_FOREACH_SAFE(ev, &a->pa_head, ev_next, tmp)
if (ev->ev_pmcid != PMC_ID_INVALID) {
if (pmc_release(ev->ev_pmcid) < 0)
err(EX_OSERR, "ERROR: cannot release pmc "
"%d \"%s\"", ev->ev_pmcid, ev->ev_name);
free(ev->ev_name);
free(ev->ev_spec);
STAILQ_REMOVE(&a->pa_head, ev, pmcstat_ev, ev_next);
free(ev);
}
}
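/*
 * Start all allocated PMCs, cleaning up on failure.
 */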
void
pmcstat_start_pmcs(struct pmcstat_args *a)
{
struct pmcstat_ev *ev;
STAILQ_FOREACH(ev, &args.pa_head, ev_next) {
assert(ev->ev_pmcid != PMC_ID_INVALID);
if (pmc_start(ev->ev_pmcid) < 0) {
warn("ERROR: Cannot start pmc %d \"%s\"",
ev->ev_pmcid, ev->ev_name);
			pmcstat_cleanup(a);
			exit(EX_OSERR);
}
}
}
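/*
 * Print a header line naming each counting mode PMC.
 */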
void
pmcstat_print_headers(struct pmcstat_args *a)
{
struct pmcstat_ev *ev;
int c;
(void) fprintf(a->pa_outputfile, PRINT_HEADER_PREFIX);
STAILQ_FOREACH(ev, &a->pa_head, ev_next) {
if (PMC_IS_SAMPLING_MODE(ev->ev_mode))
continue;
c = PMC_IS_SYSTEM_MODE(ev->ev_mode) ? 's' : 'p';
if (ev->ev_fieldskip != 0) {
(void) fprintf(a->pa_outputfile, "%*s%c/%*s ",
ev->ev_fieldskip, "", c,
ev->ev_fieldwidth - ev->ev_fieldskip - 2,
ev->ev_name);
} else
(void) fprintf(a->pa_outputfile, "%c/%*s ",
c, ev->ev_fieldwidth - 2, ev->ev_name);
}
(void) fflush(a->pa_outputfile);
}
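/*
 * Read and print the current values of all counting mode PMCs.
 */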
void
pmcstat_print_counters(struct pmcstat_args *a)
{
int extra_width;
struct pmcstat_ev *ev;
pmc_value_t value;
extra_width = sizeof(PRINT_HEADER_PREFIX) - 1;
STAILQ_FOREACH(ev, &a->pa_head, ev_next) {
/* skip sampling mode counters */
if (PMC_IS_SAMPLING_MODE(ev->ev_mode))
continue;
if (pmc_read(ev->ev_pmcid, &value) < 0)
err(EX_OSERR, "ERROR: Cannot read pmc "
"\"%s\"", ev->ev_name);
(void) fprintf(a->pa_outputfile, "%*ju ",
ev->ev_fieldwidth + extra_width, (uintmax_t)
ev->ev_cumulative ? value : (value - ev->ev_saved));
if (ev->ev_cumulative == 0)
ev->ev_saved = value;
extra_width = 0;
}
(void) fflush(a->pa_outputfile);
}
/*
* Print output
*/
void
pmcstat_print_pmcs(struct pmcstat_args *a)
{
static int linecount = 0;
if (++linecount > pmcstat_displayheight) {
(void) fprintf(a->pa_outputfile, "\n");
linecount = 1;
}
if (linecount == 1)
pmcstat_print_headers(a);
(void) fprintf(a->pa_outputfile, "\n");
pmcstat_print_counters(a);
return;
}
/*
* Do process profiling
*
* If a pid was specified, attach each allocated PMC to the target
* process. Otherwise, fork a child and attach the PMCs to the child,
* and have the child exec() the target program.
*/
void
pmcstat_setup_process(struct pmcstat_args *a)
{
char token;
struct pmcstat_ev *ev;
struct kevent kev;
if (a->pa_flags & FLAG_HAS_PID) {
STAILQ_FOREACH(ev, &args.pa_head, ev_next)
if (pmc_attach(ev->ev_pmcid, a->pa_pid) != 0)
err(EX_OSERR, "ERROR: cannot attach pmc \"%s\" to "
"process %d", ev->ev_name, (int) a->pa_pid);
} else {
/*
		 * We need to fork a new process and start the child
		 * using execvp().  Before doing the exec() the child
* process reads its pipe for a token so that the parent
* can finish doing its pmc_attach() calls.
*/
if (pipe(pmcstat_pipefd) < 0)
err(EX_OSERR, "ERROR: cannot create pipe");
switch (a->pa_pid = fork()) {
case -1:
err(EX_OSERR, "ERROR: cannot fork");
/*NOTREACHED*/
case 0: /* child */
/* wait for our parent to signal us */
(void) close(pmcstat_pipefd[WRITEPIPEFD]);
if (read(pmcstat_pipefd[READPIPEFD], &token, 1) < 0)
err(EX_OSERR, "ERROR (child): cannot read "
"token");
(void) close(pmcstat_pipefd[READPIPEFD]);
/* exec() the program requested */
execvp(*args.pa_argv, args.pa_argv);
err(EX_OSERR, "ERROR (child): execvp failed");
/*NOTREACHED*/
default: /* parent */
(void) close(pmcstat_pipefd[READPIPEFD]);
/* attach all our PMCs to the child */
STAILQ_FOREACH(ev, &args.pa_head, ev_next)
if (PMC_IS_VIRTUAL_MODE(ev->ev_mode) &&
pmc_attach(ev->ev_pmcid, a->pa_pid) != 0)
err(EX_OSERR, "ERROR: cannot attach pmc "
"\"%s\" to process %d", ev->ev_name,
(int) a->pa_pid);
}
}
/* Ask to be notified via a kevent when the child exits */
EV_SET(&kev, a->pa_pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, 0);
if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
err(EX_OSERR, "ERROR: cannot monitor process %d",
a->pa_pid);
return;
}
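/*
 * Signal the forked child (if any) that it may now exec() the target
 * program; an already running target needs no action.
 */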
void
pmcstat_start_process(struct pmcstat_args *a)
{
/* nothing to do: target is already running */
if (a->pa_flags & FLAG_HAS_PID)
return;
/* write token to child to state that we are ready */
if (write(pmcstat_pipefd[WRITEPIPEFD], "+", 1) != 1)
err(EX_OSERR, "ERROR: write failed");
(void) close(pmcstat_pipefd[WRITEPIPEFD]);
}
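/*
 * Print a usage message and exit.
 */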
void
pmcstat_show_usage(void)
{
errx(EX_USAGE,
"[options] [commandline]\n"
"\t Measure process and/or system performance using hardware\n"
"\t performance monitoring counters.\n"
"\t Options include:\n"
"\t -C\t\t toggle showing cumulative counts\n"
"\t -O file\t set sampling log file to \"file\"\n"
"\t -P spec\t allocate process-private sampling PMC\n"
"\t -S spec\t allocate system-wide sampling PMC\n"
"\t -c cpu\t\t set default cpu\n"
"\t -d\t\t toggle tracking descendants\n"
"\t -n rate\t set sampling rate\n"
"\t -o file\t send print output to \"file\"\n"
"\t -p spec\t allocate process-private counting PMC\n"
"\t -s spec\t allocate system-wide counting PMC\n"
"\t -t pid\t attach to running process with pid \"pid\"\n"
"\t -w secs\t set printing time interval"
);
}
/*
* Main
*/
int
main(int argc, char **argv)
{
double interval;
int option, npmc, ncpu;
int c, current_cpu, current_sampling_count;
int running;
int do_descendants, use_cumulative_counts;
pid_t pid;
char *end;
struct pmcstat_ev *ev;
struct pmc_op_getpmcinfo *ppmci;
struct sigaction sa;
struct kevent kev;
struct winsize ws;
current_cpu = 0;
current_sampling_count = DEFAULT_SAMPLE_COUNT;
do_descendants = 0;
use_cumulative_counts = 0;
args.pa_flags = 0;
args.pa_pid = (pid_t) -1;
args.pa_logfile = NULL;
args.pa_outputfile = stderr;
args.pa_interval = DEFAULT_WAIT_INTERVAL;
STAILQ_INIT(&args.pa_head);
ev = NULL;
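	/* parse the command line */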
while ((option = getopt(argc, argv, "CO:P:S:c:dn:o:p:s:t:w:")) != -1)
switch (option) {
case 'C': /* cumulative values */
use_cumulative_counts = !use_cumulative_counts;
break;
case 'c': /* CPU */
current_cpu = strtol(optarg, &end, 0);
if (*end != '\0' || current_cpu < 0)
errx(EX_USAGE,
"ERROR: Illegal CPU number \"%s\"",
optarg);
break;
case 'd': /* toggle descendents */
do_descendants = !do_descendants;
break;
case 'p': /* process virtual counting PMC */
case 's': /* system-wide counting PMC */
case 'P': /* process virtual sampling PMC */
case 'S': /* system-wide sampling PMC */
if ((ev = malloc(sizeof(*ev))) == NULL)
errx(EX_SOFTWARE, "ERROR: Out of memory");
switch (option) {
case 'p': ev->ev_mode = PMC_MODE_TC; break;
case 's': ev->ev_mode = PMC_MODE_SC; break;
case 'P': ev->ev_mode = PMC_MODE_TS; break;
case 'S': ev->ev_mode = PMC_MODE_SS; break;
}
if (option == 'P' || option == 'p')
args.pa_flags |= FLAG_USING_PROCESS_PMC;
if (option == 'P' || option == 'S')
args.pa_flags |= FLAG_USING_SAMPLING;
if (option == 'p' || option == 's')
args.pa_flags |= FLAG_USING_COUNTING;
ev->ev_spec = strdup(optarg);
if (option == 'S' || option == 'P')
ev->ev_count = current_sampling_count;
else
ev->ev_count = -1;
if (option == 'S' || option == 's')
ev->ev_cpu = current_cpu;
else
ev->ev_cpu = PMC_CPU_ANY;
ev->ev_descendants = do_descendants;
ev->ev_cumulative = use_cumulative_counts;
ev->ev_saved = 0LL;
ev->ev_pmcid = PMC_ID_INVALID;
/* extract event name */
c = strcspn(optarg, ", \t");
			if ((ev->ev_name = malloc(c + 1)) == NULL)
				errx(EX_SOFTWARE, "ERROR: Out of memory");
			(void) strncpy(ev->ev_name, optarg, c);
			*(ev->ev_name + c) = '\0';
STAILQ_INSERT_TAIL(&args.pa_head, ev, ev_next);
break;
case 'n': /* sampling count */
current_sampling_count = strtol(optarg, &end, 0);
if (*end != '\0' || current_sampling_count <= 0)
errx(EX_USAGE,
"ERROR: Illegal count value \"%s\"",
optarg);
break;
case 'o': /* outputfile */
if (args.pa_outputfile != NULL)
(void) fclose(args.pa_outputfile);
if ((args.pa_outputfile = fopen(optarg, "w")) == NULL)
errx(EX_OSERR, "ERROR: cannot open \"%s\" for "
"writing", optarg);
case 'O': /* sampling output */
if (args.pa_logfile != NULL)
(void) fclose(args.pa_logfile);
if ((args.pa_logfile = fopen(optarg, "w")) == NULL)
errx(EX_OSERR, "ERROR: cannot open \"%s\" for "
"writing", optarg);
break;
case 't': /* target pid */
pid = strtol(optarg, &end, 0);
if (*end != '\0' || pid <= 0)
errx(EX_USAGE, "ERROR: Illegal pid value "
"\"%s\"", optarg);
args.pa_flags |= FLAG_HAS_PID;
args.pa_pid = pid;
break;
case 'w': /* wait interval */
interval = strtod(optarg, &end);
if (*end != '\0' || interval <= 0)
errx(EX_USAGE, "ERROR: Illegal wait interval "
"value \"%s\"", optarg);
args.pa_flags |= FLAG_HAS_WAIT_INTERVAL;
args.pa_interval = interval;
break;
case '?':
default:
pmcstat_show_usage();
break;
}
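	/* any remaining arguments specify a command to be executed */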
args.pa_argc = (argc -= optind);
args.pa_argv = (argv += optind);
if (argc)
args.pa_flags |= FLAG_HAS_PROCESS;
/*
* Check invocation syntax.
*/
if (STAILQ_EMPTY(&args.pa_head)) {
warnx("ERROR: At least one PMC event must be specified");
pmcstat_show_usage();
}
if (argc == 0) {
if (args.pa_pid == -1) {
if (args.pa_flags & FLAG_USING_PROCESS_PMC)
errx(EX_USAGE, "ERROR: the -P or -p options "
"require a target process");
} else if ((args.pa_flags & FLAG_USING_PROCESS_PMC) == 0)
errx(EX_USAGE,
"ERROR: option -t requires a process-mode pmc "
"specification");
} else if (args.pa_pid != -1)
errx(EX_USAGE,
"ERROR: option -t cannot be specified with a command "
"name");
if (pmc_init() < 0)
err(EX_UNAVAILABLE,
"ERROR: Initialization of the pmc(3) library failed");
if ((ncpu = pmc_ncpu()) < 0)
err(EX_OSERR, "ERROR: Cannot determine the number CPUs "
"on the system");
if ((npmc = pmc_npmc(0)) < 0) /* assume all CPUs are identical */
err(EX_OSERR, "ERROR: Cannot determine the number of PMCs "
"on CPU %d", 0);
/*
* Allocate PMCs.
*/
if (pmc_pmcinfo(0, &ppmci) < 0)
err(EX_OSERR, "ERROR: cannot retrieve pmc information");
assert(ppmci != NULL);
STAILQ_FOREACH(ev, &args.pa_head, ev_next)
if (pmc_allocate(ev->ev_spec, ev->ev_mode,
(ev->ev_descendants ? PMC_F_DESCENDANTS : 0),
ev->ev_cpu, &ev->ev_pmcid) < 0)
err(EX_OSERR, "ERROR: Cannot allocate %s-mode pmc with "
"specification \"%s\"",
PMC_IS_SYSTEM_MODE(ev->ev_mode) ? "system" : "process",
ev->ev_spec);
/* compute printout widths */
STAILQ_FOREACH(ev, &args.pa_head, ev_next) {
int pmc_width;
int pmc_display_width;
int pmc_header_width;
pmc_width = ppmci->pm_pmcs[ev->ev_pmcid].pm_width;
pmc_header_width = strlen(ev->ev_name) + 2; /* prefix '%c|' */
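		/* a pmc_width bit counter needs about pmc_width/log2(10) digits */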
pmc_display_width = (int) floor(pmc_width / 3.32193) + 1;
if (pmc_header_width > pmc_display_width) {
ev->ev_fieldskip = 0;
ev->ev_fieldwidth = pmc_header_width;
} else {
ev->ev_fieldskip = pmc_display_width -
pmc_header_width;
ev->ev_fieldwidth = pmc_display_width;
}
}
/* Allocate a kqueue */
if ((pmcstat_kq = kqueue()) < 0)
err(EX_OSERR, "ERROR: Cannot allocate kqueue");
/*
	 * If our output is directed to a terminal, register a handler
* for window size changes.
*/
if (isatty(fileno(args.pa_outputfile))) {
if (ioctl(fileno(args.pa_outputfile), TIOCGWINSZ, &ws) < 0)
err(EX_OSERR, "ERROR: Cannot determine window size");
pmcstat_displayheight = ws.ws_row - 1;
EV_SET(&kev, SIGWINCH, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
err(EX_OSERR, "ERROR: Cannot register kevent for "
"SIGWINCH");
}
EV_SET(&kev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
err(EX_OSERR, "ERROR: Cannot register kevent for SIGINT");
if (args.pa_flags & FLAG_USING_SAMPLING) {
/*
* configure log file
*/
if (args.pa_logfile == NULL)
if ((args.pa_logfile =
fopen(DEFAULT_LOGFILE_NAME, "w")) == NULL)
err(EX_CANTCREAT, "ERROR: Cannot open sampling "
"log file \"%s\"", DEFAULT_LOGFILE_NAME);
if (pmc_configure_logfile(fileno(args.pa_logfile)) < 0)
err(EX_OSERR, "ERROR: Cannot configure sampling "
"log");
STAILQ_FOREACH(ev, &args.pa_head, ev_next)
if (PMC_IS_SAMPLING_MODE(ev->ev_mode) &&
pmc_set(ev->ev_pmcid, ev->ev_count) < 0)
err(EX_OSERR, "ERROR: Cannot set sampling count "
"for PMC \"%s\"", ev->ev_name);
}
/* setup a timer for any counting mode PMCs */
if (args.pa_flags & FLAG_USING_COUNTING) {
EV_SET(&kev, 0, EVFILT_TIMER, EV_ADD, 0,
args.pa_interval * 1000, NULL);
if (kevent(pmcstat_kq, &kev, 1, NULL, 0, NULL) < 0)
err(EX_OSERR, "ERROR: Cannot register kevent for "
"timer");
}
/* attach PMCs to the target process, starting it if specified */
if (args.pa_flags & FLAG_HAS_PROCESS)
pmcstat_setup_process(&args);
/* start the pmcs */
pmcstat_start_pmcs(&args);
/* start the (commandline) process if needed */
if (args.pa_flags & FLAG_HAS_PROCESS)
pmcstat_start_process(&args);
/* Handle SIGINT using the kqueue loop */
sa.sa_handler = SIG_IGN;
sa.sa_flags = 0;
(void) sigemptyset(&sa.sa_mask);
if (sigaction(SIGINT, &sa, NULL) < 0)
err(EX_OSERR, "ERROR: Cannot install signal handler");
/*
	 * loop until either the target process (if any) exits, or we
	 * receive a SIGINT.
*/
running = 1;
do {
if ((c = kevent(pmcstat_kq, NULL, 0, &kev, 1, NULL)) <= 0) {
if (errno != EINTR)
err(EX_OSERR, "ERROR: kevent failed");
else
continue;
}
if (kev.flags & EV_ERROR)
errc(EX_OSERR, kev.data, "ERROR: kevent failed");
switch (kev.filter) {
case EVFILT_PROC: /* target process exited */
running = 0;
/* FALLTHROUGH */
case EVFILT_TIMER: /* print out counting PMCs */
pmcstat_print_pmcs(&args);
if (running == 0) /* final newline */
(void) fprintf(args.pa_outputfile, "\n");
break;
case EVFILT_SIGNAL:
if (kev.ident == SIGINT) {
/* pass the signal on to the child process */
if ((args.pa_flags & FLAG_HAS_PROCESS) &&
(args.pa_flags & FLAG_HAS_PID) == 0)
if (kill(args.pa_pid, SIGINT) != 0)
err(EX_OSERR, "cannot kill "
"child");
running = 0;
} else if (kev.ident == SIGWINCH) {
if (ioctl(fileno(args.pa_outputfile),
TIOCGWINSZ, &ws) < 0)
err(EX_OSERR, "ERROR: Cannot determine "
"window size");
pmcstat_displayheight = ws.ws_row - 1;
} else
assert(0);
break;
}
} while (running);
pmcstat_cleanup(&args);
return 0;
}