/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2007 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2018 Matthew Macy
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Logging code for hwpmc(4)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/domainset.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/pmclog.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/clock.h>
#endif

#define	curdomain	PCPU_GET(domain)

/*
 * Sysctl tunables
 */

SYSCTL_DECL(_kern_hwpmc);

/*
 * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers.
 */

static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
#if (__FreeBSD_version < 1100000)
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
#endif
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RDTUN,
    &pmclog_buffer_size, 0, "size of log buffers in kilobytes");

/*
 * kern.hwpmc.nbuffers_pcpu -- number of log buffers per cpu.
 */

static int pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU;
#if (__FreeBSD_version < 1100000)
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers_pcpu);
#endif
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers_pcpu, CTLFLAG_RDTUN,
    &pmc_nlogbuffers_pcpu, 0, "number of log buffers per cpu");

/*
 * Global log buffer list and associated spin lock.
 */

static struct mtx pmc_kthread_mtx;	/* sleep lock */

#define	PMCLOG_INIT_BUFFER_DESCRIPTOR(D, buf, domain) do {		\
	(D)->plb_fence = ((char *)(buf)) + 1024 * pmclog_buffer_size;	\
	(D)->plb_base = (D)->plb_ptr = ((char *)(buf));			\
	(D)->plb_domain = domain;					\
	} while (0)

#define	PMCLOG_RESET_BUFFER_DESCRIPTOR(D) do {				\
	(D)->plb_ptr = (D)->plb_base;					\
	} while (0)

/*
 * Log file record constructors.
 */
#define	_PMCLOG_TO_HEADER(T, L)						\
	((PMCLOG_HEADER_MAGIC << 24) |					\
	 (PMCLOG_TYPE_ ## T << 16)   |					\
	 ((L) & 0xFFFF))

/* reserve LEN bytes of space and initialize the entry header */
#define	_PMCLOG_RESERVE_SAFE(PO, TYPE, LEN, ACTION, TSC) do {		\
	uint32_t *_le;							\
	int _len = roundup((LEN), sizeof(uint32_t));			\
	struct pmclog_header *ph;					\
	if ((_le = pmclog_reserve((PO), _len)) == NULL) {		\
		ACTION;							\
	}								\
	ph = (struct pmclog_header *)_le;				\
	ph->pl_header = _PMCLOG_TO_HEADER(TYPE, _len);			\
	ph->pl_tsc = (TSC);						\
	_le += sizeof(*ph) / 4	/* skip over timestamp */

/* reserve LEN bytes of space and initialize the entry header */
#define	_PMCLOG_RESERVE(PO, TYPE, LEN, ACTION) do {			\
	uint32_t *_le;							\
	int _len = roundup((LEN), sizeof(uint32_t));			\
	uint64_t tsc;							\
	struct pmclog_header *ph;					\
	tsc = pmc_rdtsc();						\
	spinlock_enter();						\
	if ((_le = pmclog_reserve((PO), _len)) == NULL) {		\
		spinlock_exit();					\
		ACTION;							\
	}								\
	ph = (struct pmclog_header *)_le;				\
	ph->pl_header = _PMCLOG_TO_HEADER(TYPE, _len);			\
	ph->pl_tsc = tsc;						\
	_le += sizeof(*ph) / 4	/* skip over timestamp */

#define	PMCLOG_RESERVE_SAFE(P, T, L, TSC)				\
	_PMCLOG_RESERVE_SAFE(P, T, L, return, TSC)
#define	PMCLOG_RESERVE(P, T, L)		_PMCLOG_RESERVE(P, T, L, return)
#define	PMCLOG_RESERVE_WITH_ERROR(P, T, L)				\
	_PMCLOG_RESERVE(P, T, L, error = ENOMEM; goto error)

#define	PMCLOG_EMIT32(V)	do { *_le++ = (V); } while (0)
#define	PMCLOG_EMIT64(V)	do {					\
	*_le++ = (uint32_t) ((V) & 0xFFFFFFFF);				\
	*_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF);			\
	} while (0)

/* Emit a string.  Caution: does NOT update _le, so needs to be last */
#define	PMCLOG_EMITSTRING(S, L)	do { bcopy((S), _le, (L)); } while (0)
#define	PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0)

#define	PMCLOG_DESPATCH_SAFE(PO)					\
	    pmclog_release((PO));					\
	} while (0)

#define	PMCLOG_DESPATCH_SCHED_LOCK(PO)					\
	    pmclog_release_flags((PO), 0);				\
	} while (0)

#define	PMCLOG_DESPATCH(PO)						\
	    pmclog_release((PO));					\
	    spinlock_exit();						\
	} while (0)

#define	PMCLOG_DESPATCH_SYNC(PO)					\
	    pmclog_schedule_io((PO), 1);				\
	    spinlock_exit();						\
	} while (0)
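
/*
 * A record emitter strings these macros together: reserve space for the
 * record, emit its fields, then despatch it.  An illustrative (not
 * compiled) sketch, mirroring pmclog_process_procfork() below:
 *
 *	PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork));
 *	PMCLOG_EMIT32(oldpid);
 *	PMCLOG_EMIT32(newpid);
 *	PMCLOG_DESPATCH(po);
 *
 * PMCLOG_RESERVE() opens a critical section and a 'do {' block that the
 * matching PMCLOG_DESPATCH*() variant closes, so the two must always be
 * paired within a single function.
 */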

#define	TSDELTA	4

/*
 * Assertions about the log file format.
 */
CTASSERT(sizeof(struct pmclog_callchain) == 7*4 + TSDELTA +
    PMC_CALLCHAIN_DEPTH_MAX*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_closelog) == 3*4 + TSDELTA);
CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4 + TSDELTA);
CTASSERT(sizeof(struct pmclog_map_in) == PATH_MAX + TSDELTA +
    5*4 + sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_map_in,pl_pathname) ==
    5*4 + TSDELTA + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_map_out) == 5*4 + 2*sizeof(uintfptr_t) + TSDELTA);
CTASSERT(sizeof(struct pmclog_pmcallocate) == 9*4 + TSDELTA);
CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX + TSDELTA);
CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4 + TSDELTA);
CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4 + TSDELTA);
CTASSERT(sizeof(struct pmclog_proccsw) == 7*4 + 8 + TSDELTA);
CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX +
    sizeof(uintfptr_t) + TSDELTA);
CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 + TSDELTA +
    sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8 + TSDELTA);
CTASSERT(sizeof(struct pmclog_procfork) == 5*4 + TSDELTA);
CTASSERT(sizeof(struct pmclog_sysexit) == 6*4);
CTASSERT(sizeof(struct pmclog_userdata) == 6*4);

/*
 * Log buffer structure
 */
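
/*
 * Each descriptor tracks a region [plb_base, plb_fence) of
 * pmclog_buffer_size kilobytes; plb_ptr is the current append point and
 * always satisfies plb_base <= plb_ptr <= plb_fence.  plb_domain records
 * the NUMA domain whose free list (pmc_dom_hdrs[]) the buffer belongs to.
 */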

struct pmclog_buffer {
	TAILQ_ENTRY(pmclog_buffer) plb_next;
	char		*plb_base;
	char		*plb_ptr;
	char		*plb_fence;
	uint16_t	 plb_domain;
} __aligned(CACHE_LINE_SIZE);

/*
 * Prototypes
 */

static int pmclog_get_buffer(struct pmc_owner *po);
static void pmclog_loop(void *arg);
static void pmclog_release(struct pmc_owner *po);
static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
static void pmclog_schedule_io(struct pmc_owner *po, int wakeup);
static void pmclog_schedule_all(struct pmc_owner *po);
static void pmclog_stop_kthread(struct pmc_owner *po);

/*
 * Helper functions
 */

static inline void
pmc_plb_rele_unlocked(struct pmclog_buffer *plb)
{
	TAILQ_INSERT_HEAD(&pmc_dom_hdrs[plb->plb_domain]->pdbh_head, plb, plb_next);
}

static inline void
pmc_plb_rele(struct pmclog_buffer *plb)
{
	mtx_lock_spin(&pmc_dom_hdrs[plb->plb_domain]->pdbh_mtx);
	pmc_plb_rele_unlocked(plb);
	mtx_unlock_spin(&pmc_dom_hdrs[plb->plb_domain]->pdbh_mtx);
}
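
/*
 * The unlocked variant is intended for contexts that do not need the
 * per-domain spin lock, e.g. pmclog_initialize() below, which populates
 * the free lists before any consumer can race with it.
 */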

/*
 * Get a log buffer
 */
static int
pmclog_get_buffer(struct pmc_owner *po)
{
	struct pmclog_buffer *plb;
	int domain;

	KASSERT(po->po_curbuf[curcpu] == NULL,
	    ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));

	domain = curdomain;
	MPASS(pmc_dom_hdrs[domain]);
	mtx_lock_spin(&pmc_dom_hdrs[domain]->pdbh_mtx);
	if ((plb = TAILQ_FIRST(&pmc_dom_hdrs[domain]->pdbh_head)) != NULL)
		TAILQ_REMOVE(&pmc_dom_hdrs[domain]->pdbh_head, plb, plb_next);
	mtx_unlock_spin(&pmc_dom_hdrs[domain]->pdbh_mtx);

	PMCDBG2(LOG,GTB,1, "po=%p plb=%p", po, plb);

#ifdef	HWPMC_DEBUG
	if (plb)
		KASSERT(plb->plb_ptr == plb->plb_base &&
		    plb->plb_base < plb->plb_fence,
		    ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
		    "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
		    plb->plb_base, plb->plb_fence));
#endif

	po->po_curbuf[curcpu] = plb;

	/* update stats */
	counter_u64_add(pmc_stats.pm_buffer_requests, 1);
	if (plb == NULL)
		counter_u64_add(pmc_stats.pm_buffer_requests_failed, 1);

	return (plb ? 0 : ENOMEM);
}

struct pmclog_proc_init_args {
	struct proc *kthr;
	struct pmc_owner *po;
	bool exit;
	bool acted;
};

int
pmclog_proc_create(struct thread *td, void **handlep)
{
	struct pmclog_proc_init_args *ia;
	int error;

	ia = malloc(sizeof(*ia), M_TEMP, M_WAITOK | M_ZERO);
	error = kproc_create(pmclog_loop, ia, &ia->kthr,
	    RFHIGHPID, 0, "hwpmc: proc(%d)", td->td_proc->p_pid);
	if (error == 0)
		*handlep = ia;
	return (error);
}

void
pmclog_proc_ignite(void *handle, struct pmc_owner *po)
{
	struct pmclog_proc_init_args *ia;

	ia = handle;
	mtx_lock(&pmc_kthread_mtx);
	MPASS(!ia->acted);
	MPASS(ia->po == NULL);
	MPASS(!ia->exit);
	MPASS(ia->kthr != NULL);
	if (po == NULL) {
		ia->exit = true;
	} else {
		ia->po = po;
		KASSERT(po->po_kthread == NULL,
		    ("[pmclog,%d] po=%p kthread (%p) already present",
		    __LINE__, po, po->po_kthread));
		po->po_kthread = ia->kthr;
	}
	wakeup(ia);
	while (!ia->acted)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogw", 0);
	mtx_unlock(&pmc_kthread_mtx);
	free(ia, M_TEMP);
}
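
/*
 * pmclog_proc_create() and pmclog_proc_ignite() form a two-step
 * handshake: create() spawns the helper process, which parks in
 * pmclog_loop() until ignite() either hands it a pmc owner to service
 * or (when called with po == NULL) tells it to exit.
 */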

/*
 * Log handler loop.
 *
 * This function is executed by each pmc owner's helper thread.
 */
static void
pmclog_loop(void *arg)
{
	struct pmclog_proc_init_args *ia;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;
	struct proc *p;
	struct ucred *ownercred;
	struct ucred *mycred;
	struct thread *td;
	sigset_t unb;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;
	int error;

	td = curthread;

	SIGEMPTYSET(unb);
	SIGADDSET(unb, SIGHUP);
	(void)kern_sigprocmask(td, SIG_UNBLOCK, &unb, NULL, 0);

	ia = arg;
	MPASS(ia->kthr == curproc);
	MPASS(!ia->acted);
	mtx_lock(&pmc_kthread_mtx);
	while (ia->po == NULL && !ia->exit)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogi", 0);
	if (ia->exit) {
		ia->acted = true;
		wakeup(ia);
		mtx_unlock(&pmc_kthread_mtx);
		kproc_exit(0);
	}
	MPASS(ia->po != NULL);
	po = ia->po;
	ia->acted = true;
	wakeup(ia);
	mtx_unlock(&pmc_kthread_mtx);
	ia = NULL;

	p = po->po_owner;
	mycred = td->td_ucred;

	PROC_LOCK(p);
	ownercred = crhold(p->p_ucred);
	PROC_UNLOCK(p);

	PMCDBG2(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
	    po, po->po_kthread, curthread->td_proc));

	lb = NULL;

	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue.  The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* No more buffers and shutdown required. */
				if (po->po_flags & PMC_PO_SHUTDOWN)
					break;

				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 250);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG3(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov    = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid  = nbytes;
		auio.uio_rw     = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td     = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		if (error) {
			/* XXX some errors are recoverable */
			/* send a SIGIO to the owner and exit */
			PROC_LOCK(p);
			kern_psignal(p, SIGIO);
			PROC_UNLOCK(p);

			mtx_lock(&pmc_kthread_mtx);

			po->po_error = error; /* save for flush log */

			PMCDBG2(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		mtx_lock(&pmc_kthread_mtx);

		/* put the used buffer back into the global pool */
		PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);

		pmc_plb_rele(lb);
		lb = NULL;
	}

	wakeup_one(po->po_kthread);
	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);

		pmc_plb_rele(lb);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kproc_exit(0);
}

/*
 * Release a log entry and schedule an I/O if needed.
 */

static void
pmclog_release_flags(struct pmc_owner *po, int wakeup)
{
	struct pmclog_buffer *plb;

	plb = po->po_curbuf[curcpu];
	KASSERT(plb->plb_ptr >= plb->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
	    po, plb->plb_ptr, plb->plb_base));
	KASSERT(plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
	    po, plb->plb_ptr, plb->plb_fence));

	/* schedule an I/O if we've filled a buffer */
	if (plb->plb_ptr >= plb->plb_fence)
		pmclog_schedule_io(po, wakeup);

	PMCDBG1(LOG,REL,1, "po=%p", po);
}

static void
pmclog_release(struct pmc_owner *po)
{

	pmclog_release_flags(po, 1);
}

/*
 * Attempt to reserve 'length' bytes of space in an owner's log
 * buffer.  The function returns a pointer to 'length' bytes of space
 * if there was enough space or returns NULL if no space was
 * available.  Non-null returns do so with the po mutex locked.  The
 * caller must invoke pmclog_release() on the pmc owner structure
 * when done.
 */

static uint32_t *
pmclog_reserve(struct pmc_owner *po, int length)
{
	uintptr_t newptr, oldptr;
	struct pmclog_buffer *plb, **pplb;

	PMCDBG2(LOG,ALL,1, "po=%p len=%d", po, length);

	KASSERT(length % sizeof(uint32_t) == 0,
	    ("[pmclog,%d] length not a multiple of word size", __LINE__));

	/* No more data when shutdown in progress. */
	if (po->po_flags & PMC_PO_SHUTDOWN)
		return (NULL);

	pplb = &po->po_curbuf[curcpu];
	if (*pplb == NULL && pmclog_get_buffer(po) != 0)
		goto fail;

	KASSERT(*pplb != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	plb = *pplb;
	KASSERT(plb->plb_ptr >= plb->plb_base &&
	    plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
	    __LINE__, po, plb->plb_ptr, plb->plb_base,
	    plb->plb_fence));

	oldptr = (uintptr_t) plb->plb_ptr;
	newptr = oldptr + length;

	KASSERT(oldptr != (uintptr_t) NULL,
	    ("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po));

	/*
	 * If we have space in the current buffer, return a pointer to
	 * available space with the PO structure locked.
	 */
	if (newptr <= (uintptr_t) plb->plb_fence) {
		plb->plb_ptr = (char *) newptr;
		goto done;
	}

	/*
	 * Otherwise, schedule the current buffer for output and get a
	 * fresh buffer.
	 */
	pmclog_schedule_io(po, 0);

	if (pmclog_get_buffer(po) != 0)
		goto fail;

	plb = *pplb;
	KASSERT(plb != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	KASSERT(plb->plb_ptr != NULL,
	    ("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__));

	KASSERT(plb->plb_ptr == plb->plb_base &&
	    plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
	    __LINE__, po, plb->plb_ptr, plb->plb_base,
	    plb->plb_fence));

	oldptr = (uintptr_t) plb->plb_ptr;

 done:
	return ((uint32_t *) oldptr);
 fail:
	return (NULL);
}

/*
 * Schedule an I/O.
 *
 * Transfer the current buffer to the helper kthread.
 */

static void
pmclog_schedule_io(struct pmc_owner *po, int wakeup)
{
	struct pmclog_buffer *plb;

	plb = po->po_curbuf[curcpu];
	po->po_curbuf[curcpu] = NULL;
	KASSERT(plb != NULL,
	    ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po));
	KASSERT(plb->plb_ptr >= plb->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
	    po, plb->plb_ptr, plb->plb_base));
	KASSERT(plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
	    po, plb->plb_ptr, plb->plb_fence));

	PMCDBG1(LOG,SIO, 1, "po=%p", po);

	/*
	 * Add the current buffer to the tail of the buffer list and
	 * wakeup the helper.
	 */
	mtx_lock_spin(&po->po_mtx);
	TAILQ_INSERT_TAIL(&po->po_logbuffers, plb, plb_next);
	mtx_unlock_spin(&po->po_mtx);
	if (wakeup)
		wakeup_one(po);
}
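
/*
 * The 'wakeup' argument lets callers on scheduler-locked paths (see
 * PMCLOG_DESPATCH_SCHED_LOCK) queue the buffer without waking the helper
 * thread; the buffer is then picked up on the helper's next periodic
 * wakeup in pmclog_loop().
 */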

/*
 * Stop the helper kthread.
 */

static void
pmclog_stop_kthread(struct pmc_owner *po)
{

	mtx_lock(&pmc_kthread_mtx);
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
	if (po->po_kthread != NULL) {
		PROC_LOCK(po->po_kthread);
		kern_psignal(po->po_kthread, SIGHUP);
		PROC_UNLOCK(po->po_kthread);
	}
	wakeup_one(po);
	while (po->po_kthread)
		msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
	mtx_unlock(&pmc_kthread_mtx);
}

/*
 * Public functions
 */

/*
 * Configure a log file for pmc owner 'po'.
 *
 * Parameter 'logfd' is a file handle referencing an open file in the
 * owner process.  This file needs to have been opened for writing.
 */

int
pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
	struct proc *p;
	struct timespec ts;
	uint64_t tsc;
	int error;

	sx_assert(&pmc_sx, SA_XLOCKED);
	PMCDBG2(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

	p = po->po_owner;

	/* return EBUSY if a log file was already present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		return (EBUSY);

	KASSERT(po->po_file == NULL,
	    ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
	    po->po_file));

	/* get a reference to the file state */
	error = fget_write(curthread, logfd, &cap_write_rights, &po->po_file);
	if (error)
		goto error;

	/* mark process as owning a log file */
	po->po_flags |= PMC_PO_OWNS_LOGFILE;

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);
	nanotime(&ts);
	tsc = pmc_rdtsc();
	/* create a log initialization entry */
	PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
	    sizeof(struct pmclog_initialize));
	PMCLOG_EMIT32(PMC_VERSION);
	PMCLOG_EMIT32(md->pmd_cputype);
#if defined(__i386__) || defined(__amd64__)
	PMCLOG_EMIT64(tsc_freq);
#else
	/* other architectures will need to fill this in */
	PMCLOG_EMIT32(0);
	PMCLOG_EMIT32(0);
#endif
	memcpy(_le, &ts, sizeof(ts));
	_le += sizeof(ts)/4;
	PMCLOG_EMITSTRING(pmc_cpuid, PMC_CPUID_LEN);
	PMCLOG_DESPATCH_SYNC(po);

	return (0);

 error:
	KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
	    "stopped", __LINE__, po));

	if (po->po_file)
		(void) fdrop(po->po_file, curthread);
	po->po_file  = NULL;	/* clear file and error state */
	po->po_error = 0;
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;

	return (error);
}
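
/*
 * The INITIALIZE record written above carries the pmc version, the CPU
 * type, the TSC frequency (or zeroes on architectures that do not fill
 * it in), a nanotime() timestamp and the CPU identification string.
 */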

/*
 * De-configure a log file.  This will throw away any buffers queued
 * for this owner process.
 */

int
pmclog_deconfigure_log(struct pmc_owner *po)
{
	int error;
	struct pmclog_buffer *lb;

	PMCDBG1(LOG,CFG,1, "de-config po=%p", po);

	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
		return (EINVAL);

	KASSERT(po->po_sscount == 0,
	    ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
	KASSERT(po->po_file != NULL,
	    ("[pmclog,%d] po=%p no log file", __LINE__, po));

	/* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
	pmclog_stop_kthread(po);

	KASSERT(po->po_kthread == NULL,
	    ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));

	/* return all queued log buffers to the global pool */
	while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
		TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
		PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);
		pmc_plb_rele(lb);
	}
	for (int i = 0; i < mp_ncpus; i++) {
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		/* return the 'current' buffer to the global pool */
		if ((lb = po->po_curbuf[curcpu]) != NULL) {
			PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);
			pmc_plb_rele(lb);
		}
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	/* drop a reference to the fd */
	if (po->po_file != NULL) {
		error = fdrop(po->po_file, curthread);
		po->po_file = NULL;
	} else
		error = 0;
	po->po_error = 0;

	return (error);
}

/*
 * Flush a process' log buffer.
 */

int
pmclog_flush(struct pmc_owner *po, int force)
{
	int error;

	PMCDBG1(LOG,FLS,1, "po=%p", po);

	/*
	 * If there is a pending error recorded by the logger thread,
	 * return that.
	 */
	if (po->po_error)
		return (po->po_error);

	error = 0;

	/*
	 * Check that we do have an active log file.
	 */
	mtx_lock(&pmc_kthread_mtx);
	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
		error = EINVAL;
		goto error;
	}

	pmclog_schedule_all(po);
 error:
	mtx_unlock(&pmc_kthread_mtx);

	return (error);
}

static void
pmclog_schedule_one_cond(struct pmc_owner *po)
{
	struct pmclog_buffer *plb;
	int cpu;

	spinlock_enter();
	cpu = curcpu;
	/* tell hardclock not to run again */
	if (PMC_CPU_HAS_SAMPLES(cpu))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);

	plb = po->po_curbuf[cpu];
	if (plb && plb->plb_ptr != plb->plb_base)
		pmclog_schedule_io(po, 1);
	spinlock_exit();
}

static void
pmclog_schedule_all(struct pmc_owner *po)
{
	/*
	 * Schedule the current buffer if any and not empty.
	 */
	for (int i = 0; i < mp_ncpus; i++) {
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		pmclog_schedule_one_cond(po);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}
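
/*
 * Because po_curbuf[] is maintained per CPU, pmclog_schedule_all() must
 * visit every CPU: it binds the calling thread to each CPU in turn and
 * lets pmclog_schedule_one_cond() drain that CPU's partially filled
 * buffer.
 */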

int
pmclog_close(struct pmc_owner *po)
{

	PMCDBG1(LOG,CLO,1, "po=%p", po);

	pmclog_process_closelog(po);

	mtx_lock(&pmc_kthread_mtx);
	/*
	 * Initiate shutdown: no new data queued,
	 * thread will close file on last block.
	 */
	po->po_flags |= PMC_PO_SHUTDOWN;
	/* give time for all to see */
	DELAY(50);

	/*
	 * Schedule the current buffer.
	 */
	pmclog_schedule_all(po);
	wakeup_one(po);

	mtx_unlock(&pmc_kthread_mtx);

	return (0);
}

void
pmclog_process_callchain(struct pmc *pm, struct pmc_sample *ps)
{
	int n, recordlen;
	uint32_t flags;
	struct pmc_owner *po;

	PMCDBG3(LOG,SAM,1,"pm=%p pid=%d n=%d", pm, ps->ps_pid,
	    ps->ps_nsamples);

	recordlen = offsetof(struct pmclog_callchain, pl_pc) +
	    ps->ps_nsamples * sizeof(uintfptr_t);
	po = pm->pm_owner;
	flags = PMC_CALLCHAIN_TO_CPUFLAGS(ps->ps_cpu,ps->ps_flags);
	PMCLOG_RESERVE_SAFE(po, CALLCHAIN, recordlen, ps->ps_tsc);
	PMCLOG_EMIT32(ps->ps_pid);
	PMCLOG_EMIT32(ps->ps_tid);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(flags);
	for (n = 0; n < ps->ps_nsamples; n++)
		PMCLOG_EMITADDR(ps->ps_pc[n]);
	PMCLOG_DESPATCH_SAFE(po);
}

void
pmclog_process_closelog(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po, CLOSELOG, sizeof(struct pmclog_closelog));
	PMCLOG_DESPATCH_SYNC(po);
}

void
pmclog_process_dropnotify(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po, DROPNOTIFY, sizeof(struct pmclog_dropnotify));
	PMCLOG_DESPATCH(po);
}

void
pmclog_process_map_in(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    const char *path)
{
	int pathlen, recordlen;

	KASSERT(path != NULL, ("[pmclog,%d] map-in, null path", __LINE__));

	pathlen = strlen(path) + 1;	/* #bytes for path name */
	recordlen = offsetof(struct pmclog_map_in, pl_pathname) +
	    pathlen;

	PMCLOG_RESERVE(po, MAP_IN, recordlen);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMIT32(0);
	PMCLOG_EMITADDR(start);
	PMCLOG_EMITSTRING(path, pathlen);
	PMCLOG_DESPATCH_SYNC(po);
}

void
pmclog_process_map_out(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    uintfptr_t end)
{
	KASSERT(start <= end, ("[pmclog,%d] start > end", __LINE__));

	PMCLOG_RESERVE(po, MAP_OUT, sizeof(struct pmclog_map_out));
	PMCLOG_EMIT32(pid);
	PMCLOG_EMIT32(0);
	PMCLOG_EMITADDR(start);
	PMCLOG_EMITADDR(end);
	PMCLOG_DESPATCH(po);
}

void
pmclog_process_pmcallocate(struct pmc *pm)
{
	struct pmc_owner *po;
	struct pmc_soft *ps;

	po = pm->pm_owner;

	PMCDBG1(LOG,ALL,1, "pm=%p", pm);

	if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) {
		PMCLOG_RESERVE(po, PMCALLOCATEDYN,
		    sizeof(struct pmclog_pmcallocatedyn));
		PMCLOG_EMIT32(pm->pm_id);
		PMCLOG_EMIT32(pm->pm_event);
		PMCLOG_EMIT32(pm->pm_flags);
		PMCLOG_EMIT32(0);
		PMCLOG_EMIT64(pm->pm_sc.pm_reloadcount);
		ps = pmc_soft_ev_acquire(pm->pm_event);
		if (ps != NULL)
			PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name, PMC_NAME_MAX);
		else
			PMCLOG_EMITNULLSTRING(PMC_NAME_MAX);
		pmc_soft_ev_release(ps);
		PMCLOG_DESPATCH_SYNC(po);
	} else {
		PMCLOG_RESERVE(po, PMCALLOCATE,
		    sizeof(struct pmclog_pmcallocate));
		PMCLOG_EMIT32(pm->pm_id);
		PMCLOG_EMIT32(pm->pm_event);
		PMCLOG_EMIT32(pm->pm_flags);
		PMCLOG_EMIT32(0);
		PMCLOG_EMIT64(pm->pm_sc.pm_reloadcount);
		PMCLOG_DESPATCH_SYNC(po);
	}
}

void
pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path)
{
	int pathlen, recordlen;
	struct pmc_owner *po;

	PMCDBG2(LOG,ATT,1,"pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	pathlen = strlen(path) + 1;	/* #bytes for the string */
	recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen;

	PMCLOG_RESERVE(po, PMCATTACH, recordlen);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMITSTRING(path, pathlen);
	PMCLOG_DESPATCH_SYNC(po);
}

void
pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
{
	struct pmc_owner *po;

	PMCDBG2(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH_SYNC(po);
}

void
pmclog_process_proccreate(struct pmc_owner *po, struct proc *p, int sync)
{
	if (sync) {
		PMCLOG_RESERVE(po, PROC_CREATE,
		    sizeof(struct pmclog_proccreate));
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1);
		PMCLOG_DESPATCH_SYNC(po);
	} else {
		PMCLOG_RESERVE(po, PROC_CREATE,
		    sizeof(struct pmclog_proccreate));
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1);
		PMCLOG_DESPATCH(po);
	}
}

/*
 * Log a context switch event to the log file.
 */

void
pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v,
    struct thread *td)
{
	struct pmc_owner *po;

	KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW,
	    ("[pmclog,%d] log-process-csw called gratuitously", __LINE__));

	PMCDBG3(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    v);

	po = pm->pm_owner;

	PMCLOG_RESERVE_SAFE(po, PROCCSW, sizeof(struct pmclog_proccsw),
	    pmc_rdtsc());
	PMCLOG_EMIT64(v);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_EMIT32(td->td_tid);
	PMCLOG_EMIT32(0);
	PMCLOG_DESPATCH_SCHED_LOCK(po);
}

void
pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid,
    uintfptr_t startaddr, char *path)
{
	int pathlen, recordlen;

	PMCDBG3(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path);

	pathlen   = strlen(path) + 1;	/* #bytes for the path */
	recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;
	PMCLOG_RESERVE(po, PROCEXEC, recordlen);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMIT32(pmid);
	PMCLOG_EMITADDR(startaddr);
	PMCLOG_EMITSTRING(path, pathlen);
	PMCLOG_DESPATCH_SYNC(po);
}

/*
 * Log a process exit event (and accumulated pmc value) to the log file.
 */

void
pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_owner *po;

	ri = PMC_TO_ROWINDEX(pm);
	PMCDBG3(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    pp->pp_pmcs[ri].pp_pmcval);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PROCEXIT, sizeof(struct pmclog_procexit));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a fork event.
 */

void
pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid)
{
	PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork));
	PMCLOG_EMIT32(oldpid);
	PMCLOG_EMIT32(newpid);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a process exit event of the form suitable for system-wide PMCs.
 */

void
pmclog_process_sysexit(struct pmc_owner *po, pid_t pid)
{
	PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit));
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}

void
pmclog_process_threadcreate(struct pmc_owner *po, struct thread *td, int sync)
{
	struct proc *p;

	p = td->td_proc;
	if (sync) {
		PMCLOG_RESERVE(po, THR_CREATE,
		    sizeof(struct pmclog_threadcreate));
		PMCLOG_EMIT32(td->td_tid);
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMIT32(0);
		PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1);
		PMCLOG_DESPATCH_SYNC(po);
	} else {
		PMCLOG_RESERVE(po, THR_CREATE,
		    sizeof(struct pmclog_threadcreate));
		PMCLOG_EMIT32(td->td_tid);
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMIT32(0);
		PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1);
		PMCLOG_DESPATCH(po);
	}
}

void
pmclog_process_threadexit(struct pmc_owner *po, struct thread *td)
{

	PMCLOG_RESERVE(po, THR_EXIT, sizeof(struct pmclog_threadexit));
	PMCLOG_EMIT32(td->td_tid);
	PMCLOG_DESPATCH(po);
}

/*
 * Write a user log entry.
 */

int
pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
{
	int error;

	PMCDBG2(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);

	error = 0;

	PMCLOG_RESERVE_WITH_ERROR(po, USERDATA,
	    sizeof(struct pmclog_userdata));
	PMCLOG_EMIT32(wl->pm_userdata);
	PMCLOG_DESPATCH(po);

 error:
	return (error);
}

/*
 * Initialization.
 *
 * Create a pool of log buffers and initialize mutexes.
 */

void
pmclog_initialize()
{
	struct pmclog_buffer *plb;
	int domain, ncpus, total;

	if (pmclog_buffer_size <= 0 || pmclog_buffer_size > 16*1024) {
		(void) printf("hwpmc: tunable logbuffersize=%d must be "
		    "greater than zero and less than or equal to 16MB.\n",
		    pmclog_buffer_size);
		pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
	}

	if (pmc_nlogbuffers_pcpu <= 0) {
		(void) printf("hwpmc: tunable nlogbuffers=%d must be greater "
		    "than zero.\n", pmc_nlogbuffers_pcpu);
		pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU;
	}
	if (pmc_nlogbuffers_pcpu*pmclog_buffer_size > 32*1024) {
		(void) printf("hwpmc: memory allocated pcpu must be less "
		    "than 32MB (is %dK).\n",
		    pmc_nlogbuffers_pcpu*pmclog_buffer_size);
		pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU;
		pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
	}
	for (domain = 0; domain < vm_ndomains; domain++) {
		ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus;
		total = ncpus * pmc_nlogbuffers_pcpu;

		plb = malloc_domainset(sizeof(struct pmclog_buffer) * total,
		    M_PMC, DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
		pmc_dom_hdrs[domain]->pdbh_plbs = plb;
		for (; total > 0; total--, plb++) {
			void *buf;

			buf = malloc_domainset(1024 * pmclog_buffer_size,
			    M_PMC, DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
			PMCLOG_INIT_BUFFER_DESCRIPTOR(plb, buf, domain);
			pmc_plb_rele_unlocked(plb);
		}
	}
	mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF);
}
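
/*
 * Buffer memory is allocated per NUMA domain with malloc_domainset(),
 * so a CPU normally logs into memory local to its own domain (see
 * curdomain and pmclog_get_buffer() above).
 */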

/*
 * Shutdown logging.
 *
 * Destroy mutexes and release memory back to the free pool.
 */

void
pmclog_shutdown()
{
	struct pmclog_buffer *plb;
	int domain;

	mtx_destroy(&pmc_kthread_mtx);

	for (domain = 0; domain < vm_ndomains; domain++) {
		while ((plb = TAILQ_FIRST(&pmc_dom_hdrs[domain]->pdbh_head)) != NULL) {
			TAILQ_REMOVE(&pmc_dom_hdrs[domain]->pdbh_head, plb, plb_next);
			free(plb->plb_base, M_PMC);
		}
		free(pmc_dom_hdrs[domain]->pdbh_plbs, M_PMC);
	}
}