x86: remove gprof kernel support
Reviewed by:	jhb
Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D29529
commit aa3ea612be
parent 76b1b5ce6d
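For context: the FAKE_MCOUNT and MEXITCOUNT hooks that this change deletes from the x86 assembly expand to real profiling calls only when the kernel is built with the GPROF/GUPROF options; in an ordinary build they are empty macros. A minimal sketch of the two variants, paraphrased from the asmacros.h macro definitions shown later in this diff:

    #ifdef GPROF
    #define FAKE_MCOUNT(caller)  pushq caller ; call __mcount ; popq %rcx
    #ifdef GUPROF
    #define MEXITCOUNT           call .mexitcount
    #else
    #define MEXITCOUNT
    #endif
    #else  /* !GPROF */
    #define FAKE_MCOUNT(caller)
    #define MEXITCOUNT
    #endif

With the mcount profiling machinery retired, only the empty variants remain, so the call sites can be dropped outright, along with ${PROF} in the kernel make rules.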
@@ -81,7 +81,6 @@ as_lapic_eoi:
 */
.macro ISR_VEC index, vec_name
INTR_HANDLER \vec_name
FAKE_MCOUNT(TF_RIP(%rsp))
cmpl $0,x2apic_mode
je 1f
movl $(MSR_APIC_ISR0 + \index),%ecx
@@ -98,7 +97,6 @@ as_lapic_eoi:
movl %eax, %edi /* pass the IRQ */
call lapic_handle_intr
3:
MEXITCOUNT
jmp doreti
.endm

@@ -127,28 +125,22 @@ IDTVEC(spuriousint)
* Local APIC periodic timer handler.
*/
INTR_HANDLER timerint
FAKE_MCOUNT(TF_RIP(%rsp))
movq %rsp, %rdi
call lapic_handle_timer
MEXITCOUNT
jmp doreti

/*
* Local APIC CMCI handler.
*/
INTR_HANDLER cmcint
FAKE_MCOUNT(TF_RIP(%rsp))
call lapic_handle_cmc
MEXITCOUNT
jmp doreti

/*
* Local APIC error interrupt handler.
*/
INTR_HANDLER errorint
FAKE_MCOUNT(TF_RIP(%rsp))
call lapic_handle_error
MEXITCOUNT
jmp doreti

#ifdef XENHVM
@@ -157,10 +149,8 @@ IDTVEC(spuriousint)
* Only used when the hypervisor supports direct vector callbacks.
*/
INTR_HANDLER xen_intr_upcall
FAKE_MCOUNT(TF_RIP(%rsp))
movq %rsp, %rdi
call xen_intr_handle_upcall
MEXITCOUNT
jmp doreti
#endif

@@ -184,9 +174,7 @@ IDTVEC(spuriousint)
*/
INTR_HANDLER ipi_intr_bitmap_handler
call as_lapic_eoi
FAKE_MCOUNT(TF_RIP(%rsp))
call ipi_bitmap_handler
MEXITCOUNT
jmp doreti

/*
@@ -210,9 +198,7 @@ IDTVEC(spuriousint)
*/
INTR_HANDLER ipi_swi
call as_lapic_eoi
FAKE_MCOUNT(TF_RIP(%rsp))
call ipi_swi_handler
MEXITCOUNT
jmp doreti

/*

@@ -44,11 +44,9 @@
*/
.macro INTR irq_num, vec_name
INTR_HANDLER \vec_name
FAKE_MCOUNT(TF_RIP(%rsp))
movq %rsp, %rsi
movl $\irq_num, %edi /* pass the IRQ */
call atpic_handle_intr
MEXITCOUNT
jmp doreti
.endm

@ -101,9 +101,6 @@ dtrace_invop_calltrap_addr:
|
||||
* registers are reloaded on return to the usermode.
|
||||
*/
|
||||
|
||||
MCOUNT_LABEL(user)
|
||||
MCOUNT_LABEL(btrap)
|
||||
|
||||
/* Traps that we leave interrupts disabled for. */
|
||||
.macro TRAP_NOEN l, trapno
|
||||
PTI_ENTRY \l,\l\()_pti_k,\l\()_pti_u
|
||||
@ -257,7 +254,6 @@ alltraps_pushregs_no_rax:
|
||||
pushfq
|
||||
andq $~(PSL_D | PSL_AC),(%rsp)
|
||||
popfq
|
||||
FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
#ifdef KDTRACE_HOOKS
|
||||
/*
|
||||
* DTrace Function Boundary Trace (fbt) probes are triggered
|
||||
@ -288,7 +284,6 @@ alltraps_pushregs_no_rax:
|
||||
calltrap:
|
||||
movq %rsp,%rdi
|
||||
call trap_check
|
||||
MEXITCOUNT
|
||||
jmp doreti /* Handle any pending ASTs */
|
||||
|
||||
/*
|
||||
@ -576,7 +571,6 @@ fast_syscall_common:
|
||||
movq %r14,TF_R14(%rsp) /* C preserved */
|
||||
movq %r15,TF_R15(%rsp) /* C preserved */
|
||||
movl $TF_HASSEGS,TF_FLAGS(%rsp)
|
||||
FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
movq PCPU(CURTHREAD),%rdi
|
||||
movq %rsp,TD_FRAME(%rdi)
|
||||
movl TF_RFLAGS(%rsp),%esi
|
||||
@ -594,7 +588,6 @@ fast_syscall_common:
|
||||
call handle_ibrs_exit
|
||||
callq *mds_handler
|
||||
/* Restore preserved registers. */
|
||||
MEXITCOUNT
|
||||
movq TF_RDI(%rsp),%rdi /* bonus; preserve arg 1 */
|
||||
movq TF_RSI(%rsp),%rsi /* bonus: preserve arg 2 */
|
||||
movq TF_RDX(%rsp),%rdx /* return value 2 */
|
||||
@ -621,7 +614,6 @@ fast_syscall_common:
|
||||
jmp 1b
|
||||
|
||||
4: /* Requested full context restore, use doreti for that. */
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
@ -692,10 +684,8 @@ IDTVEC(dbg)
|
||||
rdmsr
|
||||
movl %eax,%r14d
|
||||
call handle_ibrs_entry
|
||||
2: FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
movq %rsp,%rdi
|
||||
2: movq %rsp,%rdi
|
||||
call trap
|
||||
MEXITCOUNT
|
||||
testl $CPUID_STDEXT3_IBPB,cpu_stdext_feature3(%rip)
|
||||
je 3f
|
||||
movl %r14d,%eax
|
||||
@ -866,10 +856,8 @@ nmi_fromuserspace:
|
||||
3:
|
||||
/* Note: this label is also used by ddb and gdb: */
|
||||
nmi_calltrap:
|
||||
FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
movq %rsp,%rdi
|
||||
call trap
|
||||
MEXITCOUNT
|
||||
#ifdef HWPMC_HOOKS
|
||||
/*
|
||||
* Capture a userspace callchain if needed.
|
||||
@ -1055,10 +1043,8 @@ mchk_fromuserspace:
|
||||
1: call handle_ibrs_entry
|
||||
/* Note: this label is also used by ddb and gdb: */
|
||||
mchk_calltrap:
|
||||
FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
movq %rsp,%rdi
|
||||
call mca_intr
|
||||
MEXITCOUNT
|
||||
testl %ebx,%ebx /* %ebx != 0 => return to userland */
|
||||
jnz doreti_exit
|
||||
/*
|
||||
@ -1089,7 +1075,6 @@ ENTRY(fork_trampoline)
|
||||
movq %rbx,%rsi /* arg1 */
|
||||
movq %rsp,%rdx /* trapframe pointer */
|
||||
call fork_exit
|
||||
MEXITCOUNT
|
||||
jmp doreti /* Handle any ASTs */
|
||||
|
||||
/*
|
||||
@ -1115,8 +1100,6 @@ ENTRY(fork_trampoline)
|
||||
.p2align 4
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
MCOUNT_LABEL(bintr)
|
||||
|
||||
#include <amd64/amd64/apic_vector.S>
|
||||
|
||||
#ifdef DEV_ATPIC
|
||||
@ -1128,9 +1111,6 @@ MCOUNT_LABEL(bintr)
|
||||
#include <amd64/amd64/atpic_vector.S>
|
||||
#endif
|
||||
|
||||
.text
|
||||
MCOUNT_LABEL(eintr)
|
||||
|
||||
/*
|
||||
* void doreti(struct trapframe)
|
||||
*
|
||||
@ -1141,7 +1121,6 @@ MCOUNT_LABEL(eintr)
|
||||
.type doreti,@function
|
||||
.globl doreti
|
||||
doreti:
|
||||
FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */
|
||||
/*
|
||||
* Check if ASTs can be handled now.
|
||||
*/
|
||||
@ -1171,7 +1150,6 @@ doreti_ast:
|
||||
* registers. The fault is handled in trap.c.
|
||||
*/
|
||||
doreti_exit:
|
||||
MEXITCOUNT
|
||||
movq PCPU(CURPCB),%r8
|
||||
|
||||
/*
|
||||
@ -1332,7 +1310,6 @@ doreti_iret_fault:
|
||||
movl $T_PROTFLT,TF_TRAPNO(%rsp)
|
||||
movq $0,TF_ERR(%rsp) /* XXX should be the error code */
|
||||
movq $0,TF_ADDR(%rsp)
|
||||
FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
jmp calltrap
|
||||
|
||||
ALIGN_TEXT
|
||||
|
@ -54,7 +54,7 @@
|
||||
*
|
||||
* We are already in long mode, on a 64 bit %cs and running at KERNBASE.
|
||||
*/
|
||||
NON_GPROF_ENTRY(btext)
|
||||
ENTRY(btext)
|
||||
|
||||
/* Tell the bios to warmboot next time */
|
||||
movw $0x1234,0x472
|
||||
@ -79,7 +79,7 @@ NON_GPROF_ENTRY(btext)
|
||||
jmp 0b
|
||||
|
||||
/* la57_trampoline(%rdi pml5) */
|
||||
NON_GPROF_ENTRY(la57_trampoline)
|
||||
ENTRY(la57_trampoline)
|
||||
movq %rsp,%r11
|
||||
movq %rbx,%r10
|
||||
leaq la57_trampoline_end(%rip),%rsp
|
||||
@ -118,11 +118,11 @@ l2: movq %r11,%rsp
|
||||
movq %r10,%rbx
|
||||
retq
|
||||
.p2align 4,0
|
||||
NON_GPROF_ENTRY(la57_trampoline_gdt_desc)
|
||||
ENTRY(la57_trampoline_gdt_desc)
|
||||
.word la57_trampoline_end - la57_trampoline_gdt
|
||||
.long 0 /* filled by pmap_bootstrap_la57 */
|
||||
.p2align 4,0
|
||||
NON_GPROF_ENTRY(la57_trampoline_gdt)
|
||||
ENTRY(la57_trampoline_gdt)
|
||||
.long 0x00000000 /* null desc */
|
||||
.long 0x00000000
|
||||
.long 0x00000000 /* 64bit code */
|
||||
@ -132,7 +132,7 @@ NON_GPROF_ENTRY(la57_trampoline_gdt)
|
||||
.long 0x0000ffff /* universal data */
|
||||
.long 0x00cf9300
|
||||
.dcb.l 16,0
|
||||
NON_GPROF_ENTRY(la57_trampoline_end)
|
||||
ENTRY(la57_trampoline_end)
|
||||
|
||||
.bss
|
||||
ALIGN_DATA /* just to be sure */
|
||||
|
@ -1,323 +0,0 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
|
||||
*
|
||||
* Copyright (c) 1996 Bruce D. Evans.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#ifdef GUPROF
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/cpu.h>
|
||||
#include <sys/eventhandler.h>
|
||||
#include <sys/gmon.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/sysctl.h>
|
||||
|
||||
#include <machine/clock.h>
|
||||
#include <machine/timerreg.h>
|
||||
|
||||
#define CPUTIME_CLOCK_UNINITIALIZED 0
|
||||
#define CPUTIME_CLOCK_I8254 1
|
||||
#define CPUTIME_CLOCK_TSC 2
|
||||
#define CPUTIME_CLOCK_I8254_SHIFT 7
|
||||
|
||||
int cputime_bias = 1; /* initialize for locality of reference */
|
||||
|
||||
static int cputime_clock = CPUTIME_CLOCK_UNINITIALIZED;
|
||||
static int cputime_prof_active;
|
||||
#endif /* GUPROF */
|
||||
|
||||
#ifdef __GNUCLIKE_ASM
|
||||
#if defined(SMP) && defined(GUPROF)
|
||||
#define MPLOCK " \n\
|
||||
movl $1,%edx \n\
|
||||
9: \n\
|
||||
xorl %eax,%eax \n\
|
||||
lock \n\
|
||||
cmpxchgl %edx,mcount_lock \n\
|
||||
jne 9b \n"
|
||||
#define MPUNLOCK "movl $0,mcount_lock \n"
|
||||
#else /* !(SMP && GUPROF) */
|
||||
#define MPLOCK
|
||||
#define MPUNLOCK
|
||||
#endif /* SMP && GUPROF */
|
||||
|
||||
__asm(" \n\
|
||||
GM_STATE = 0 \n\
|
||||
GMON_PROF_OFF = 3 \n\
|
||||
\n\
|
||||
.text \n\
|
||||
.p2align 4,0x90 \n\
|
||||
.globl __mcount \n\
|
||||
.type __mcount,@function \n\
|
||||
__mcount: \n\
|
||||
# \n\
|
||||
# Check that we are profiling. Do it early for speed. \n\
|
||||
# \n\
|
||||
cmpl $GMON_PROF_OFF,_gmonparam+GM_STATE \n\
|
||||
je .mcount_exit \n\
|
||||
# \n\
|
||||
# __mcount is the same as [.]mcount except the caller \n\
|
||||
# hasn't changed the stack except to call here, so the \n\
|
||||
# caller's raddr is above our raddr. \n\
|
||||
# \n\
|
||||
pushq %rax \n\
|
||||
pushq %rdx \n\
|
||||
pushq %rcx \n\
|
||||
pushq %rsi \n\
|
||||
pushq %rdi \n\
|
||||
pushq %r8 \n\
|
||||
pushq %r9 \n\
|
||||
movq 7*8+8(%rsp),%rdi \n\
|
||||
jmp .got_frompc \n\
|
||||
\n\
|
||||
.p2align 4,0x90 \n\
|
||||
.globl .mcount \n\
|
||||
.mcount: \n\
|
||||
cmpl $GMON_PROF_OFF,_gmonparam+GM_STATE \n\
|
||||
je .mcount_exit \n\
|
||||
# \n\
|
||||
# The caller's stack frame has already been built, so \n\
|
||||
# %rbp is the caller's frame pointer. The caller's \n\
|
||||
# raddr is in the caller's frame following the caller's \n\
|
||||
# caller's frame pointer. \n\
|
||||
# \n\
|
||||
pushq %rax \n\
|
||||
pushq %rdx \n\
|
||||
pushq %rcx \n\
|
||||
pushq %rsi \n\
|
||||
pushq %rdi \n\
|
||||
pushq %r8 \n\
|
||||
pushq %r9 \n\
|
||||
movq 8(%rbp),%rdi \n\
|
||||
.got_frompc: \n\
|
||||
# \n\
|
||||
# Our raddr is the caller's pc. \n\
|
||||
# \n\
|
||||
movq 7*8(%rsp),%rsi \n\
|
||||
\n\
|
||||
pushfq \n\
|
||||
cli \n"
|
||||
MPLOCK " \n\
|
||||
call mcount \n"
|
||||
MPUNLOCK " \n\
|
||||
popfq \n\
|
||||
popq %r9 \n\
|
||||
popq %r8 \n\
|
||||
popq %rdi \n\
|
||||
popq %rsi \n\
|
||||
popq %rcx \n\
|
||||
popq %rdx \n\
|
||||
popq %rax \n\
|
||||
.mcount_exit: \n\
|
||||
ret $0 \n\
|
||||
");
|
||||
#else /* !__GNUCLIKE_ASM */
|
||||
#error "this file needs to be ported to your compiler"
|
||||
#endif /* __GNUCLIKE_ASM */
|
||||
|
||||
#ifdef GUPROF
|
||||
/*
|
||||
* [.]mexitcount saves the return register(s), loads selfpc and calls
|
||||
* mexitcount(selfpc) to do the work. Someday it should be in a machine
|
||||
* dependent file together with cputime(), __mcount and [.]mcount. cputime()
|
||||
* can't just be put in machdep.c because it has to be compiled without -pg.
|
||||
*/
|
||||
#ifdef __GNUCLIKE_ASM
|
||||
__asm(" \n\
|
||||
.text \n\
|
||||
# \n\
|
||||
# Dummy label to be seen when gprof -u hides [.]mexitcount. \n\
|
||||
# \n\
|
||||
.p2align 4,0x90 \n\
|
||||
.globl __mexitcount \n\
|
||||
.type __mexitcount,@function \n\
|
||||
__mexitcount: \n\
|
||||
nop \n\
|
||||
\n\
|
||||
GMON_PROF_HIRES = 4 \n\
|
||||
\n\
|
||||
.p2align 4,0x90 \n\
|
||||
.globl .mexitcount \n\
|
||||
.mexitcount: \n\
|
||||
cmpl $GMON_PROF_HIRES,_gmonparam+GM_STATE \n\
|
||||
jne .mexitcount_exit \n\
|
||||
pushq %rax \n\
|
||||
pushq %rdx \n\
|
||||
pushq %rcx \n\
|
||||
pushq %rsi \n\
|
||||
pushq %rdi \n\
|
||||
pushq %r8 \n\
|
||||
pushq %r9 \n\
|
||||
movq 7*8(%rsp),%rdi \n\
|
||||
pushfq \n\
|
||||
cli \n"
|
||||
MPLOCK " \n\
|
||||
call mexitcount \n"
|
||||
MPUNLOCK " \n\
|
||||
popfq \n\
|
||||
popq %r9 \n\
|
||||
popq %r8 \n\
|
||||
popq %rdi \n\
|
||||
popq %rsi \n\
|
||||
popq %rcx \n\
|
||||
popq %rdx \n\
|
||||
popq %rax \n\
|
||||
.mexitcount_exit: \n\
|
||||
ret $0 \n\
|
||||
");
|
||||
#endif /* __GNUCLIKE_ASM */
|
||||
|
||||
/*
|
||||
* Return the time elapsed since the last call. The units are machine-
|
||||
* dependent.
|
||||
*/
|
||||
int
|
||||
cputime()
|
||||
{
|
||||
u_int count;
|
||||
int delta;
|
||||
u_char high, low;
|
||||
static u_int prev_count;
|
||||
|
||||
if (cputime_clock == CPUTIME_CLOCK_TSC) {
|
||||
/*
|
||||
* Scale the TSC a little to make cputime()'s frequency
|
||||
* fit in an int, assuming that the TSC frequency fits
|
||||
* in a u_int. Use a fixed scale since dynamic scaling
|
||||
* would be slower and we can't really use the low bit
|
||||
* of precision.
|
||||
*/
|
||||
count = (u_int)rdtsc() & ~1u;
|
||||
delta = (int)(count - prev_count) >> 1;
|
||||
prev_count = count;
|
||||
return (delta);
|
||||
}
|
||||
|
||||
/*
|
||||
* Read the current value of the 8254 timer counter 0.
|
||||
*/
|
||||
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
|
||||
low = inb(TIMER_CNTR0);
|
||||
high = inb(TIMER_CNTR0);
|
||||
count = ((high << 8) | low) << CPUTIME_CLOCK_I8254_SHIFT;
|
||||
|
||||
/*
|
||||
* The timer counts down from TIMER_CNTR0_MAX to 0 and then resets.
|
||||
* While profiling is enabled, this routine is called at least twice
|
||||
* per timer reset (for mcounting and mexitcounting hardclock()),
|
||||
* so at most one reset has occurred since the last call, and one
|
||||
* has occurred iff the current count is larger than the previous
|
||||
* count. This allows counter underflow to be detected faster
|
||||
* than in microtime().
|
||||
*/
|
||||
delta = prev_count - count;
|
||||
prev_count = count;
|
||||
if ((int) delta <= 0)
|
||||
return (delta + (i8254_max_count << CPUTIME_CLOCK_I8254_SHIFT));
|
||||
return (delta);
|
||||
}
|
||||
|
||||
static int
|
||||
sysctl_machdep_cputime_clock(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
int clock;
|
||||
int error;
|
||||
|
||||
clock = cputime_clock;
|
||||
error = sysctl_handle_opaque(oidp, &clock, sizeof clock, req);
|
||||
if (error == 0 && req->newptr != NULL) {
|
||||
if (clock < 0 || clock > CPUTIME_CLOCK_TSC)
|
||||
return (EINVAL);
|
||||
cputime_clock = clock;
|
||||
}
|
||||
return (error);
|
||||
}
|
||||
|
||||
SYSCTL_PROC(_machdep, OID_AUTO, cputime_clock,
|
||||
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, sizeof(u_int),
|
||||
sysctl_machdep_cputime_clock, "I",
|
||||
"");
|
||||
|
||||
/*
|
||||
* The start and stop routines need not be here since we turn off profiling
|
||||
* before calling them. They are here for convenience.
|
||||
*/
|
||||
|
||||
void
|
||||
startguprof(gp)
|
||||
struct gmonparam *gp;
|
||||
{
|
||||
uint64_t freq;
|
||||
|
||||
freq = atomic_load_acq_64(&tsc_freq);
|
||||
if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) {
|
||||
if (freq != 0 && mp_ncpus == 1)
|
||||
cputime_clock = CPUTIME_CLOCK_TSC;
|
||||
else
|
||||
cputime_clock = CPUTIME_CLOCK_I8254;
|
||||
}
|
||||
if (cputime_clock == CPUTIME_CLOCK_TSC) {
|
||||
gp->profrate = freq >> 1;
|
||||
cputime_prof_active = 1;
|
||||
} else
|
||||
gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
|
||||
cputime_bias = 0;
|
||||
cputime();
|
||||
}
|
||||
|
||||
void
|
||||
stopguprof(gp)
|
||||
struct gmonparam *gp;
|
||||
{
|
||||
if (cputime_clock == CPUTIME_CLOCK_TSC)
|
||||
cputime_prof_active = 0;
|
||||
}
|
||||
|
||||
/* If the cpu frequency changed while profiling, report a warning. */
|
||||
static void
|
||||
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
|
||||
{
|
||||
|
||||
/*
|
||||
* If there was an error during the transition or
|
||||
* TSC is P-state invariant, don't do anything.
|
||||
*/
|
||||
if (status != 0 || tsc_is_invariant)
|
||||
return;
|
||||
if (cputime_prof_active && cputime_clock == CPUTIME_CLOCK_TSC)
|
||||
printf("warning: cpu freq changed while profiling active\n");
|
||||
}
|
||||
|
||||
EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL,
|
||||
EVENTHANDLER_PRI_ANY);
|
||||
|
||||
#endif /* GUPROF */
|
@ -38,7 +38,7 @@
|
||||
* Signal trampoline, copied to top of user stack
|
||||
*
|
||||
*/
|
||||
NON_GPROF_ENTRY(sigcode)
|
||||
ENTRY(sigcode)
|
||||
call *SIGF_HANDLER(%rsp) /* call signal handler */
|
||||
lea SIGF_UC(%rsp),%rdi /* get ucontext_t */
|
||||
pushq $0 /* junk to fake return addr. */
|
||||
|
@ -1490,7 +1490,6 @@ ENTRY(lgdt)
|
||||
popq %rax
|
||||
pushq $KCSEL
|
||||
pushq %rax
|
||||
MEXITCOUNT
|
||||
lretq
|
||||
END(lgdt)
|
||||
|
||||
|
@ -69,11 +69,11 @@
|
||||
.text
|
||||
.p2align PAGE_SHIFT, 0x90 /* Hypercall_page needs to be PAGE aligned */
|
||||
|
||||
NON_GPROF_ENTRY(hypercall_page)
|
||||
ENTRY(hypercall_page)
|
||||
.skip 0x1000, 0x90 /* Fill with "nop"s */
|
||||
|
||||
/* Legacy PVH entry point, to be removed. */
|
||||
NON_GPROF_ENTRY(xen_start)
|
||||
ENTRY(xen_start)
|
||||
/* Don't trust what the loader gives for rflags. */
|
||||
pushq $PSL_KERNEL
|
||||
popfq
|
||||
@ -97,7 +97,7 @@ NON_GPROF_ENTRY(xen_start)
|
||||
|
||||
/* PVH entry point. */
|
||||
.code32
|
||||
NON_GPROF_ENTRY(xen_start32)
|
||||
ENTRY(xen_start32)
|
||||
|
||||
/* Load flat GDT */
|
||||
movl $VTOP(gdtdesc32), %eax
|
||||
|
@ -7,10 +7,6 @@
|
||||
# $FreeBSD$
|
||||
#
|
||||
|
||||
#
|
||||
# We want LINT to cover profiling as well.
|
||||
profile 2
|
||||
|
||||
#
|
||||
# Enable the kernel DTrace hooks which are required to load the DTrace
|
||||
# kernel modules.
|
||||
|
@ -73,8 +73,6 @@ int0x80_syscall_common:
|
||||
pushfq
|
||||
andq $~(PSL_D | PSL_AC),(%rsp)
|
||||
popfq
|
||||
FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
movq %rsp, %rdi
|
||||
call ia32_syscall
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
@ -55,92 +55,14 @@
|
||||
#define CNAME(csym) csym
|
||||
|
||||
#define ALIGN_DATA .p2align 3 /* 8 byte alignment, zero filled */
|
||||
#ifdef GPROF
|
||||
#define ALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */
|
||||
#else
|
||||
#define ALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */
|
||||
#endif
|
||||
#define SUPERALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */
|
||||
|
||||
#define GEN_ENTRY(name) ALIGN_TEXT; .globl CNAME(name); \
|
||||
.type CNAME(name),@function; CNAME(name):
|
||||
#define NON_GPROF_ENTRY(name) GEN_ENTRY(name)
|
||||
#define NON_GPROF_RET .byte 0xc3 /* opcode for `ret' */
|
||||
|
||||
#define END(name) .size name, . - name
|
||||
|
||||
#ifdef GPROF
|
||||
/*
|
||||
* __mcount is like [.]mcount except that doesn't require its caller to set
|
||||
* up a frame pointer. It must be called before pushing anything onto the
|
||||
* stack. gcc should eventually generate code to call __mcount in most
|
||||
* cases. This would make -pg in combination with -fomit-frame-pointer
|
||||
* useful. gcc has a configuration variable PROFILE_BEFORE_PROLOGUE to
|
||||
* allow profiling before setting up the frame pointer, but this is
|
||||
* inadequate for good handling of special cases, e.g., -fpic works best
|
||||
* with profiling after the prologue.
|
||||
*
|
||||
* [.]mexitcount is a new function to support non-statistical profiling if an
|
||||
* accurate clock is available. For C sources, calls to it are generated
|
||||
* by the FreeBSD extension `-mprofiler-epilogue' to gcc. It is best to
|
||||
* call [.]mexitcount at the end of a function like the MEXITCOUNT macro does,
|
||||
* but gcc currently generates calls to it at the start of the epilogue to
|
||||
* avoid problems with -fpic.
|
||||
*
|
||||
* [.]mcount and __mcount may clobber the call-used registers and %ef.
|
||||
* [.]mexitcount may clobber %ecx and %ef.
|
||||
*
|
||||
* Cross-jumping makes non-statistical profiling timing more complicated.
|
||||
* It is handled in many cases by calling [.]mexitcount before jumping. It
|
||||
* is handled for conditional jumps using CROSSJUMP() and CROSSJUMP_LABEL().
|
||||
* It is handled for some fault-handling jumps by not sharing the exit
|
||||
* routine.
|
||||
*
|
||||
* ALTENTRY() must be before a corresponding ENTRY() so that it can jump to
|
||||
* the main entry point. Note that alt entries are counted twice. They
|
||||
* have to be counted as ordinary entries for gprof to get the call times
|
||||
* right for the ordinary entries.
|
||||
*
|
||||
* High local labels are used in macros to avoid clashes with local labels
|
||||
* in functions.
|
||||
*
|
||||
* Ordinary `ret' is used instead of a macro `RET' because there are a lot
|
||||
* of `ret's. 0xc3 is the opcode for `ret' (`#define ret ... ret' can't
|
||||
* be used because this file is sometimes preprocessed in traditional mode).
|
||||
* `ret' clobbers eflags but this doesn't matter.
|
||||
*/
|
||||
#define ALTENTRY(name) GEN_ENTRY(name) ; MCOUNT ; MEXITCOUNT ; jmp 9f
|
||||
#define CROSSJUMP(jtrue, label, jfalse) \
|
||||
jfalse 8f; MEXITCOUNT; jmp __CONCAT(to,label); 8:
|
||||
#define CROSSJUMPTARGET(label) \
|
||||
ALIGN_TEXT; __CONCAT(to,label): ; MCOUNT; jmp label
|
||||
#define ENTRY(name) GEN_ENTRY(name) ; 9: ; MCOUNT
|
||||
#define FAKE_MCOUNT(caller) pushq caller ; call __mcount ; popq %rcx
|
||||
#define MCOUNT call __mcount
|
||||
#define MCOUNT_LABEL(name) GEN_ENTRY(name) ; nop ; ALIGN_TEXT
|
||||
#ifdef GUPROF
|
||||
#define MEXITCOUNT call .mexitcount
|
||||
#define ret MEXITCOUNT ; NON_GPROF_RET
|
||||
#else
|
||||
#define MEXITCOUNT
|
||||
#endif
|
||||
|
||||
#else /* !GPROF */
|
||||
/*
|
||||
* ALTENTRY() has to align because it is before a corresponding ENTRY().
|
||||
* ENTRY() has to align to because there may be no ALTENTRY() before it.
|
||||
* If there is a previous ALTENTRY() then the alignment code for ENTRY()
|
||||
* is empty.
|
||||
*/
|
||||
#define ALTENTRY(name) GEN_ENTRY(name)
|
||||
#define CROSSJUMP(jtrue, label, jfalse) jtrue label
|
||||
#define CROSSJUMPTARGET(label)
|
||||
#define ENTRY(name) GEN_ENTRY(name)
|
||||
#define FAKE_MCOUNT(caller)
|
||||
#define MCOUNT
|
||||
#define MCOUNT_LABEL(name)
|
||||
#define MEXITCOUNT
|
||||
#endif /* GPROF */
|
||||
#define ALTENTRY(name) GEN_ENTRY(name)
|
||||
#define END(name) .size name, . - name
|
||||
|
||||
/*
|
||||
* Convenience for adding frame pointers to hand-coded ASM. Useful for
|
||||
|
@ -35,74 +35,9 @@
|
||||
#ifndef _MACHINE_PROFILE_H_
|
||||
#define _MACHINE_PROFILE_H_
|
||||
|
||||
#ifndef _SYS_CDEFS_H_
|
||||
#error this file needs sys/cdefs.h as a prerequisite
|
||||
#endif
|
||||
#ifndef _KERNEL
|
||||
|
||||
#ifdef _KERNEL
|
||||
|
||||
/*
|
||||
* Config generates something to tell the compiler to align functions on 16
|
||||
* byte boundaries. A strict alignment is good for keeping the tables small.
|
||||
*/
|
||||
#define FUNCTION_ALIGNMENT 16
|
||||
|
||||
/*
|
||||
* The kernel uses assembler stubs instead of unportable inlines.
|
||||
* This is mainly to save a little time when profiling is not enabled,
|
||||
* which is the usual case for the kernel.
|
||||
*/
|
||||
#define _MCOUNT_DECL void mcount
|
||||
#define MCOUNT
|
||||
|
||||
#ifdef GUPROF
|
||||
#define MCOUNT_DECL(s)
|
||||
#define MCOUNT_ENTER(s)
|
||||
#define MCOUNT_EXIT(s)
|
||||
#ifdef __GNUCLIKE_ASM
|
||||
#define MCOUNT_OVERHEAD(label) \
|
||||
__asm __volatile("pushq %0; call __mcount; popq %%rcx" \
|
||||
: \
|
||||
: "i" (label) \
|
||||
: "cx", "r10", "r11", "memory")
|
||||
#define MEXITCOUNT_OVERHEAD() \
|
||||
__asm __volatile("call .mexitcount; 1:" \
|
||||
: : \
|
||||
: "r10", "r11", "memory")
|
||||
#define MEXITCOUNT_OVERHEAD_GETLABEL(labelp) \
|
||||
__asm __volatile("movq $1b,%0" : "=rm" (labelp))
|
||||
#else
|
||||
#error this file needs to be ported to your compiler
|
||||
#endif /* !__GNUCLIKE_ASM */
|
||||
#else /* !GUPROF */
|
||||
#define MCOUNT_DECL(s) register_t s;
|
||||
#ifdef SMP
|
||||
extern int mcount_lock;
|
||||
#define MCOUNT_ENTER(s) { s = intr_disable(); \
|
||||
while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \
|
||||
/* nothing */ ; }
|
||||
#define MCOUNT_EXIT(s) { atomic_store_rel_int(&mcount_lock, 0); \
|
||||
intr_restore(s); }
|
||||
#else
|
||||
#define MCOUNT_ENTER(s) { s = intr_disable(); }
|
||||
#define MCOUNT_EXIT(s) (intr_restore(s))
|
||||
#endif
|
||||
#endif /* GUPROF */
|
||||
|
||||
void bintr(void);
|
||||
void btrap(void);
|
||||
void eintr(void);
|
||||
void user(void);
|
||||
|
||||
#define MCOUNT_FROMPC_USER(pc) \
|
||||
((pc < (uintfptr_t)VM_MAXUSER_ADDRESS) ? (uintfptr_t)user : pc)
|
||||
|
||||
#define MCOUNT_FROMPC_INTR(pc) \
|
||||
((pc >= (uintfptr_t)btrap && pc < (uintfptr_t)eintr) ? \
|
||||
((pc >= (uintfptr_t)bintr) ? (uintfptr_t)bintr : \
|
||||
(uintfptr_t)btrap) : ~0UL)
|
||||
|
||||
#else /* !_KERNEL */
|
||||
#include <sys/cdefs.h>
|
||||
|
||||
#define FUNCTION_ALIGNMENT 4
|
||||
|
||||
@ -172,28 +107,18 @@ mcount() \
|
||||
|
||||
typedef u_long uintfptr_t;
|
||||
|
||||
#endif /* _KERNEL */
|
||||
|
||||
/*
|
||||
* An unsigned integral type that can hold non-negative difference between
|
||||
* function pointers.
|
||||
*/
|
||||
typedef u_long fptrdiff_t;
|
||||
|
||||
#ifdef _KERNEL
|
||||
|
||||
void mcount(uintfptr_t frompc, uintfptr_t selfpc);
|
||||
|
||||
#else /* !_KERNEL */
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
|
||||
__BEGIN_DECLS
|
||||
#ifdef __GNUCLIKE_ASM
|
||||
void mcount(void) __asm(".mcount");
|
||||
#endif
|
||||
__END_DECLS
|
||||
|
||||
#endif /* _KERNEL */
|
||||
#endif /* !_KERNEL */
|
||||
|
||||
#endif /* !_MACHINE_PROFILE_H_ */
|
||||
|
@ -17,7 +17,7 @@ linux_platform:
|
||||
* To avoid excess stack frame the signal trampoline code emulates
|
||||
* the 'call' instruction.
|
||||
*/
|
||||
NON_GPROF_ENTRY(linux_rt_sigcode)
|
||||
ENTRY(linux_rt_sigcode)
|
||||
movq %rsp, %rbx /* preserve sigframe */
|
||||
call .getip
|
||||
.getip:
|
||||
@ -32,28 +32,28 @@ NON_GPROF_ENTRY(linux_rt_sigcode)
|
||||
.endrtsigcode:
|
||||
0: jmp 0b
|
||||
|
||||
NON_GPROF_ENTRY(__vdso_clock_gettime)
|
||||
ENTRY(__vdso_clock_gettime)
|
||||
movq $LINUX_SYS_linux_clock_gettime,%rax
|
||||
syscall
|
||||
ret
|
||||
.weak clock_gettime
|
||||
.set clock_gettime, __vdso_clock_gettime
|
||||
|
||||
NON_GPROF_ENTRY(__vdso_time)
|
||||
ENTRY(__vdso_time)
|
||||
movq $LINUX_SYS_linux_time,%rax
|
||||
syscall
|
||||
ret
|
||||
.weak time
|
||||
.set time, __vdso_time
|
||||
|
||||
NON_GPROF_ENTRY(__vdso_gettimeofday)
|
||||
ENTRY(__vdso_gettimeofday)
|
||||
movq $LINUX_SYS_gettimeofday,%rax
|
||||
syscall
|
||||
ret
|
||||
.weak gettimeofday
|
||||
.set gettimeofday, __vdso_gettimeofday
|
||||
|
||||
NON_GPROF_ENTRY(__vdso_getcpu)
|
||||
ENTRY(__vdso_getcpu)
|
||||
movq $-38,%rax /* not implemented */
|
||||
ret
|
||||
.weak getcpu
|
||||
|
@ -18,7 +18,7 @@ linux_platform:
|
||||
* To avoid excess stack frame the signal trampoline code emulates
|
||||
* the 'call' instruction.
|
||||
*/
|
||||
NON_GPROF_ENTRY(linux32_sigcode)
|
||||
ENTRY(linux32_sigcode)
|
||||
movl %esp, %ebx /* preserve sigframe */
|
||||
call .getip0
|
||||
.getip0:
|
||||
@ -33,7 +33,7 @@ NON_GPROF_ENTRY(linux32_sigcode)
|
||||
.endsigcode:
|
||||
0: jmp 0b
|
||||
|
||||
NON_GPROF_ENTRY(linux32_rt_sigcode)
|
||||
ENTRY(linux32_rt_sigcode)
|
||||
leal LINUX_RT_SIGF_UC(%esp),%ebx /* linux ucp */
|
||||
leal LINUX_RT_SIGF_SC(%ebx),%ecx /* linux sigcontext */
|
||||
movl %esp, %edi
|
||||
@ -49,7 +49,7 @@ NON_GPROF_ENTRY(linux32_rt_sigcode)
|
||||
.endrtsigcode:
|
||||
0: jmp 0b
|
||||
|
||||
NON_GPROF_ENTRY(linux32_vsyscall)
|
||||
ENTRY(linux32_vsyscall)
|
||||
.startvsyscall:
|
||||
int $0x80
|
||||
ret
|
||||
|
@ -37,7 +37,6 @@
|
||||
#include "assym.inc"
|
||||
|
||||
#define INTR_POP \
|
||||
MEXITCOUNT; \
|
||||
movq TF_RDI(%rsp),%rdi; \
|
||||
movq TF_RSI(%rsp),%rsi; \
|
||||
movq TF_RDX(%rsp),%rdx; \
|
||||
@ -72,7 +71,7 @@
|
||||
movq %rsp, %rsi
|
||||
movq TF_RAX(%rsp), %rdx
|
||||
call dtrace_invop
|
||||
ALTENTRY(dtrace_invop_callsite)
|
||||
ENTRY(dtrace_invop_callsite)
|
||||
cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
|
||||
je bp_push
|
||||
cmpl $DTRACE_INVOP_LEAVE, %eax
|
||||
|
@ -35,7 +35,7 @@ INCLUDES+= -I$S/contrib/libfdt -I$S/contrib/device-tree/include
|
||||
LINUX_DTS_VERSION!= awk '/freebsd,dts-version/ { sub(/;$$/,"", $$NF); print $$NF }' $S/dts/freebsd-compatible.dts
|
||||
CFLAGS += -DLINUX_DTS_VERSION=\"${LINUX_DTS_VERSION}\"
|
||||
|
||||
.if !defined(DEBUG) && !defined(PROFLEVEL)
|
||||
.if !defined(DEBUG)
|
||||
STRIP_FLAGS = -S
|
||||
.endif
|
||||
|
||||
|
@ -855,43 +855,43 @@ dev/aic7xxx/aic7xxx_pci.c optional ahc pci
|
||||
dev/aic7xxx/aic7xxx_reg_print.c optional ahc ahc_reg_pretty_print
|
||||
dev/al_eth/al_eth.c optional al_eth fdt \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
dev/al_eth/al_init_eth_lm.c optional al_eth fdt \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
dev/al_eth/al_init_eth_kr.c optional al_eth fdt \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_hal_iofic.c optional al_iofic \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_hal_serdes_25g.c optional al_serdes \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_hal_serdes_hssp.c optional al_serdes \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_hal_udma_config.c optional al_udma \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_hal_udma_debug.c optional al_udma \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_hal_udma_iofic.c optional al_udma \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_hal_udma_main.c optional al_udma \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/al_serdes.c optional al_serdes \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/eth/al_hal_eth_kr.c optional al_eth \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
contrib/alpine-hal/eth/al_hal_eth_main.c optional al_eth \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
dev/alc/if_alc.c optional alc pci
|
||||
dev/ale/if_ale.c optional ale pci
|
||||
dev/alpm/alpm.c optional alpm pci
|
||||
@ -4069,7 +4069,6 @@ libkern/inet_ntop.c standard
|
||||
libkern/inet_pton.c standard
|
||||
libkern/jenkins_hash.c standard
|
||||
libkern/murmur3_32.c standard
|
||||
libkern/mcount.c optional profiling-routine
|
||||
libkern/memcchr.c standard
|
||||
libkern/memchr.c standard
|
||||
libkern/memmem.c optional gdb
|
||||
|
@ -121,7 +121,6 @@ amd64/amd64/minidump_machdep.c standard
|
||||
amd64/amd64/mp_machdep.c optional smp
|
||||
amd64/amd64/mpboot.S optional smp
|
||||
amd64/amd64/pmap.c standard
|
||||
amd64/amd64/prof_machdep.c optional profiling-routine
|
||||
amd64/amd64/ptrace_machdep.c standard
|
||||
amd64/amd64/sigtramp.S standard
|
||||
amd64/amd64/support.S standard
|
||||
|
@ -152,4 +152,4 @@ arm/annapurna/alpine/alpine_pci.c optional al_pci fdt
|
||||
arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt
|
||||
arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
|
@ -109,32 +109,32 @@ dev/iommu/iommu_gas.c optional iommu
|
||||
crypto/armv8/armv8_crypto.c optional armv8crypto
|
||||
armv8_crypto_wrap.o optional armv8crypto \
|
||||
dependency "$S/crypto/armv8/armv8_crypto_wrap.c" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8-a+crypto ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} -march=armv8-a+crypto ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "armv8_crypto_wrap.o"
|
||||
aesv8-armx.o optional armv8crypto \
|
||||
dependency "$S/crypto/openssl/aarch64/aesv8-armx.S" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ -I$S/crypto/openssl/crypto ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8-a+crypto ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ -I$S/crypto/openssl/crypto ${WERROR} ${NO_WCAST_QUAL} -march=armv8-a+crypto ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "aesv8-armx.o"
|
||||
ghashv8-armx.o optional armv8crypto \
|
||||
dependency "$S/crypto/openssl/aarch64/ghashv8-armx.S" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ -I$S/crypto/openssl/crypto ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8-a+crypto ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ -I$S/crypto/openssl/crypto ${WERROR} ${NO_WCAST_QUAL} -march=armv8-a+crypto ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "ghashv8-armx.o"
|
||||
|
||||
crypto/des/des_enc.c optional netsmb
|
||||
crypto/openssl/ossl_aarch64.c optional ossl
|
||||
crypto/openssl/aarch64/chacha-armv8.S optional ossl \
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}"
|
||||
crypto/openssl/aarch64/poly1305-armv8.S optional ossl \
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}"
|
||||
crypto/openssl/aarch64/sha1-armv8.S optional ossl \
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}"
|
||||
crypto/openssl/aarch64/sha256-armv8.S optional ossl \
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}"
|
||||
crypto/openssl/aarch64/sha512-armv8.S optional ossl \
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}"
|
||||
|
||||
dev/acpica/acpi_bus_if.m optional acpi
|
||||
dev/acpica/acpi_if.m optional acpi
|
||||
@ -355,7 +355,7 @@ arm/annapurna/alpine/alpine_pci.c optional al_pci fdt
|
||||
arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt
|
||||
arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \
|
||||
no-depend \
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
|
||||
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
|
||||
|
||||
# Broadcom
|
||||
arm64/broadcom/brcmmdio/mdio_mux_iproc.c optional soc_brcm_ns2 fdt
|
||||
|
@ -190,7 +190,6 @@ i386/i386/perfmon.c optional perfmon
|
||||
i386/i386/pmap_base.c standard
|
||||
i386/i386/pmap_nopae.c standard
|
||||
i386/i386/pmap_pae.c standard
|
||||
i386/i386/prof_machdep.c optional profiling-routine
|
||||
i386/i386/ptrace_machdep.c standard
|
||||
i386/i386/sigtramp.s standard
|
||||
i386/i386/support.s standard
|
||||
|
@ -23,27 +23,27 @@ cddl/dev/dtrace/x86/instr_size.c optional dtrace_fbt | dtraceall compile-with
|
||||
crypto/aesni/aesni.c optional aesni
|
||||
aesni_ghash.o optional aesni \
|
||||
dependency "$S/crypto/aesni/aesni_ghash.c" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "aesni_ghash.o"
|
||||
aesni_ccm.o optional aesni \
|
||||
dependency "$S/crypto/aesni/aesni_ccm.c" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "aesni_ccm.o"
|
||||
aesni_wrap.o optional aesni \
|
||||
dependency "$S/crypto/aesni/aesni_wrap.c" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} -mmmx -msse -msse4 -maes ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "aesni_wrap.o"
|
||||
intel_sha1.o optional aesni \
|
||||
dependency "$S/crypto/aesni/intel_sha1.c" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} -mmmx -msse -msse4 -msha ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "intel_sha1.o"
|
||||
intel_sha256.o optional aesni \
|
||||
dependency "$S/crypto/aesni/intel_sha256.c" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \
|
||||
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} -mmmx -msse -msse4 -msha ${.IMPSRC}" \
|
||||
no-implicit-rule \
|
||||
clean "intel_sha256.o"
|
||||
crypto/openssl/ossl_x86.c optional ossl
|
||||
|
@ -463,7 +463,4 @@ embedfs_${MFS_IMAGE:T:R}.o: ${MFS_IMAGE} $S/dev/md/embedfs.S
|
||||
.endif
|
||||
.endif
|
||||
|
||||
# XXX strictly, everything depends on Makefile because changes to ${PROF}
|
||||
# only appear there, but we don't handle that.
|
||||
|
||||
.include "kern.mk"
|
||||
|
@ -92,21 +92,6 @@ CFLAGS+= -fno-common
|
||||
# XXX LOCORE means "don't declare C stuff" not "for locore.s".
|
||||
ASM_CFLAGS= -x assembler-with-cpp -DLOCORE ${CFLAGS} ${ASM_CFLAGS.${.IMPSRC:T}}
|
||||
|
||||
.if defined(PROFLEVEL) && ${PROFLEVEL} >= 1
|
||||
CFLAGS+= -DGPROF
|
||||
CFLAGS.gcc+= -falign-functions=16
|
||||
.if ${PROFLEVEL} >= 2
|
||||
CFLAGS+= -DGPROF4 -DGUPROF
|
||||
PROF= -pg
|
||||
.if ${COMPILER_TYPE} == "gcc"
|
||||
PROF+= -mprofiler-epilogue
|
||||
.endif
|
||||
.else
|
||||
PROF= -pg
|
||||
.endif
|
||||
.endif
|
||||
DEFINED_PROF= ${PROF}
|
||||
|
||||
KCSAN_ENABLED!= grep KCSAN opt_global.h || true ; echo
|
||||
.if !empty(KCSAN_ENABLED)
|
||||
SAN_CFLAGS+= -fsanitize=thread
|
||||
@ -138,8 +123,8 @@ GCOV_CFLAGS+= -fprofile-arcs -ftest-coverage
|
||||
|
||||
CFLAGS+= ${GCOV_CFLAGS}
|
||||
|
||||
# Put configuration-specific C flags last (except for ${PROF}) so that they
|
||||
# can override the others.
|
||||
# Put configuration-specific C flags last so that they can override
|
||||
# the others.
|
||||
CFLAGS+= ${CONF_CFLAGS}
|
||||
|
||||
.if defined(LINKER_FEATURES) && ${LINKER_FEATURES:Mbuild-id}
|
||||
@ -176,13 +161,12 @@ CFLAGS+= -fPIE
|
||||
.endif
|
||||
.endif
|
||||
|
||||
NORMAL_C= ${CC} -c ${CFLAGS} ${WERROR} ${PROF} ${.IMPSRC}
|
||||
NORMAL_C= ${CC} -c ${CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
NORMAL_S= ${CC:N${CCACHE_BIN}} -c ${ASM_CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
PROFILE_C= ${CC} -c ${CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
NORMAL_C_NOWERROR= ${CC} -c ${CFLAGS} ${PROF} ${.IMPSRC}
|
||||
NORMAL_C_NOWERROR= ${CC} -c ${CFLAGS} ${.IMPSRC}
|
||||
|
||||
NORMAL_M= ${AWK} -f $S/tools/makeobjops.awk ${.IMPSRC} -c ; \
|
||||
${CC} -c ${CFLAGS} ${WERROR} ${PROF} ${.PREFIX}.c
|
||||
${CC} -c ${CFLAGS} ${WERROR} ${.PREFIX}.c
|
||||
|
||||
NORMAL_FW= uudecode -o ${.TARGET} ${.ALLSRC}
|
||||
NORMAL_FWO= ${CC:N${CCACHE_BIN}} -c ${ASM_CFLAGS} ${WERROR} -o ${.TARGET} \
|
||||
@ -192,9 +176,7 @@ NORMAL_FWO= ${CC:N${CCACHE_BIN}} -c ${ASM_CFLAGS} ${WERROR} -o ${.TARGET} \
|
||||
# for ZSTD in the kernel (include zstd/lib/freebsd before other CFLAGS)
|
||||
ZSTD_C= ${CC} -c -DZSTD_HEAPMODE=1 -I$S/contrib/zstd/lib/freebsd ${CFLAGS} \
|
||||
-I$S/contrib/zstd/lib -I$S/contrib/zstd/lib/common ${WERROR} \
|
||||
-Wno-missing-prototypes ${PROF} -U__BMI__ \
|
||||
-DZSTD_NO_INTRINSICS \
|
||||
${.IMPSRC}
|
||||
-Wno-missing-prototypes -U__BMI__ -DZSTD_NO_INTRINSICS ${.IMPSRC}
|
||||
# https://github.com/facebook/zstd/commit/812e8f2a [zstd 1.4.1]
|
||||
# "Note that [GCC] autovectorization still does not do a good job on the
|
||||
# optimized version, so it's turned off via attribute and flag. I found
|
||||
@ -237,7 +219,7 @@ CDDL_CFLAGS= \
|
||||
-include ${ZINCDIR}/os/freebsd/spl/sys/ccompile.h \
|
||||
-I$S/cddl/contrib/opensolaris/uts/common \
|
||||
-I$S -I$S/cddl/compat/opensolaris
|
||||
CDDL_C= ${CC} -c ${CDDL_CFLAGS} ${WERROR} ${PROF} ${.IMPSRC}
|
||||
CDDL_C= ${CC} -c ${CDDL_CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
|
||||
# Special flags for managing the compat compiles for ZFS
|
||||
ZFS_CFLAGS+= ${CDDL_CFLAGS} -DBUILDING_ZFS -DHAVE_UIO_ZEROCOPY \
|
||||
@ -258,8 +240,8 @@ ZFS_CFLAGS+= -DBITS_PER_LONG=64
|
||||
|
||||
|
||||
ZFS_ASM_CFLAGS= -x assembler-with-cpp -DLOCORE ${ZFS_CFLAGS}
|
||||
ZFS_C= ${CC} -c ${ZFS_CFLAGS} ${WERROR} ${PROF} ${.IMPSRC}
|
||||
ZFS_RPC_C= ${CC} -c ${ZFS_CFLAGS} -DHAVE_RPC_TYPES ${WERROR} ${PROF} ${.IMPSRC}
|
||||
ZFS_C= ${CC} -c ${ZFS_CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
ZFS_RPC_C= ${CC} -c ${ZFS_CFLAGS} -DHAVE_RPC_TYPES ${WERROR} ${.IMPSRC}
|
||||
ZFS_S= ${CC} -c ${ZFS_ASM_CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
|
||||
|
||||
@ -272,7 +254,7 @@ DTRACE_CFLAGS+= -I$S/cddl/contrib/opensolaris/uts/intel -I$S/cddl/dev/dtrace/x86
|
||||
DTRACE_CFLAGS+= -I$S/cddl/contrib/opensolaris/common/util -I$S -DDIS_MEM -DSMP -I$S/cddl/compat/opensolaris
|
||||
DTRACE_CFLAGS+= -I$S/cddl/contrib/opensolaris/uts/common
|
||||
DTRACE_ASM_CFLAGS= -x assembler-with-cpp -DLOCORE ${DTRACE_CFLAGS}
|
||||
DTRACE_C= ${CC} -c ${DTRACE_CFLAGS} ${WERROR} ${PROF} ${.IMPSRC}
|
||||
DTRACE_C= ${CC} -c ${DTRACE_CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
DTRACE_S= ${CC} -c ${DTRACE_ASM_CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
|
||||
# Special flags for managing the compat compiles for DTrace/FBT
|
||||
@ -280,7 +262,7 @@ FBT_CFLAGS= -DBUILDING_DTRACE -nostdinc -I$S/cddl/dev/fbt/${MACHINE_CPUARCH} -I$
|
||||
.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
|
||||
FBT_CFLAGS+= -I$S/cddl/dev/fbt/x86
|
||||
.endif
|
||||
FBT_C= ${CC} -c ${FBT_CFLAGS} ${WERROR} ${PROF} ${.IMPSRC}
|
||||
FBT_C= ${CC} -c ${FBT_CFLAGS} ${WERROR} ${.IMPSRC}
|
||||
|
||||
.if ${MK_CTF} != "no"
|
||||
NORMAL_CTFCONVERT= ${CTFCONVERT} ${CTFFLAGS} ${.TARGET}
|
||||
@ -300,7 +282,7 @@ OFEDINCLUDES= -I$S/ofed/include -I$S/ofed/include/uapi ${LINUXKPI_INCLUDES}
|
||||
OFEDNOERR= -Wno-cast-qual -Wno-pointer-arith
|
||||
OFEDCFLAGS= ${CFLAGS:N-I*} -DCONFIG_INFINIBAND_USER_MEM \
|
||||
${OFEDINCLUDES} ${CFLAGS:M-I*} ${OFEDNOERR}
|
||||
OFED_C_NOIMP= ${CC} -c -o ${.TARGET} ${OFEDCFLAGS} ${WERROR} ${PROF}
|
||||
OFED_C_NOIMP= ${CC} -c -o ${.TARGET} ${OFEDCFLAGS} ${WERROR}
|
||||
OFED_C= ${OFED_C_NOIMP} ${.IMPSRC}
|
||||
|
||||
# mlxfw C flags.
|
||||
|
@ -8,7 +8,6 @@ COUNT_XINVLTLB_HITS opt_smp.h
|
||||
COUNT_IPIS opt_smp.h
|
||||
DISABLE_PG_G opt_pmap.h
|
||||
DISABLE_PSE opt_pmap.h
|
||||
I586_PMC_GUPROF opt_i586_guprof.h
|
||||
MAXMEM
|
||||
MPTABLE_FORCE_HTT
|
||||
MP_WATCHDOG
|
||||
|
@ -39,8 +39,6 @@
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
INTR_HANDLER vmbus_isr
|
||||
FAKE_MCOUNT(TF_RIP(%rsp))
|
||||
movq %rsp, %rdi
|
||||
call vmbus_handle_intr
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
@ -44,9 +44,7 @@ IDTVEC(vmbus_isr)
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
pushl %esp
|
||||
call vmbus_handle_intr
|
||||
add $4, %esp
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
@ -7,10 +7,6 @@
|
||||
# $FreeBSD$
|
||||
#
|
||||
|
||||
#
|
||||
# We want LINT to cover profiling as well.
|
||||
profile 2
|
||||
|
||||
#
|
||||
# Enable the kernel DTrace hooks which are required to load the DTrace
|
||||
# kernel modules.
|
||||
@ -907,7 +903,6 @@ options PV_STATS
|
||||
|
||||
options FB_INSTALL_CDEV # install a CDEV entry in /dev
|
||||
|
||||
options I586_PMC_GUPROF=0x70000
|
||||
options KBDIO_DEBUG=2
|
||||
options KBD_MAXRETRY=4
|
||||
options KBD_MAXWAIT=6
|
||||
|
@ -79,7 +79,6 @@ X\()\vec_name:
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
cmpl $0,x2apic_mode
|
||||
je 2f
|
||||
movl $(MSR_APIC_ISR0 + \index),%ecx
|
||||
@ -98,7 +97,6 @@ X\()\vec_name:
|
||||
call *%eax
|
||||
addl $8, %esp /* discard parameter */
|
||||
4:
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
.endm
|
||||
|
||||
@ -136,12 +134,10 @@ IDTVEC(timerint)
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
pushl %esp
|
||||
movl $lapic_handle_timer, %eax
|
||||
call *%eax
|
||||
add $4, %esp
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
@ -155,10 +151,8 @@ IDTVEC(cmcint)
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
movl $lapic_handle_cmc, %eax
|
||||
call *%eax
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
@ -172,10 +166,8 @@ IDTVEC(errorint)
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
movl $lapic_handle_error, %eax
|
||||
call *%eax
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
#ifdef XENHVM
|
||||
@ -190,12 +182,10 @@ IDTVEC(xen_intr_upcall)
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
pushl %esp
|
||||
movl $xen_intr_handle_upcall, %eax
|
||||
call *%eax
|
||||
add $4, %esp
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
#endif
|
||||
|
||||
@ -272,10 +262,8 @@ IDTVEC(ipi_intr_bitmap_handler)
|
||||
cld
|
||||
KENTER
|
||||
call as_lapic_eoi
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
movl $ipi_bitmap_handler, %eax
|
||||
call *%eax
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
@ -319,10 +307,8 @@ IDTVEC(ipi_swi)
|
||||
cld
|
||||
KENTER
|
||||
call as_lapic_eoi
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
movl $ipi_swi_handler, %eax
|
||||
call *%eax
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
|
@ -55,14 +55,12 @@ X\()\vec_name:
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
pushl %esp
|
||||
pushl $\irq_num /* pass the IRQ */
|
||||
movl $atpic_handle_intr, %eax
|
||||
call *%eax
|
||||
addl $8, %esp /* discard the parameters */
|
||||
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
.endm
|
||||
|
||||
|
@ -96,9 +96,6 @@ tramp_idleptd: .long 0
|
||||
* must be PIC.
|
||||
*/
|
||||
|
||||
MCOUNT_LABEL(user)
|
||||
MCOUNT_LABEL(btrap)
|
||||
|
||||
#define TRAP(a) pushl $(a) ; jmp alltraps
|
||||
|
||||
IDTVEC(div)
|
||||
@ -172,7 +169,6 @@ alltraps_with_regs_pushed:
|
||||
SET_KERNEL_SREGS
|
||||
cld
|
||||
KENTER
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
calltrap:
|
||||
pushl %esp
|
||||
movl $trap,%eax
|
||||
@ -182,7 +178,6 @@ calltrap:
|
||||
/*
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
.globl irettraps
|
||||
@ -227,8 +222,7 @@ irettraps:
|
||||
movl $(2 * TF_SZ - TF_FS), %ecx
|
||||
jmp 6f
|
||||
/* kernel mode, normal */
|
||||
5: FAKE_MCOUNT(TF_EIP(%esp))
|
||||
jmp calltrap
|
||||
5: jmp calltrap
|
||||
6: cmpl $PMAP_TRM_MIN_ADDRESS, %esp /* trampoline stack ? */
|
||||
jb 5b /* if not, no need to change stacks */
|
||||
movl (tramp_idleptd - 1b)(%ebx), %eax
|
||||
@ -239,7 +233,6 @@ irettraps:
|
||||
movl %esp, %esi
|
||||
rep; movsb
|
||||
movl %edx, %esp
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
jmp calltrap
|
||||
|
||||
/*
|
||||
@ -307,7 +300,6 @@ IDTVEC(dbg)
|
||||
1: popl %eax
|
||||
movl (tramp_idleptd - 1b)(%eax), %eax
|
||||
movl %eax, %cr3
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
testl $PSL_VM, TF_EFLAGS(%esp)
|
||||
jnz dbg_user
|
||||
testb $SEL_RPL_MASK,TF_CS(%esp)
|
||||
@ -321,7 +313,6 @@ dbg_user:
|
||||
call *%eax
|
||||
add $4, %esp
|
||||
movl $T_RESERVED, TF_TRAPNO(%esp)
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
IDTVEC(mchk)
|
||||
@ -352,7 +343,6 @@ nmi_mchk_common:
|
||||
1: popl %eax
|
||||
movl (tramp_idleptd - 1b)(%eax), %eax
|
||||
movl %eax, %cr3
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
jmp calltrap
|
||||
|
||||
/*
|
||||
@ -375,12 +365,10 @@ IDTVEC(int0x80_syscall)
|
||||
movl $handle_ibrs_entry,%eax
|
||||
call *%eax
|
||||
sti
|
||||
FAKE_MCOUNT(TF_EIP(%esp))
|
||||
pushl %esp
|
||||
movl $syscall, %eax
|
||||
call *%eax
|
||||
add $4, %esp
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
ENTRY(fork_trampoline)
|
||||
@ -395,25 +383,13 @@ ENTRY(fork_trampoline)
|
||||
/*
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp doreti
|
||||
|
||||
|
||||
/*
|
||||
* To efficiently implement classification of trap and interrupt handlers
|
||||
* for profiling, there must be only trap handlers between the labels btrap
|
||||
* and bintr, and only interrupt handlers between the labels bintr and
|
||||
* eintr. This is implemented (partly) by including files that contain
|
||||
* some of the handlers. Before including the files, set up a normal asm
|
||||
* environment so that the included files doen't need to know that they are
|
||||
* included.
|
||||
*/
|
||||
|
||||
.data
|
||||
.p2align 4
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
MCOUNT_LABEL(bintr)
|
||||
|
||||
#ifdef DEV_ATPIC
|
||||
#include <i386/i386/atpic_vector.s>
|
||||
@ -437,7 +413,6 @@ MCOUNT_LABEL(bintr)
|
||||
#include <i386/i386/vm86bios.s>
|
||||
|
||||
.text
|
||||
MCOUNT_LABEL(eintr)
|
||||
|
||||
#include <i386/i386/copyout_fast.s>
|
||||
|
||||
@ -451,7 +426,6 @@ MCOUNT_LABEL(eintr)
|
||||
.type doreti,@function
|
||||
.globl doreti
|
||||
doreti:
|
||||
FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */
|
||||
doreti_next:
|
||||
/*
|
||||
* Check if ASTs can be handled now. ASTs cannot be safely
|
||||
@ -504,8 +478,6 @@ doreti_ast:
|
||||
* registers. The fault is handled in trap.c.
|
||||
*/
|
||||
doreti_exit:
|
||||
MEXITCOUNT
|
||||
|
||||
cmpl $T_NMI, TF_TRAPNO(%esp)
|
||||
je doreti_iret_nmi
|
||||
cmpl $T_MCHK, TF_TRAPNO(%esp)
|
||||
|
@ -79,7 +79,7 @@ bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
|
||||
* This is where the bootblocks start us, set the ball rolling...
|
||||
*
|
||||
*/
|
||||
NON_GPROF_ENTRY(btext)
|
||||
ENTRY(btext)
|
||||
|
||||
/* Tell the bios to warmboot next time */
|
||||
movw $0x1234,0x472
|
||||
@ -451,6 +451,6 @@ END(identify_cpu)
|
||||
.text
|
||||
.p2align PAGE_SHIFT, 0x90 /* Hypercall_page needs to be PAGE aligned */
|
||||
|
||||
NON_GPROF_ENTRY(hypercall_page)
|
||||
ENTRY(hypercall_page)
|
||||
.skip 0x1000, 0x90 /* Fill with "nop"s */
|
||||
#endif
|
||||
|
@ -68,7 +68,7 @@
|
||||
*/
|
||||
.p2align 4
|
||||
|
||||
NON_GPROF_ENTRY(MPentry)
|
||||
ENTRY(MPentry)
|
||||
CHECKPOINT(0x36, 3)
|
||||
/*
|
||||
* Enable features on this processor. We don't support SMP on
|
||||
@ -142,7 +142,7 @@ mp_begin: /* now running relocated at KERNBASE */
|
||||
|
||||
BOOTMP1:
|
||||
|
||||
NON_GPROF_ENTRY(bootMP)
|
||||
ENTRY(bootMP)
|
||||
.code16
|
||||
cli
|
||||
CHECKPOINT(0x34, 1)
|
||||
|
@ -1,404 +0,0 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996 Bruce D. Evans.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef GUPROF
#include "opt_i586_guprof.h"
#include "opt_perfmon.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/gmon.h>
#include <sys/kernel.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/perfmon.h>
#include <machine/timerreg.h>

#define CPUTIME_CLOCK_UNINITIALIZED 0
#define CPUTIME_CLOCK_I8254 1
#define CPUTIME_CLOCK_TSC 2
#define CPUTIME_CLOCK_I586_PMC 3
#define CPUTIME_CLOCK_I8254_SHIFT 7

int cputime_bias = 1; /* initialize for locality of reference */

static int cputime_clock = CPUTIME_CLOCK_UNINITIALIZED;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
static u_int cputime_clock_pmc_conf = I586_PMC_GUPROF;
static int cputime_clock_pmc_init;
static struct gmonparam saved_gmp;
#endif
#if defined(I586_CPU) || defined(I686_CPU)
static int cputime_prof_active;
#endif
#endif /* GUPROF */

#ifdef __GNUCLIKE_ASM
#if defined(SMP) && defined(GUPROF)
#define MPLOCK " \n\
movl $1,%edx \n\
9: \n\
xorl %eax,%eax \n\
lock \n\
cmpxchgl %edx,mcount_lock \n\
jne 9b \n"
#define MPUNLOCK "movl $0,mcount_lock \n"
#else /* !(SMP && GUPROF) */
#define MPLOCK
#define MPUNLOCK
#endif /* SMP && GUPROF */

__asm(" \n\
GM_STATE = 0 \n\
GMON_PROF_OFF = 3 \n\
 \n\
.text \n\
.p2align 4,0x90 \n\
.globl __mcount \n\
.type __mcount,@function \n\
__mcount: \n\
# \n\
# Check that we are profiling. Do it early for speed. \n\
# \n\
cmpl $GMON_PROF_OFF,_gmonparam+GM_STATE \n\
je .mcount_exit \n\
# \n\
# __mcount is the same as [.]mcount except the caller \n\
# hasn't changed the stack except to call here, so the \n\
# caller's raddr is above our raddr. \n\
# \n\
movl 4(%esp),%edx \n\
jmp .got_frompc \n\
 \n\
.p2align 4,0x90 \n\
.globl .mcount \n\
.mcount: \n\
cmpl $GMON_PROF_OFF,_gmonparam+GM_STATE \n\
je .mcount_exit \n\
# \n\
# The caller's stack frame has already been built, so \n\
# %ebp is the caller's frame pointer. The caller's \n\
# raddr is in the caller's frame following the caller's \n\
# caller's frame pointer. \n\
# \n\
movl 4(%ebp),%edx \n\
.got_frompc: \n\
# \n\
# Our raddr is the caller's pc. \n\
# \n\
movl (%esp),%eax \n\
 \n\
pushfl \n\
pushl %eax \n\
pushl %edx \n\
cli \n"
MPLOCK " \n\
call mcount \n"
MPUNLOCK " \n\
addl $8,%esp \n\
popfl \n\
.mcount_exit: \n\
ret $0 \n\
");

void __mcount(void);
void (*__mcountp)(void) = __mcount;
#else /* !__GNUCLIKE_ASM */
#error "this file needs to be ported to your compiler"
#endif /* __GNUCLIKE_ASM */

#ifdef GUPROF
/*
 * [.]mexitcount saves the return register(s), loads selfpc and calls
 * mexitcount(selfpc) to do the work.  Someday it should be in a machine
 * dependent file together with cputime(), __mcount and [.]mcount.  cputime()
 * can't just be put in machdep.c because it has to be compiled without -pg.
 */
#ifdef __GNUCLIKE_ASM
__asm(" \n\
.text \n\
# \n\
# Dummy label to be seen when gprof -u hides [.]mexitcount. \n\
# \n\
.p2align 4,0x90 \n\
.globl __mexitcount \n\
.type __mexitcount,@function \n\
__mexitcount: \n\
nop \n\
 \n\
GMON_PROF_HIRES = 4 \n\
 \n\
.p2align 4,0x90 \n\
.globl .mexitcount \n\
.mexitcount: \n\
cmpl $GMON_PROF_HIRES,_gmonparam+GM_STATE \n\
jne .mexitcount_exit \n\
pushl %edx \n\
pushl %eax \n\
movl 8(%esp),%eax \n\
pushfl \n\
pushl %eax \n\
cli \n"
MPLOCK " \n\
call mexitcount \n"
MPUNLOCK " \n\
addl $4,%esp \n\
popfl \n\
popl %eax \n\
popl %edx \n\
.mexitcount_exit: \n\
ret $0 \n\
");
#endif /* __GNUCLIKE_ASM */

void __mexitcount(void);
void (*__mexitcountp)(void) = __mexitcount;

/*
 * Return the time elapsed since the last call.  The units are machine-
 * dependent.
 */
int
cputime()
{
u_int count;
int delta;
#if (defined(I586_CPU) || defined(I686_CPU)) && \
defined(PERFMON) && defined(I586_PMC_GUPROF) && !defined(SMP)
u_quad_t event_count;
#endif
u_char high, low;
static u_int prev_count;

#if defined(I586_CPU) || defined(I686_CPU)
if (cputime_clock == CPUTIME_CLOCK_TSC) {
/*
 * Scale the TSC a little to make cputime()'s frequency
 * fit in an int, assuming that the TSC frequency fits
 * in a u_int.  Use a fixed scale since dynamic scaling
 * would be slower and we can't really use the low bit
 * of precision.
 */
count = (u_int)rdtsc() & ~1u;
delta = (int)(count - prev_count) >> 1;
prev_count = count;
return (delta);
}
#if defined(PERFMON) && defined(I586_PMC_GUPROF) && !defined(SMP)
if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
/*
 * XXX perfmon_read() should be inlined so that the
 * perfmon module doesn't need to be compiled with
 * profiling disabled and so that it is fast.
 */
perfmon_read(0, &event_count);

count = (u_int)event_count;
delta = (int)(count - prev_count);
prev_count = count;
return (delta);
}
#endif /* PERFMON && I586_PMC_GUPROF && !SMP */
#endif /* I586_CPU || I686_CPU */

/*
 * Read the current value of the 8254 timer counter 0.
 */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
count = ((high << 8) | low) << CPUTIME_CLOCK_I8254_SHIFT;

/*
 * The timer counts down from TIMER_CNTR0_MAX to 0 and then resets.
 * While profiling is enabled, this routine is called at least twice
 * per timer reset (for mcounting and mexitcounting hardclock()),
 * so at most one reset has occurred since the last call, and one
 * has occurred iff the current count is larger than the previous
 * count.  This allows counter underflow to be detected faster
 * than in microtime().
 */
delta = prev_count - count;
prev_count = count;
if ((int) delta <= 0)
return (delta + (i8254_max_count << CPUTIME_CLOCK_I8254_SHIFT));
return (delta);
}

static int
sysctl_machdep_cputime_clock(SYSCTL_HANDLER_ARGS)
{
int clock;
int error;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
int event;
struct pmc pmc;
#endif

clock = cputime_clock;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (clock == CPUTIME_CLOCK_I586_PMC) {
pmc.pmc_val = cputime_clock_pmc_conf;
clock += pmc.pmc_event;
}
#endif
error = sysctl_handle_opaque(oidp, &clock, sizeof clock, req);
if (error == 0 && req->newptr != NULL) {
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (clock >= CPUTIME_CLOCK_I586_PMC) {
event = clock - CPUTIME_CLOCK_I586_PMC;
if (event >= 256)
return (EINVAL);
pmc.pmc_num = 0;
pmc.pmc_event = event;
pmc.pmc_unit = 0;
pmc.pmc_flags = PMCF_E | PMCF_OS | PMCF_USR;
pmc.pmc_mask = 0;
cputime_clock_pmc_conf = pmc.pmc_val;
cputime_clock = CPUTIME_CLOCK_I586_PMC;
} else
#endif
{
if (clock < 0 || clock >= CPUTIME_CLOCK_I586_PMC)
return (EINVAL);
cputime_clock = clock;
}
}
return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, cputime_clock,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, sizeof(u_int),
sysctl_machdep_cputime_clock, "I",
"");

/*
 * The start and stop routines need not be here since we turn off profiling
 * before calling them.  They are here for convenience.
 */

void
startguprof(gp)
struct gmonparam *gp;
{
#if defined(I586_CPU) || defined(I686_CPU)
uint64_t freq;

freq = atomic_load_acq_64(&tsc_freq);
if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) {
if (freq != 0 && mp_ncpus == 1)
cputime_clock = CPUTIME_CLOCK_TSC;
else
cputime_clock = CPUTIME_CLOCK_I8254;
}
if (cputime_clock == CPUTIME_CLOCK_TSC) {
gp->profrate = freq >> 1;
cputime_prof_active = 1;
} else
gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
if (perfmon_avail() &&
perfmon_setup(0, cputime_clock_pmc_conf) == 0) {
if (perfmon_start(0) != 0)
perfmon_fini(0);
else {
/* XXX 1 event == 1 us. */
gp->profrate = 1000000;

saved_gmp = *gp;

/* Zap overheads.  They are invalid. */
gp->cputime_overhead = 0;
gp->mcount_overhead = 0;
gp->mcount_post_overhead = 0;
gp->mcount_pre_overhead = 0;
gp->mexitcount_overhead = 0;
gp->mexitcount_post_overhead = 0;
gp->mexitcount_pre_overhead = 0;

cputime_clock_pmc_init = TRUE;
}
}
}
#endif /* PERFMON && I586_PMC_GUPROF */
#else /* !(I586_CPU || I686_CPU) */
if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED)
cputime_clock = CPUTIME_CLOCK_I8254;
gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
#endif /* I586_CPU || I686_CPU */
cputime_bias = 0;
cputime();
}

void
stopguprof(gp)
struct gmonparam *gp;
{
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (cputime_clock_pmc_init) {
*gp = saved_gmp;
perfmon_fini(0);
cputime_clock_pmc_init = FALSE;
}
#endif
#if defined(I586_CPU) || defined(I686_CPU)
if (cputime_clock == CPUTIME_CLOCK_TSC)
cputime_prof_active = 0;
#endif
}

#if defined(I586_CPU) || defined(I686_CPU)
/* If the cpu frequency changed while profiling, report a warning. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{

/*
 * If there was an error during the transition or
 * TSC is P-state invariant, don't do anything.
 */
if (status != 0 || tsc_is_invariant)
return;
if (cputime_prof_active && cputime_clock == CPUTIME_CLOCK_TSC)
printf("warning: cpu freq changed while profiling active\n");
}

EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL,
EVENTHANDLER_PRI_ANY);
#endif /* I586_CPU || I686_CPU */

#endif /* GUPROF */
@ -48,7 +48,7 @@
/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
ENTRY(sigcode)
calll *SIGF_HANDLER(%esp)
leal SIGF_UC(%esp),%eax /* get ucontext */
pushl %eax

@ -155,7 +155,6 @@ ENTRY(bcopy)
movl 8(%esp),%edx
movl %eax,8(%esp)
movl %edx,4(%esp)
MEXITCOUNT
jmp memmove
END(bcopy)

@ -284,7 +283,6 @@ ENTRY(lgdt)
movl (%esp),%eax
pushl %eax
movl $KCSEL,4(%esp)
MEXITCOUNT
lret
END(lgdt)

@ -130,7 +130,6 @@ ENTRY(vm86_bioscall)
/*
 * Return via doreti
 */
MEXITCOUNT
jmp doreti

@ -48,92 +48,14 @@
#define CNAME(csym) csym

#define ALIGN_DATA .p2align 2 /* 4 byte alignment, zero filled */
#ifdef GPROF
#define ALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */
#else
#define ALIGN_TEXT .p2align 2,0x90 /* 4-byte alignment, nop filled */
#endif
#define SUPERALIGN_TEXT .p2align 4,0x90 /* 16-byte alignment, nop filled */

#define GEN_ENTRY(name) ALIGN_TEXT; .globl CNAME(name); \
.type CNAME(name),@function; CNAME(name):
#define NON_GPROF_ENTRY(name) GEN_ENTRY(name)
#define NON_GPROF_RET .byte 0xc3 /* opcode for `ret' */

#define END(name) .size name, . - name

#ifdef GPROF
/*
 * __mcount is like [.]mcount except that doesn't require its caller to set
 * up a frame pointer.  It must be called before pushing anything onto the
 * stack.  gcc should eventually generate code to call __mcount in most
 * cases.  This would make -pg in combination with -fomit-frame-pointer
 * useful.  gcc has a configuration variable PROFILE_BEFORE_PROLOGUE to
 * allow profiling before setting up the frame pointer, but this is
 * inadequate for good handling of special cases, e.g., -fpic works best
 * with profiling after the prologue.
 *
 * [.]mexitcount is a new function to support non-statistical profiling if an
 * accurate clock is available.  For C sources, calls to it are generated
 * by the FreeBSD extension `-mprofiler-epilogue' to gcc.  It is best to
 * call [.]mexitcount at the end of a function like the MEXITCOUNT macro does,
 * but gcc currently generates calls to it at the start of the epilogue to
 * avoid problems with -fpic.
 *
 * [.]mcount and __mcount may clobber the call-used registers and %ef.
 * [.]mexitcount may clobber %ecx and %ef.
 *
 * Cross-jumping makes non-statistical profiling timing more complicated.
 * It is handled in many cases by calling [.]mexitcount before jumping.  It
 * is handled for conditional jumps using CROSSJUMP() and CROSSJUMP_LABEL().
 * It is handled for some fault-handling jumps by not sharing the exit
 * routine.
 *
 * ALTENTRY() must be before a corresponding ENTRY() so that it can jump to
 * the main entry point.  Note that alt entries are counted twice.  They
 * have to be counted as ordinary entries for gprof to get the call times
 * right for the ordinary entries.
 *
 * High local labels are used in macros to avoid clashes with local labels
 * in functions.
 *
 * Ordinary `ret' is used instead of a macro `RET' because there are a lot
 * of `ret's.  0xc3 is the opcode for `ret' (`#define ret ... ret' can't
 * be used because this file is sometimes preprocessed in traditional mode).
 * `ret' clobbers eflags but this doesn't matter.
 */
#define ALTENTRY(name) GEN_ENTRY(name) ; MCOUNT ; MEXITCOUNT ; jmp 9f
#define CROSSJUMP(jtrue, label, jfalse) \
jfalse 8f; MEXITCOUNT; jmp __CONCAT(to,label); 8:
#define CROSSJUMPTARGET(label) \
ALIGN_TEXT; __CONCAT(to,label): ; MCOUNT; jmp label
#define ENTRY(name) GEN_ENTRY(name) ; 9: ; MCOUNT
#define FAKE_MCOUNT(caller) pushl caller ; call *__mcountp ; popl %ecx
#define MCOUNT call *__mcountp
#define MCOUNT_LABEL(name) GEN_ENTRY(name) ; nop ; ALIGN_TEXT
#ifdef GUPROF
#define MEXITCOUNT call *__mexitcountp
#define ret MEXITCOUNT ; NON_GPROF_RET
#else
#define MEXITCOUNT
#endif

#else /* !GPROF */
/*
 * ALTENTRY() has to align because it is before a corresponding ENTRY().
 * ENTRY() has to align too because there may be no ALTENTRY() before it.
 * If there is a previous ALTENTRY() then the alignment code for ENTRY()
 * is empty.
 */
#define ALTENTRY(name) GEN_ENTRY(name)
#define CROSSJUMP(jtrue, label, jfalse) jtrue label
#define CROSSJUMPTARGET(label)
#define ENTRY(name) GEN_ENTRY(name)
#define FAKE_MCOUNT(caller)
#define MCOUNT
#define MCOUNT_LABEL(name)
#define MEXITCOUNT
#endif /* GPROF */
#define ALTENTRY(name) GEN_ENTRY(name)
#define END(name) .size name, . - name
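
As an editorial aside, the following minimal C sketch (not part of the removed sources; every name in it is an illustrative stand-in) shows what the GPROF/GUPROF hooks defined above amount to at the C level: a call counted on function entry, and under GUPROF a second call counted on exit.

#include <stdio.h>

static unsigned long entry_counts;  /* stands in for the froms/tos arcs */
static unsigned long exit_counts;   /* stands in for GUPROF exit charging */

static void fake_mcount(void)     { entry_counts++; }
static void fake_mexitcount(void) { exit_counts++; }

static int
profiled_function(int x)
{
	fake_mcount();          /* roughly what ENTRY()'s MCOUNT arranges */
	x = x * 2 + 1;          /* the function body */
	fake_mexitcount();      /* roughly what MEXITCOUNT adds under GUPROF */
	return (x);
}

int
main(void)
{
	printf("%d\n", profiled_function(20));
	printf("entries %lu, exits %lu\n", entry_counts, exit_counts);
	return (0);
}

In the real kernel the entry hook is emitted by the compiler's -pg option and the exit hook by the historical -mprofiler-epilogue extension described above; this sketch only mirrors the call placement.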

#ifdef LOCORE

@ -35,87 +35,9 @@
#ifndef _MACHINE_PROFILE_H_
#define _MACHINE_PROFILE_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif
#ifndef _KERNEL

#ifdef _KERNEL

/*
 * Config generates something to tell the compiler to align functions on 16
 * byte boundaries.  A strict alignment is good for keeping the tables small.
 */
#define FUNCTION_ALIGNMENT 16

/*
 * The kernel uses assembler stubs instead of unportable inlines.
 * This is mainly to save a little time when profiling is not enabled,
 * which is the usual case for the kernel.
 */
#define _MCOUNT_DECL void mcount
#define MCOUNT

#ifdef GUPROF
#define MCOUNT_DECL(s)
#define MCOUNT_ENTER(s)
#define MCOUNT_EXIT(s)
#ifdef __GNUCLIKE_ASM
#define MCOUNT_OVERHEAD(label) \
__asm __volatile("pushl %0; call __mcount; popl %%ecx" \
: \
: "i" (label) \
: "ax", "dx", "cx", "memory")
#define MEXITCOUNT_OVERHEAD() \
__asm __volatile("call .mexitcount; 1:" \
: : \
: "cx", "memory")
#define MEXITCOUNT_OVERHEAD_GETLABEL(labelp) \
__asm __volatile("movl $1b,%0" : "=rm" (labelp))
#else
#error
#endif /* !__GNUCLIKE_ASM */
#else /* !GUPROF */
#define MCOUNT_DECL(s) register_t s;
#ifdef SMP
extern int mcount_lock;
#define MCOUNT_ENTER(s) { s = intr_disable(); \
while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \
/* nothing */ ; }
#define MCOUNT_EXIT(s) { atomic_store_rel_int(&mcount_lock, 0); \
intr_restore(s); }
#else
#define MCOUNT_ENTER(s) { s = intr_disable(); }
#define MCOUNT_EXIT(s) (intr_restore(s))
#endif
#endif /* GUPROF */
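
For illustration only, here is a user-space sketch of the non-GUPROF MCOUNT_ENTER/MCOUNT_EXIT pattern above, using C11 atomics in place of the kernel's intr_disable()/atomic_cmpset_acq_int(); the interrupt-disable half is only noted in comments, and none of these names exist in the kernel.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int mcount_lock;

static void
sketch_mcount_enter(void)
{
	int expected;

	/* The kernel macro first does s = intr_disable(). */
	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(&mcount_lock,
	    &expected, 1, memory_order_acquire, memory_order_relaxed));
}

static void
sketch_mcount_exit(void)
{
	atomic_store_explicit(&mcount_lock, 0, memory_order_release);
	/* ...and the kernel macro ends with intr_restore(s). */
}

int
main(void)
{
	sketch_mcount_enter();
	printf("inside the mcount critical section\n");
	sketch_mcount_exit();
	return (0);
}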
|
||||
|
||||
void bintr(void);
|
||||
void btrap(void);
|
||||
void eintr(void);
|
||||
#if 0
|
||||
void end_exceptions(void);
|
||||
void start_exceptions(void);
|
||||
#else
|
||||
#include <machine/pmc_mdep.h> /* XXX */
|
||||
#endif
|
||||
void user(void);
|
||||
|
||||
#include <machine/md_var.h> /* XXX for setidt_disp */
|
||||
|
||||
#define MCOUNT_DETRAMP(pc) do { \
|
||||
if ((pc) >= (uintfptr_t)start_exceptions + setidt_disp && \
|
||||
(pc) < (uintfptr_t)end_exceptions + setidt_disp) \
|
||||
(pc) -= setidt_disp; \
|
||||
} while (0)
|
||||
|
||||
#define MCOUNT_FROMPC_INTR(pc) \
|
||||
((pc >= (uintfptr_t)btrap && pc < (uintfptr_t)eintr) ? \
|
||||
((pc >= (uintfptr_t)bintr) ? (uintfptr_t)bintr : \
|
||||
(uintfptr_t)btrap) : ~0U)
|
||||
|
||||
#define MCOUNT_USERPC ((uintfptr_t)user)
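
A stand-alone illustration (with hypothetical label addresses, not the real kernel symbols) of the classification MCOUNT_FROMPC_INTR performs: a pc inside the trap/interrupt stub region collapses to the btrap or bintr pseudo-label, while anything else yields ~0U so that _mcount() keeps the frompc it already computed; MCOUNT_USERPC is the separate fallback used for user-space frompc values.

#include <stdio.h>

typedef unsigned int uintfptr_t;

/* Hypothetical label addresses standing in for btrap/bintr/eintr. */
#define BTRAP 0x1000u
#define BINTR 0x2000u
#define EINTR 0x3000u

static uintfptr_t
classify_intr_frompc(uintfptr_t pc)
{
	if (pc >= BTRAP && pc < EINTR)
		return (pc >= BINTR ? BINTR : BTRAP);
	return (~0u);		/* not a trap/interrupt stub */
}

int
main(void)
{
	printf("%#x -> %#x (trap region)\n", 0x1800u, classify_intr_frompc(0x1800u));
	printf("%#x -> %#x (interrupt region)\n", 0x2800u, classify_intr_frompc(0x2800u));
	printf("%#x -> %#x (ordinary text)\n", 0x4000u, classify_intr_frompc(0x4000u));
	return (0);
}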

#else /* !_KERNEL */
#include <sys/cdefs.h>

#define FUNCTION_ALIGNMENT 4

@ -159,28 +81,18 @@ mcount() \

typedef u_int uintfptr_t;

#endif /* _KERNEL */

/*
 * An unsigned integral type that can hold non-negative difference between
 * function pointers.
 */
typedef u_int fptrdiff_t;

#ifdef _KERNEL

void mcount(uintfptr_t frompc, uintfptr_t selfpc);

#else /* !_KERNEL */

#include <sys/cdefs.h>

__BEGIN_DECLS
#ifdef __GNUCLIKE_ASM
void mcount(void) __asm(".mcount");
#endif
__END_DECLS

#endif /* _KERNEL */
#endif /* !_KERNEL */

#endif /* !_MACHINE_PROFILE_H_ */

@ -11,7 +11,7 @@
 * To avoid excess stack frame the signal trampoline code emulates
 * the 'call' instruction.
 */
NON_GPROF_ENTRY(linux_sigcode)
ENTRY(linux_sigcode)
movl %esp, %ebx /* preserve sigframe */
call .getip0
.getip0:
@ -26,7 +26,7 @@ NON_GPROF_ENTRY(linux_sigcode)
.endsigcode:
0: jmp 0b

NON_GPROF_ENTRY(linux_rt_sigcode)
ENTRY(linux_rt_sigcode)
leal LINUX_RT_SIGF_UC(%esp),%ebx /* linux ucp */
leal LINUX_RT_SIGF_SC(%ebx),%ecx /* linux sigcontext */
movl %esp, %edi
@ -42,7 +42,7 @@ NON_GPROF_ENTRY(linux_rt_sigcode)
.endrtsigcode:
0: jmp 0b

NON_GPROF_ENTRY(linux_vsyscall)
ENTRY(linux_vsyscall)
.startvsyscall:
int $0x80
ret

@ -46,351 +46,6 @@ __FBSDID("$FreeBSD$");

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>
#undef MCOUNT

static MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");

static void kmstartup(void *);
SYSINIT(kmem, SI_SUB_KPROF, SI_ORDER_FIRST, kmstartup, NULL);

struct gmonparam _gmonparam = { GMON_PROF_OFF };

#ifdef GUPROF
void
nullfunc_loop_profiled()
{
int i;

for (i = 0; i < CALIB_SCALE; i++)
nullfunc_profiled();
}

#define nullfunc_loop_profiled_end nullfunc_profiled /* XXX */

void
nullfunc_profiled()
{
}
#endif /* GUPROF */

/*
 * Update the histograms to support extending the text region arbitrarily.
 * This is done slightly naively (no sparse regions), so will waste slight
 * amounts of memory, but will overall work nicely enough to allow profiling
 * of KLDs.
 */
void
kmupetext(uintfptr_t nhighpc)
{
struct gmonparam np; /* slightly large */
struct gmonparam *p = &_gmonparam;
char *cp;

GIANT_REQUIRED;
bcopy(p, &np, sizeof(*p));
np.highpc = ROUNDUP(nhighpc, HISTFRACTION * sizeof(HISTCOUNTER));
if (np.highpc <= p->highpc)
return;
np.textsize = np.highpc - p->lowpc;
np.kcountsize = np.textsize / HISTFRACTION;
np.hashfraction = HASHFRACTION;
np.fromssize = np.textsize / HASHFRACTION;
np.tolimit = np.textsize * ARCDENSITY / 100;
if (np.tolimit < MINARCS)
np.tolimit = MINARCS;
else if (np.tolimit > MAXARCS)
np.tolimit = MAXARCS;
np.tossize = np.tolimit * sizeof(struct tostruct);
cp = malloc(np.kcountsize + np.fromssize + np.tossize,
M_GPROF, M_WAITOK);
/*
 * Check for something else extending highpc while we slept.
 */
if (np.highpc <= p->highpc) {
free(cp, M_GPROF);
return;
}
np.tos = (struct tostruct *)cp;
cp += np.tossize;
np.kcount = (HISTCOUNTER *)cp;
cp += np.kcountsize;
np.froms = (u_short *)cp;
#ifdef GUPROF
/* Reinitialize pointers to overhead counters. */
np.cputime_count = &KCOUNT(&np, PC_TO_I(&np, cputime));
np.mcount_count = &KCOUNT(&np, PC_TO_I(&np, mcount));
np.mexitcount_count = &KCOUNT(&np, PC_TO_I(&np, mexitcount));
#endif
critical_enter();
bcopy(p->tos, np.tos, p->tossize);
bzero((char *)np.tos + p->tossize, np.tossize - p->tossize);
bcopy(p->kcount, np.kcount, p->kcountsize);
bzero((char *)np.kcount + p->kcountsize, np.kcountsize -
p->kcountsize);
bcopy(p->froms, np.froms, p->fromssize);
bzero((char *)np.froms + p->fromssize, np.fromssize - p->fromssize);
cp = (char *)p->tos;
bcopy(&np, p, sizeof(*p));
critical_exit();
free(cp, M_GPROF);
}

static void
kmstartup(void *dummy)
{
char *cp;
struct gmonparam *p = &_gmonparam;
#ifdef GUPROF
int cputime_overhead;
int empty_loop_time;
int i;
int mcount_overhead;
int mexitcount_overhead;
int nullfunc_loop_overhead;
int nullfunc_loop_profiled_time;
uintfptr_t tmp_addr;
#endif

/*
 * Round lowpc and highpc to multiples of the density we're using
 * so the rest of the scaling (here and in gprof) stays in ints.
 */
p->lowpc = ROUNDDOWN((u_long)btext, HISTFRACTION * sizeof(HISTCOUNTER));
p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
p->textsize = p->highpc - p->lowpc;
printf("Profiling kernel, textsize=%lu [%jx..%jx]\n",
p->textsize, (uintmax_t)p->lowpc, (uintmax_t)p->highpc);
p->kcountsize = p->textsize / HISTFRACTION;
p->hashfraction = HASHFRACTION;
p->fromssize = p->textsize / HASHFRACTION;
p->tolimit = p->textsize * ARCDENSITY / 100;
if (p->tolimit < MINARCS)
p->tolimit = MINARCS;
else if (p->tolimit > MAXARCS)
p->tolimit = MAXARCS;
p->tossize = p->tolimit * sizeof(struct tostruct);
cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
M_GPROF, M_WAITOK | M_ZERO);
p->tos = (struct tostruct *)cp;
cp += p->tossize;
p->kcount = (HISTCOUNTER *)cp;
cp += p->kcountsize;
p->froms = (u_short *)cp;
p->histcounter_type = FUNCTION_ALIGNMENT / HISTFRACTION * NBBY;

#ifdef GUPROF
/* Signed counters. */
p->histcounter_type = -p->histcounter_type;

/* Initialize pointers to overhead counters. */
p->cputime_count = &KCOUNT(p, PC_TO_I(p, cputime));
p->mcount_count = &KCOUNT(p, PC_TO_I(p, mcount));
p->mexitcount_count = &KCOUNT(p, PC_TO_I(p, mexitcount));

/*
 * Disable interrupts to avoid interference while we calibrate
 * things.
 */
critical_enter();

/*
 * Determine overheads.
 * XXX this needs to be repeated for each useful timer/counter.
 */
cputime_overhead = 0;
startguprof(p);
for (i = 0; i < CALIB_SCALE; i++)
cputime_overhead += cputime();

empty_loop();
startguprof(p);
empty_loop();
empty_loop_time = cputime();

nullfunc_loop_profiled();

/*
 * Start profiling.  There won't be any normal function calls since
 * interrupts are disabled, but we will call the profiling routines
 * directly to determine their overheads.
 */
p->state = GMON_PROF_HIRES;

startguprof(p);
nullfunc_loop_profiled();

startguprof(p);
for (i = 0; i < CALIB_SCALE; i++)
MCOUNT_OVERHEAD(sys_profil);
mcount_overhead = KCOUNT(p, PC_TO_I(p, sys_profil));

startguprof(p);
for (i = 0; i < CALIB_SCALE; i++)
MEXITCOUNT_OVERHEAD();
MEXITCOUNT_OVERHEAD_GETLABEL(tmp_addr);
mexitcount_overhead = KCOUNT(p, PC_TO_I(p, tmp_addr));

p->state = GMON_PROF_OFF;
stopguprof(p);

critical_exit();

nullfunc_loop_profiled_time = 0;
for (tmp_addr = (uintfptr_t)nullfunc_loop_profiled;
tmp_addr < (uintfptr_t)nullfunc_loop_profiled_end;
tmp_addr += HISTFRACTION * sizeof(HISTCOUNTER))
nullfunc_loop_profiled_time += KCOUNT(p, PC_TO_I(p, tmp_addr));
#define CALIB_DOSCALE(count) (((count) + CALIB_SCALE / 3) / CALIB_SCALE)
#define c2n(count, freq) ((int)((count) * 1000000000LL / freq))
printf("cputime %d, empty_loop %d, nullfunc_loop_profiled %d, mcount %d, mexitcount %d\n",
CALIB_DOSCALE(c2n(cputime_overhead, p->profrate)),
CALIB_DOSCALE(c2n(empty_loop_time, p->profrate)),
CALIB_DOSCALE(c2n(nullfunc_loop_profiled_time, p->profrate)),
CALIB_DOSCALE(c2n(mcount_overhead, p->profrate)),
CALIB_DOSCALE(c2n(mexitcount_overhead, p->profrate)));
cputime_overhead -= empty_loop_time;
mcount_overhead -= empty_loop_time;
mexitcount_overhead -= empty_loop_time;

/*-
 * Profiling overheads are determined by the times between the
 * following events:
 *	MC1: mcount() is called
 *	MC2: cputime() (called from mcount()) latches the timer
 *	MC3: mcount() completes
 *	ME1: mexitcount() is called
 *	ME2: cputime() (called from mexitcount()) latches the timer
 *	ME3: mexitcount() completes.
 * The times between the events vary slightly depending on instruction
 * combination and cache misses, etc.  Attempt to determine the
 * minimum times.  These can be subtracted from the profiling times
 * without much risk of reducing the profiling times below what they
 * would be when profiling is not configured.  Abbreviate:
 *	ab = minimum time between MC1 and MC3
 *	a  = minimum time between MC1 and MC2
 *	b  = minimum time between MC2 and MC3
 *	cd = minimum time between ME1 and ME3
 *	c  = minimum time between ME1 and ME2
 *	d  = minimum time between ME2 and ME3.
 * These satisfy the relations:
 *	ab            <= mcount_overhead		(just measured)
 *	a + b         <= ab
 *	cd            <= mexitcount_overhead		(just measured)
 *	c + d         <= cd
 *	a + d         <= nullfunc_loop_profiled_time	(just measured)
 *	a >= 0, b >= 0, c >= 0, d >= 0.
 * Assume that ab and cd are equal to the minimums.
 */
p->cputime_overhead = CALIB_DOSCALE(cputime_overhead);
p->mcount_overhead = CALIB_DOSCALE(mcount_overhead - cputime_overhead);
p->mexitcount_overhead = CALIB_DOSCALE(mexitcount_overhead
- cputime_overhead);
nullfunc_loop_overhead = nullfunc_loop_profiled_time - empty_loop_time;
p->mexitcount_post_overhead = CALIB_DOSCALE((mcount_overhead
- nullfunc_loop_overhead)
/ 4);
p->mexitcount_pre_overhead = p->mexitcount_overhead
+ p->cputime_overhead
- p->mexitcount_post_overhead;
p->mcount_pre_overhead = CALIB_DOSCALE(nullfunc_loop_overhead)
- p->mexitcount_post_overhead;
p->mcount_post_overhead = p->mcount_overhead
+ p->cputime_overhead
- p->mcount_pre_overhead;
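
To make the pre/post split concrete, here is a small worked example; CALIB_SCALE and all the measured totals are invented numbers, and the formulas simply restate the assignments in kmstartup() above.

#include <stdio.h>

#define CALIB_SCALE 1000
#define CALIB_DOSCALE(count) (((count) + CALIB_SCALE / 3) / CALIB_SCALE)

int
main(void)
{
	/* Hypothetical measured totals over CALIB_SCALE iterations. */
	int cputime_overhead = 10000;
	int mcount_overhead = 80000;
	int mexitcount_overhead = 50000;
	int nullfunc_loop_overhead = 40000;

	/* Per-call values, mirroring the kmstartup() assignments. */
	int cputime_per = CALIB_DOSCALE(cputime_overhead);
	int mcount_per = CALIB_DOSCALE(mcount_overhead - cputime_overhead);
	int mexitcount_per = CALIB_DOSCALE(mexitcount_overhead - cputime_overhead);
	int mexit_post = CALIB_DOSCALE((mcount_overhead - nullfunc_loop_overhead) / 4);
	int mexit_pre = mexitcount_per + cputime_per - mexit_post;
	int mcount_pre = CALIB_DOSCALE(nullfunc_loop_overhead) - mexit_post;
	int mcount_post = mcount_per + cputime_per - mcount_pre;

	/* Prints: cputime 10, mcount 70 (30+50), mexitcount 40 (40+10). */
	printf("per-call: cputime %d, mcount %d (%d+%d), mexitcount %d (%d+%d)\n",
	    cputime_per, mcount_per, mcount_pre, mcount_post,
	    mexitcount_per, mexit_pre, mexit_post);
	return (0);
}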
printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d nsec\n",
c2n(p->cputime_overhead, p->profrate),
c2n(p->mcount_overhead, p->profrate),
c2n(p->mcount_pre_overhead, p->profrate),
c2n(p->mcount_post_overhead, p->profrate),
c2n(p->cputime_overhead, p->profrate),
c2n(p->mexitcount_overhead, p->profrate),
c2n(p->mexitcount_pre_overhead, p->profrate),
c2n(p->mexitcount_post_overhead, p->profrate));
printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d cycles\n",
p->cputime_overhead, p->mcount_overhead,
p->mcount_pre_overhead, p->mcount_post_overhead,
p->cputime_overhead, p->mexitcount_overhead,
p->mexitcount_pre_overhead, p->mexitcount_post_overhead);
#endif /* GUPROF */
}

/*
 * Return kernel profiling information.
 */
static int
sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
{
int *name = (int *) arg1;
u_int namelen = arg2;
struct gmonparam *gp = &_gmonparam;
int error;
int state;

/* all sysctl names at this level are terminal */
if (namelen != 1)
return (ENOTDIR); /* overloaded */

switch (name[0]) {
case GPROF_STATE:
state = gp->state;
error = sysctl_handle_int(oidp, &state, 0, req);
if (error)
return (error);
if (!req->newptr)
return (0);
if (state == GMON_PROF_OFF) {
gp->state = state;
PROC_LOCK(&proc0);
stopprofclock(&proc0);
PROC_UNLOCK(&proc0);
stopguprof(gp);
} else if (state == GMON_PROF_ON) {
gp->state = GMON_PROF_OFF;
stopguprof(gp);
gp->profrate = profhz;
PROC_LOCK(&proc0);
startprofclock(&proc0);
PROC_UNLOCK(&proc0);
gp->state = state;
#ifdef GUPROF
} else if (state == GMON_PROF_HIRES) {
gp->state = GMON_PROF_OFF;
PROC_LOCK(&proc0);
stopprofclock(&proc0);
PROC_UNLOCK(&proc0);
startguprof(gp);
gp->state = state;
#endif
} else if (state != gp->state)
return (EINVAL);
return (0);
case GPROF_COUNT:
return (sysctl_handle_opaque(oidp,
gp->kcount, gp->kcountsize, req));
case GPROF_FROMS:
return (sysctl_handle_opaque(oidp,
gp->froms, gp->fromssize, req));
case GPROF_TOS:
return (sysctl_handle_opaque(oidp,
gp->tos, gp->tossize, req));
case GPROF_GMONPARAM:
return (sysctl_handle_opaque(oidp, gp, sizeof *gp, req));
default:
return (EOPNOTSUPP);
}
/* NOTREACHED */
}

static SYSCTL_NODE(_kern, KERN_PROF, prof,
CTLFLAG_RW | CTLFLAG_MPSAFE, sysctl_kern_prof,
"");
#endif /* GPROF */

/*
 * Profiling system call.
 *

@ -1,316 +0,0 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
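
Before the function itself, a self-contained sketch of the arc bookkeeping it implements may help; the table sizes and the hash are simplified stand-ins for the froms[]/tos[] arrays sized in kmstartup(), but the chain manipulation mirrors the code below.

#include <stdio.h>

#define NFROMS 64
#define NTOS 128

struct arc {
	unsigned long selfpc;	/* callee address */
	long count;		/* number of traversals of this edge */
	unsigned short link;	/* next arc recorded for the same call site */
};

static unsigned short froms[NFROMS];	/* call-site hash -> chain head */
static struct arc tos[NTOS];		/* arc pool; tos[0].link = last used slot */

static void
record_arc(unsigned long frompc, unsigned long selfpc)
{
	unsigned short *head = &froms[frompc % NFROMS];
	unsigned short i;

	for (i = *head; i != 0; i = tos[i].link)
		if (tos[i].selfpc == selfpc) {
			tos[i].count++;		/* edge seen before */
			return;
		}
	i = (unsigned short)++tos[0].link;	/* allocate a new arc */
	if (i >= NTOS)
		return;				/* table full; stop counting */
	tos[i].selfpc = selfpc;
	tos[i].count = 1;
	tos[i].link = *head;			/* push on the chain head */
	*head = i;
}

int
main(void)
{
	record_arc(0x100, 0x200);
	record_arc(0x100, 0x200);
	record_arc(0x100, 0x300);
	printf("arcs from 0x100: ");
	for (unsigned short i = froms[0x100 % NFROMS]; i != 0; i = tos[i].link)
		printf("-> %#lx x%ld ", tos[i].selfpc, tos[i].count);
	printf("\n");
	return (0);
}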
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
int delta;
#endif
fptrdiff_t frompci;
u_short *frompcindex;
struct tostruct *top, *prevtop;
struct gmonparam *p;
long toindex;
#ifdef _KERNEL
MCOUNT_DECL(s)
#endif

p = &_gmonparam;
#ifndef GUPROF /* XXX */
/*
 * check that we are profiling
 * and that we aren't recursively invoked.
 */
if (p->state != GMON_PROF_ON)
return;
#endif
#ifdef _KERNEL
MCOUNT_ENTER(s);
#else
p->state = GMON_PROF_BUSY;
#endif

#ifdef _KERNEL
/* De-relocate any addresses in a (single) trampoline. */
#ifdef MCOUNT_DETRAMP
MCOUNT_DETRAMP(frompc);
MCOUNT_DETRAMP(selfpc);
#endif
/*
 * When we are called from an exception handler, frompc may be
 * a user address.  Convert such frompc's to some representation
 * in kernel address space.
 */
#ifdef MCOUNT_FROMPC_USER
frompc = MCOUNT_FROMPC_USER(frompc);
#elif defined(MCOUNT_USERPC)
/*
 * For separate address spaces, we can only guess that addresses
 * in the range known to us are actually kernel addresses.  Outside
 * of this range, converting to the user address is fail-safe.
 */
if (frompc < p->lowpc || frompc - p->lowpc >= p->textsize)
frompc = MCOUNT_USERPC;
#endif
#endif /* _KERNEL */

frompci = frompc - p->lowpc;
if (frompci >= p->textsize)
goto done;

#ifdef GUPROF
if (p->state == GMON_PROF_HIRES) {
/*
 * Count the time since cputime() was previously called
 * against `frompc'.  Compensate for overheads.
 *
 * cputime() sets its prev_count variable to the count when
 * it is called.  This in effect starts a counter for
 * the next period of execution (normally from now until
 * the next call to mcount() or mexitcount()).  We set
 * cputime_bias to compensate for our own overhead.
 *
 * We use the usual sampling counters since they can be
 * located efficiently.  4-byte counters are usually
 * necessary.  gprof will add up the scattered counts
 * just like it does for statistical profiling.  All
 * counts are signed so that underflow in the subtractions
 * doesn't matter much (negative counts are normally
 * compensated for by larger counts elsewhere).  Underflow
 * shouldn't occur, but may be caused by slightly wrong
 * calibrations or from not clearing cputime_bias.
 */
delta = cputime() - cputime_bias - p->mcount_pre_overhead;
cputime_bias = p->mcount_post_overhead;
KCOUNT(p, frompci) += delta;
*p->cputime_count += p->cputime_overhead;
*p->mcount_count += p->mcount_overhead;
}
#endif /* GUPROF */

#ifdef _KERNEL
/*
 * When we are called from an exception handler, frompc is faked
 * to be for where the exception occurred.  We've just solidified
 * the count for there.  Now convert frompci to an index that
 * represents the kind of exception so that interruptions appear
 * in the call graph as calls from that index instead of calls
 * from all over.
 */
frompc = MCOUNT_FROMPC_INTR(selfpc);
if ((frompc - p->lowpc) < p->textsize)
frompci = frompc - p->lowpc;
#endif

/*
 * check that frompc is a reasonable pc value.
 * for example: signal catchers get called from the stack,
 * not from text space.  too bad.
 */
if (frompci >= p->textsize)
goto done;

frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
toindex = *frompcindex;
if (toindex == 0) {
/*
 * first time traversing this arc
 */
toindex = ++p->tos[0].link;
if (toindex >= p->tolimit)
/* halt further profiling */
goto overflow;

*frompcindex = toindex;
top = &p->tos[toindex];
top->selfpc = selfpc;
top->count = 1;
top->link = 0;
goto done;
}
top = &p->tos[toindex];
if (top->selfpc == selfpc) {
/*
 * arc at front of chain; usual case.
 */
top->count++;
goto done;
}
/*
 * have to go looking down chain for it.
 * top points to what we are looking at,
 * prevtop points to previous top.
 * we know it is not at the head of the chain.
 */
for (; /* goto done */; ) {
if (top->link == 0) {
/*
 * top is end of the chain and none of the chain
 * had top->selfpc == selfpc.
 * so we allocate a new tostruct
 * and link it to the head of the chain.
 */
toindex = ++p->tos[0].link;
if (toindex >= p->tolimit)
goto overflow;

top = &p->tos[toindex];
top->selfpc = selfpc;
top->count = 1;
top->link = *frompcindex;
*frompcindex = toindex;
goto done;
}
/*
 * otherwise, check the next arc on the chain.
 */
prevtop = top;
top = &p->tos[top->link];
if (top->selfpc == selfpc) {
/*
 * there it is.
 * increment its count
 * move it to the head of the chain.
 */
top->count++;
toindex = prevtop->link;
prevtop->link = top->link;
top->link = *frompcindex;
*frompcindex = toindex;
goto done;
}
}
done:
#ifdef _KERNEL
MCOUNT_EXIT(s);
#else
p->state = GMON_PROF_ON;
#endif
return;
overflow:
p->state = GMON_PROF_ERROR;
#ifdef _KERNEL
MCOUNT_EXIT(s);
#endif
return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
void
mexitcount(uintfptr_t selfpc)
{
struct gmonparam *p;
uintfptr_t selfpcdiff;

p = &_gmonparam;
#ifdef MCOUNT_DETRAMP
MCOUNT_DETRAMP(selfpc);
#endif
selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
if (selfpcdiff < p->textsize) {
int delta;

/*
 * Count the time since cputime() was previously called
 * against `selfpc'.  Compensate for overheads.
 */
delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
cputime_bias = p->mexitcount_post_overhead;
KCOUNT(p, selfpcdiff) += delta;
*p->cputime_count += p->cputime_overhead;
*p->mexitcount_count += p->mexitcount_overhead;
}
}

#ifndef __GNUCLIKE_ASM
#error "This file uses null asms to prevent timing loops being optimized away."
#endif

void
empty_loop(void)
{
int i;

for (i = 0; i < CALIB_SCALE; i++)
__asm __volatile("");
}

void
nullfunc(void)
{
__asm __volatile("");
}

void
nullfunc_loop(void)
{
int i;

for (i = 0; i < CALIB_SCALE; i++)
nullfunc();
}
#endif /* GUPROF */