Uninline epoch(9) entrance and exit. There is no evidence that modern
processors benefit from avoiding a function call, while inlining clearly
bloats the code. In fact, clang already creates an uninlined copy of
these functions in many object files in the network stack.

- Move epoch_private.h into subr_epoch.c. The code is copied exactly,
  avoiding any changes, including style(9) fixes.
- Remove the private copies of critical_enter/exit.

Reviewed by:	kib, jtl
Differential Revision:	https://reviews.freebsd.org/D17879
Author:	Gleb Smirnoff
Date:	2018-11-13 19:02:11 +00:00
Commit:	a82296c2df (parent: bb4a27f927)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=340404

3 changed files with 153 additions and 258 deletions
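To see the shape of the change at a glance: before, each entry point
existed twice, as an inline body in a private header for tied code plus
an exported *_KBI wrapper for untied modules; afterwards there is a
single real function. A toy model of that refactor follows (every
foo_* name is hypothetical, not from the commit):

/* Toy model of the refactor; all foo_* names are invented. */
struct foo { int refs; };

/* Before: tied consumers inlined the body from a private header ... */
static __inline void
foo_enter_inline(struct foo *f)
{
	f->refs++;		/* body duplicated into every object file */
}

/* ... while untied modules called a stable exported wrapper. */
void
foo_enter_KBI(struct foo *f)
{
	foo_enter_inline(f);
}

/* After: one real function, one symbol, no duplicated bodies. */
void
foo_enter(struct foo *f)
{
	f->refs++;
}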

sys/kern/subr_epoch.c

@@ -55,6 +55,40 @@ __FBSDID("$FreeBSD$");
 
 static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
 
+typedef struct epoch_thread {
+#ifdef EPOCH_TRACKER_DEBUG
+	uint64_t et_magic_pre;
+#endif
+	TAILQ_ENTRY(epoch_thread) et_link;	/* Epoch queue. */
+	struct thread *et_td;		/* pointer to thread in section */
+	ck_epoch_section_t et_section;	/* epoch section object */
+#ifdef EPOCH_TRACKER_DEBUG
+	uint64_t et_magic_post;
+#endif
+} *epoch_thread_t;
+TAILQ_HEAD (epoch_tdlist, epoch_thread);
+
+#ifdef __amd64__
+#define EPOCH_ALIGN CACHE_LINE_SIZE*2
+#else
+#define EPOCH_ALIGN CACHE_LINE_SIZE
+#endif
+
+typedef struct epoch_record {
+	ck_epoch_record_t er_read_record;
+	ck_epoch_record_t er_write_record;
+	volatile struct epoch_tdlist er_tdlist;
+	volatile uint32_t er_gen;
+	uint32_t er_cpuid;
+} __aligned(EPOCH_ALIGN) *epoch_record_t;
+
+struct epoch {
+	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
+	epoch_record_t e_pcpu_record;
+	int e_idx;
+	int e_flags;
+};
+
 /* arbitrary --- needs benchmarking */
 #define MAX_ADAPTIVE_SPIN 100
 #define MAX_EPOCHS 64
@@ -157,6 +191,15 @@ epoch_ctor(epoch_t epoch)
 	}
 }
 
+static void
+epoch_adjust_prio(struct thread *td, u_char prio)
+{
+
+	thread_lock(td);
+	sched_prio(td, prio);
+	thread_unlock(td);
+}
+
 epoch_t
 epoch_alloc(int flags)
 {
@@ -192,32 +235,110 @@ epoch_free(epoch_t epoch)
 	free(epoch, M_EPOCH);
 }
 
-void
-epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
-{
-
-	epoch_enter_preempt(epoch, et);
-}
-
-void
-epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
-{
-
-	epoch_exit_preempt(epoch, et);
-}
-
-void
-epoch_enter_KBI(epoch_t epoch)
-{
-
-	epoch_enter(epoch);
-}
-
-void
-epoch_exit_KBI(epoch_t epoch)
-{
-
-	epoch_exit(epoch);
-}
+static epoch_record_t
+epoch_currecord(epoch_t epoch)
+{
+
+	return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
+}
+
+#define INIT_CHECK(epoch)					\
+	do {							\
+		if (__predict_false((epoch) == NULL))		\
+			return;					\
+	} while (0)
+
+void
+epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
+{
+	struct epoch_record *er;
+	struct epoch_thread *etd;
+	struct thread_lite *td;
+
+	MPASS(cold || epoch != NULL);
+	INIT_CHECK(epoch);
+	etd = (void *)et;
+	MPASS(epoch->e_flags & EPOCH_PREEMPT);
+#ifdef EPOCH_TRACKER_DEBUG
+	etd->et_magic_pre = EPOCH_MAGIC0;
+	etd->et_magic_post = EPOCH_MAGIC1;
+#endif
+	td = (struct thread_lite *)curthread;
+	etd->et_td = (void*)td;
+	td->td_epochnest++;
+	critical_enter();
+	sched_pin_lite(td);
+	td->td_pre_epoch_prio = td->td_priority;
+	er = epoch_currecord(epoch);
+	TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link);
+	ck_epoch_begin(&er->er_read_record, (ck_epoch_section_t *)&etd->et_section);
+	critical_exit();
+}
+
+void
+epoch_enter(epoch_t epoch)
+{
+	struct thread_lite *td;
+	epoch_record_t er;
+
+	MPASS(cold || epoch != NULL);
+	INIT_CHECK(epoch);
+	td = (struct thread_lite *)curthread;
+	td->td_epochnest++;
+	critical_enter();
+	er = epoch_currecord(epoch);
+	ck_epoch_begin(&er->er_read_record, NULL);
+}
+
+void
+epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
+{
+	struct epoch_record *er;
+	struct epoch_thread *etd;
+	struct thread_lite *td;
+
+	INIT_CHECK(epoch);
+	td = (struct thread_lite *)curthread;
+	critical_enter();
+	sched_unpin_lite(td);
+	MPASS(td->td_epochnest);
+	td->td_epochnest--;
+	er = epoch_currecord(epoch);
+	MPASS(epoch->e_flags & EPOCH_PREEMPT);
+	etd = (void *)et;
+	MPASS(etd != NULL);
+	MPASS(etd->et_td == (struct thread *)td);
+#ifdef EPOCH_TRACKER_DEBUG
+	MPASS(etd->et_magic_pre == EPOCH_MAGIC0);
+	MPASS(etd->et_magic_post == EPOCH_MAGIC1);
+	etd->et_magic_pre = 0;
+	etd->et_magic_post = 0;
+#endif
+	etd->et_td = (void*)0xDEADBEEF;
+	ck_epoch_end(&er->er_read_record,
+	    (ck_epoch_section_t *)&etd->et_section);
+	TAILQ_REMOVE(&er->er_tdlist, etd, et_link);
+	er->er_gen++;
+	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
+		epoch_adjust_prio((struct thread *)td, td->td_pre_epoch_prio);
+	critical_exit();
+}
+
+void
+epoch_exit(epoch_t epoch)
+{
+	struct thread_lite *td;
+	epoch_record_t er;
+
+	INIT_CHECK(epoch);
+	td = (struct thread_lite *)curthread;
+	MPASS(td->td_epochnest);
+	td->td_epochnest--;
+	er = epoch_currecord(epoch);
+	ck_epoch_end(&er->er_read_record, NULL);
+	critical_exit();
+}
 
 /*
@@ -546,11 +667,3 @@ in_epoch(epoch_t epoch)
 {
 	return (in_epoch_verbose(epoch, 0));
 }
-
-void
-epoch_adjust_prio(struct thread *td, u_char prio)
-{
-	thread_lock(td);
-	sched_prio(td, prio);
-	thread_unlock(td);
-}

sys/sys/epoch.h

@@ -29,19 +29,11 @@
 #ifndef _SYS_EPOCH_H_
 #define _SYS_EPOCH_H_
-#ifdef _KERNEL
-#include <sys/lock.h>
-#include <sys/pcpu.h>
-#endif
-
-struct epoch;
-typedef struct epoch *epoch_t;
-
-#define EPOCH_PREEMPT 0x1
-#define EPOCH_LOCKED 0x2
-
-extern epoch_t global_epoch;
-extern epoch_t global_epoch_preempt;
 
+/*
+ * XXXGL: temporarily keep epoch_tracker exposed to userland until
+ * we remove trackers embedded into network structs.
+ */
 struct epoch_context {
 	void *data[2];
@@ -49,7 +41,6 @@ struct epoch_context {
 typedef struct epoch_context *epoch_context_t;
-
 
 struct epoch_tracker {
 	void *datap[3];
 #ifdef EPOCH_TRACKER_DEBUG
@@ -61,6 +52,19 @@ struct epoch_tracker {
 typedef struct epoch_tracker *epoch_tracker_t;
 
+#ifdef _KERNEL
+#include <sys/lock.h>
+#include <sys/pcpu.h>
+
+struct epoch;
+typedef struct epoch *epoch_t;
+
+#define EPOCH_PREEMPT 0x1
+#define EPOCH_LOCKED 0x2
+
+extern epoch_t global_epoch;
+extern epoch_t global_epoch_preempt;
+
 epoch_t epoch_alloc(int flags);
 void epoch_free(epoch_t epoch);
 void epoch_wait(epoch_t epoch);
@@ -68,26 +72,15 @@ void epoch_wait_preempt(epoch_t epoch);
 void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t));
 int in_epoch(epoch_t epoch);
 int in_epoch_verbose(epoch_t epoch, int dump_onfail);
-#ifdef _KERNEL
 DPCPU_DECLARE(int, epoch_cb_count);
 DPCPU_DECLARE(struct grouptask, epoch_cb_task);
 #define EPOCH_MAGIC0 0xFADECAFEF00DD00D
 #define EPOCH_MAGIC1 0xBADDBABEDEEDFEED
-void epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et);
-void epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et);
-void epoch_enter_KBI(epoch_t epoch);
-void epoch_exit_KBI(epoch_t epoch);
-
-#if defined(KLD_MODULE) && !defined(KLD_TIED)
-#define epoch_enter_preempt(e, t) epoch_enter_preempt_KBI((e), (t))
-#define epoch_exit_preempt(e, t) epoch_exit_preempt_KBI((e), (t))
-#define epoch_enter(e) epoch_enter_KBI((e))
-#define epoch_exit(e) epoch_exit_KBI((e))
-#else
-#include <sys/epoch_private.h>
-#endif /* KLD_MODULE */
-#endif /* _KERNEL */
-#endif
+
+void epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et);
+void epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et);
+void epoch_enter(epoch_t epoch);
+void epoch_exit(epoch_t epoch);
+
+#endif /* _KERNEL */
+#endif /* _SYS_EPOCH_H_ */
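The removed #if defined(KLD_MODULE) block above is the other half of the
story: it silently rewrote module calls to the *_KBI wrappers, while
tied builds pulled the inline bodies from epoch_private.h. After this
change both build flavors compile the same source to plain calls to the
same four symbols. An illustrative caller (example_module_reader and
its ep argument are mine, not part of the diff):

#include <sys/param.h>
#include <sys/epoch.h>

/* Illustrative only: `ep` is any epoch the caller allocated. */
static void
example_module_reader(epoch_t ep)
{
	/*
	 * Previously this compiled to epoch_enter_KBI(ep) in an untied
	 * module, or to the inlined epoch_private.h body in a tied
	 * build. Now it is an ordinary call to epoch_enter().
	 */
	epoch_enter(ep);
	/* ... dereference epoch-protected data ... */
	epoch_exit(ep);
}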

sys/sys/epoch_private.h (deleted)

@@ -1,211 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
- *
- * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-#ifndef _SYS_EPOCH_PRIVATE_H_
-#define _SYS_EPOCH_PRIVATE_H_
-#ifndef _KERNEL
-#error "no user serviceable parts"
-#else
-#include <ck_epoch.h>
-#include <sys/kpilite.h>
-
-#include <sys/mutex.h>
-
-extern void epoch_adjust_prio(struct thread *td, u_char prio);
-#ifndef _SYS_SYSTM_H_
-extern void critical_exit_preempt(void);
-#endif
-
-#ifdef __amd64__
-#define EPOCH_ALIGN CACHE_LINE_SIZE*2
-#else
-#define EPOCH_ALIGN CACHE_LINE_SIZE
-#endif
-
-/*
- * Standalone (_sa) routines for thread state manipulation
- */
-static __inline void
-critical_enter_sa(void *tdarg)
-{
-	struct thread_lite *td;
-
-	td = tdarg;
-	td->td_critnest++;
-	__compiler_membar();
-}
-
-static __inline void
-critical_exit_sa(void *tdarg)
-{
-	struct thread_lite *td;
-
-	td = tdarg;
-	MPASS(td->td_critnest > 0);
-	__compiler_membar();
-	td->td_critnest--;
-	__compiler_membar();
-	if (__predict_false(td->td_owepreempt != 0))
-		critical_exit_preempt();
-}
-
-typedef struct epoch_thread {
-#ifdef EPOCH_TRACKER_DEBUG
-	uint64_t et_magic_pre;
-#endif
-	TAILQ_ENTRY(epoch_thread) et_link;	/* Epoch queue. */
-	struct thread *et_td;		/* pointer to thread in section */
-	ck_epoch_section_t et_section;	/* epoch section object */
-#ifdef EPOCH_TRACKER_DEBUG
-	uint64_t et_magic_post;
-#endif
-} *epoch_thread_t;
-TAILQ_HEAD (epoch_tdlist, epoch_thread);
-
-typedef struct epoch_record {
-	ck_epoch_record_t er_read_record;
-	ck_epoch_record_t er_write_record;
-	volatile struct epoch_tdlist er_tdlist;
-	volatile uint32_t er_gen;
-	uint32_t er_cpuid;
-} __aligned(EPOCH_ALIGN) *epoch_record_t;
-
-struct epoch {
-	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
-	epoch_record_t e_pcpu_record;
-	int e_idx;
-	int e_flags;
-};
-
-static epoch_record_t
-epoch_currecord(epoch_t epoch)
-{
-	return zpcpu_get_cpu(epoch->e_pcpu_record, curcpu);
-}
-
-#define INIT_CHECK(epoch)					\
-	do {							\
-		if (__predict_false((epoch) == NULL))		\
-			return;					\
-	} while (0)
-
-static __inline void
-epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
-{
-	struct epoch_record *er;
-	struct epoch_thread *etd;
-	struct thread_lite *td;
-
-	MPASS(cold || epoch != NULL);
-	INIT_CHECK(epoch);
-	etd = (void *)et;
-	MPASS(epoch->e_flags & EPOCH_PREEMPT);
-#ifdef EPOCH_TRACKER_DEBUG
-	etd->et_magic_pre = EPOCH_MAGIC0;
-	etd->et_magic_post = EPOCH_MAGIC1;
-#endif
-	td = (struct thread_lite *)curthread;
-	etd->et_td = (void*)td;
-	td->td_epochnest++;
-	critical_enter_sa(td);
-	sched_pin_lite(td);
-	td->td_pre_epoch_prio = td->td_priority;
-	er = epoch_currecord(epoch);
-	TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link);
-	ck_epoch_begin(&er->er_read_record, (ck_epoch_section_t *)&etd->et_section);
-	critical_exit_sa(td);
-}
-
-static __inline void
-epoch_enter(epoch_t epoch)
-{
-	struct thread_lite *td;
-	epoch_record_t er;
-
-	MPASS(cold || epoch != NULL);
-	INIT_CHECK(epoch);
-	td = (struct thread_lite *)curthread;
-	td->td_epochnest++;
-	critical_enter_sa(td);
-	er = epoch_currecord(epoch);
-	ck_epoch_begin(&er->er_read_record, NULL);
-}
-
-static __inline void
-epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
-{
-	struct epoch_record *er;
-	struct epoch_thread *etd;
-	struct thread_lite *td;
-
-	INIT_CHECK(epoch);
-	td = (struct thread_lite *)curthread;
-	critical_enter_sa(td);
-	sched_unpin_lite(td);
-	MPASS(td->td_epochnest);
-	td->td_epochnest--;
-	er = epoch_currecord(epoch);
-	MPASS(epoch->e_flags & EPOCH_PREEMPT);
-	etd = (void *)et;
-	MPASS(etd != NULL);
-	MPASS(etd->et_td == (struct thread *)td);
-#ifdef EPOCH_TRACKER_DEBUG
-	MPASS(etd->et_magic_pre == EPOCH_MAGIC0);
-	MPASS(etd->et_magic_post == EPOCH_MAGIC1);
-	etd->et_magic_pre = 0;
-	etd->et_magic_post = 0;
-#endif
-	etd->et_td = (void*)0xDEADBEEF;
-	ck_epoch_end(&er->er_read_record,
-	    (ck_epoch_section_t *)&etd->et_section);
-	TAILQ_REMOVE(&er->er_tdlist, etd, et_link);
-	er->er_gen++;
-	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
-		epoch_adjust_prio((struct thread *)td, td->td_pre_epoch_prio);
-	critical_exit_sa(td);
-}
-
-static __inline void
-epoch_exit(epoch_t epoch)
-{
-	struct thread_lite *td;
-	epoch_record_t er;
-
-	INIT_CHECK(epoch);
-	td = (struct thread_lite *)curthread;
-	MPASS(td->td_epochnest);
-	td->td_epochnest--;
-	er = epoch_currecord(epoch);
-	ck_epoch_end(&er->er_read_record, NULL);
-	critical_exit_sa(td);
-}
-#endif /* _KERNEL */
-#endif /* _SYS_EPOCH_PRIVATE_H_ */
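With epoch_private.h gone, the preemptible read-side API is just the
four prototypes left in epoch.h. A minimal usage sketch, assuming an
epoch created with epoch_alloc(EPOCH_PREEMPT) (the reader function
itself is illustrative, not from the commit):

#include <sys/param.h>
#include <sys/epoch.h>

/* Illustrative reader for an epoch allocated with EPOCH_PREEMPT. */
static void
example_reader(epoch_t ep)
{
	struct epoch_tracker et;	/* caller-supplied, now opaque to consumers */

	epoch_enter_preempt(ep, &et);	/* a real function call after this commit */
	/* ... read epoch-protected data; may be preempted, but not migrated ... */
	epoch_exit_preempt(ep, &et);
}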