Clone the RCU interface into a sleepable and a non-sleepable part
in the LinuxKPI. This allows synchronize_rcu() to be used inside an SRCU
read section. No functional change intended.

Bump the __FreeBSD_version to force recompilation of external kernel
modules.

PR:		242272
MFC after:	1 week
Sponsored by:	Mellanox Technologies
commit eae5868ce9 (parent 61d82b0794)
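For illustration only (not part of the committed diff), a minimal sketch of the usage pattern the commit message refers to: waiting for a regular RCU grace period from inside an SRCU read section. The srcu_struct and the update function below are hypothetical consumer code.

/*
 * Hypothetical consumer sketch: with the interface split into the
 * RCU_TYPE_REGULAR and RCU_TYPE_SLEEPABLE domains, synchronize_rcu()
 * inside an SRCU read section only waits for regular RCU readers,
 * not for the sleepable epoch the SRCU reader itself holds.
 */
#include <linux/rcupdate.h>
#include <linux/srcu.h>

static struct srcu_struct example_srcu;	/* assume init_srcu_struct() was called elsewhere */

static void
example_update(void)
{
	int idx;

	/* enters the sleepable (SRCU) domain */
	idx = srcu_read_lock(&example_srcu);

	/* waits only for readers in the regular RCU domain */
	synchronize_rcu();

	srcu_read_unlock(&example_srcu, idx);
}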
@@ -35,6 +35,11 @@
 #define LINUX_KFREE_RCU_OFFSET_MAX 4096	/* exclusive */
 
+/* BSD specific defines */
+#define RCU_TYPE_REGULAR 0
+#define RCU_TYPE_SLEEPABLE 1
+#define RCU_TYPE_MAX 2
+
 #define RCU_INITIALIZER(v) \
 	((__typeof(*(v)) *)(v))
@@ -43,27 +48,27 @@
 } while (0)
 
 #define call_rcu(ptr, func) do { \
-	linux_call_rcu(ptr, func); \
+	linux_call_rcu(RCU_TYPE_REGULAR, ptr, func); \
 } while (0)
 
 #define rcu_barrier(void) do { \
-	linux_rcu_barrier(); \
+	linux_rcu_barrier(RCU_TYPE_REGULAR); \
 } while (0)
 
 #define rcu_read_lock(void) do { \
-	linux_rcu_read_lock(); \
+	linux_rcu_read_lock(RCU_TYPE_REGULAR); \
 } while (0)
 
 #define rcu_read_unlock(void) do { \
-	linux_rcu_read_unlock(); \
+	linux_rcu_read_unlock(RCU_TYPE_REGULAR);\
 } while (0)
 
 #define synchronize_rcu(void) do { \
-	linux_synchronize_rcu(); \
+	linux_synchronize_rcu(RCU_TYPE_REGULAR); \
 } while (0)
 
 #define synchronize_rcu_expedited(void) do { \
-	linux_synchronize_rcu(); \
+	linux_synchronize_rcu(RCU_TYPE_REGULAR); \
 } while (0)
 
 #define kfree_rcu(ptr, rcu_head) do { \
@@ -94,11 +99,11 @@
 
 /* prototypes */
 
-extern void linux_call_rcu(struct rcu_head *ptr, rcu_callback_t func);
-extern void linux_rcu_barrier(void);
-extern void linux_rcu_read_lock(void);
-extern void linux_rcu_read_unlock(void);
-extern void linux_synchronize_rcu(void);
+extern void linux_call_rcu(unsigned type, struct rcu_head *ptr, rcu_callback_t func);
+extern void linux_rcu_barrier(unsigned type);
+extern void linux_rcu_read_lock(unsigned type);
+extern void linux_rcu_read_unlock(unsigned type);
+extern void linux_synchronize_rcu(unsigned type);
 
 /* Empty implementation for !DEBUG */
 #define init_rcu_head(...)
@@ -1,6 +1,6 @@
 /*-
  * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
- * Copyright (c) 2017 Hans Petter Selasky (hselasky@freebsd.org)
+ * Copyright (c) 2017-2020 Hans Petter Selasky (hselasky@freebsd.org)
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,9 +90,9 @@ CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));
  */
 CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);
 
-static ck_epoch_t linux_epoch;
-static struct linux_epoch_head linux_epoch_head;
-DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record);
+static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
+static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
+DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);
 
 static void linux_rcu_cleaner_func(void *, int);
 
@@ -101,23 +101,27 @@ linux_rcu_runtime_init(void *arg __unused)
 {
 	struct linux_epoch_head *head;
 	int i;
+	int j;
 
-	ck_epoch_init(&linux_epoch);
+	for (j = 0; j != RCU_TYPE_MAX; j++) {
+		ck_epoch_init(&linux_epoch[j]);
 
-	head = &linux_epoch_head;
+		head = &linux_epoch_head[j];
 
-	mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
-	TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, NULL);
-	STAILQ_INIT(&head->cb_head);
+		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
+		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
+		STAILQ_INIT(&head->cb_head);
 
-	CPU_FOREACH(i) {
-		struct linux_epoch_record *record;
+		CPU_FOREACH(i) {
+			struct linux_epoch_record *record;
 
-		record = &DPCPU_ID_GET(i, linux_epoch_record);
+			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);
 
-		record->cpuid = i;
-		ck_epoch_register(&linux_epoch, &record->epoch_record, NULL);
-		TAILQ_INIT(&record->ts_head);
+			record->cpuid = i;
+			ck_epoch_register(&linux_epoch[j],
+			    &record->epoch_record, NULL);
+			TAILQ_INIT(&record->ts_head);
+		}
 	}
 }
 SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);
@@ -126,24 +130,27 @@ static void
 linux_rcu_runtime_uninit(void *arg __unused)
 {
 	struct linux_epoch_head *head;
+	int j;
 
-	head = &linux_epoch_head;
+	for (j = 0; j != RCU_TYPE_MAX; j++) {
+		head = &linux_epoch_head[j];
 
-	/* destroy head lock */
-	mtx_destroy(&head->lock);
+		mtx_destroy(&head->lock);
+	}
 }
 SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);
 
 static void
-linux_rcu_cleaner_func(void *context __unused, int pending __unused)
+linux_rcu_cleaner_func(void *context, int pending __unused)
 {
 	struct linux_epoch_head *head;
 	struct callback_head *rcu;
 	STAILQ_HEAD(, callback_head) tmp_head;
+	uintptr_t offset;
 
 	linux_set_current(curthread);
 
-	head = &linux_epoch_head;
+	head = context;
 
 	/* move current callbacks into own queue */
 	mtx_lock(&head->lock);
@@ -152,11 +159,10 @@ linux_rcu_cleaner_func(void *context __unused, int pending __unused)
 	mtx_unlock(&head->lock);
 
 	/* synchronize */
-	linux_synchronize_rcu();
+	linux_synchronize_rcu(head - linux_epoch_head);
 
 	/* dispatch all callbacks, if any */
 	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
-		uintptr_t offset;
 
 		STAILQ_REMOVE_HEAD(&tmp_head, entry);
 
@@ -170,11 +176,13 @@ linux_rcu_cleaner_func(void *context __unused, int pending __unused)
 }
 
 void
-linux_rcu_read_lock(void)
+linux_rcu_read_lock(unsigned type)
 {
 	struct linux_epoch_record *record;
 	struct task_struct *ts;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
@@ -184,7 +192,7 @@ linux_rcu_read_lock(void)
 	 */
 	sched_pin();
 
-	record = &DPCPU_GET(linux_epoch_record);
+	record = &DPCPU_GET(linux_epoch_record[type]);
 	ts = current;
 
 	/*
@@ -200,15 +208,17 @@ linux_rcu_read_lock(void)
 }
 
 void
-linux_rcu_read_unlock(void)
+linux_rcu_read_unlock(unsigned type)
 {
 	struct linux_epoch_record *record;
 	struct task_struct *ts;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
-	record = &DPCPU_GET(linux_epoch_record);
+	record = &DPCPU_GET(linux_epoch_record[type]);
 	ts = current;
 
 	/*
@@ -283,7 +293,7 @@ linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_re
 }
 
 void
-linux_synchronize_rcu(void)
+linux_synchronize_rcu(unsigned type)
 {
 	struct thread *td;
 	int was_bound;
@@ -291,6 +301,8 @@ linux_synchronize_rcu(void)
 	int old_pinned;
 	u_char old_prio;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
@@ -314,7 +326,7 @@ linux_synchronize_rcu(void)
 	td->td_pinned = 0;
 	sched_bind(td, old_cpu);
 
-	ck_epoch_synchronize_wait(&linux_epoch,
+	ck_epoch_synchronize_wait(&linux_epoch[type],
 	    &linux_synchronize_rcu_cb, NULL);
 
 	/* restore CPU binding, if any */
@@ -337,23 +349,30 @@ linux_synchronize_rcu(void)
 }
 
 void
-linux_rcu_barrier(void)
+linux_rcu_barrier(unsigned type)
 {
 	struct linux_epoch_head *head;
 
-	linux_synchronize_rcu();
+	MPASS(type < RCU_TYPE_MAX);
 
-	head = &linux_epoch_head;
+	linux_synchronize_rcu(type);
+
+	head = &linux_epoch_head[type];
 
 	/* wait for callbacks to complete */
 	taskqueue_drain(taskqueue_fast, &head->task);
 }
 
 void
-linux_call_rcu(struct rcu_head *context, rcu_callback_t func)
+linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
 {
-	struct callback_head *rcu = (struct callback_head *)context;
-	struct linux_epoch_head *head = &linux_epoch_head;
+	struct callback_head *rcu;
+	struct linux_epoch_head *head;
+
+	MPASS(type < RCU_TYPE_MAX);
+
+	rcu = (struct callback_head *)context;
+	head = &linux_epoch_head[type];
 
 	mtx_lock(&head->lock);
 	rcu->func = func;
@@ -376,24 +395,24 @@ cleanup_srcu_struct(struct srcu_struct *srcu)
 int
 srcu_read_lock(struct srcu_struct *srcu)
 {
-	linux_rcu_read_lock();
+	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
 	return (0);
 }
 
 void
 srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
 {
-	linux_rcu_read_unlock();
+	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
 }
 
 void
 synchronize_srcu(struct srcu_struct *srcu)
 {
-	linux_synchronize_rcu();
+	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
 }
 
 void
 srcu_barrier(struct srcu_struct *srcu)
 {
-	linux_rcu_barrier();
+	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
 }
@@ -60,7 +60,7 @@
  * in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1300088	/* Master, propagated to newvers */
+#define __FreeBSD_version 1300089	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,