Move syscall_thread_{enter,exit}() into the slow path. This is only
needed for syscalls from unloadable modules.

Reviewed by:	kib
MFC after:	2 weeks
Sponsored by:	EPSRC
Differential Revision:	https://reviews.freebsd.org/D26988
Author: Edward Tomasz Napierala
Date:   2020-11-08 15:54:59 +00:00
Commit: a1bd83fede
Parent: 36d6566e59
3 changed files with 26 additions and 30 deletions
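
For readers skimming the diff, the net effect on syscallenter() can be pictured with the condensed C sketch below.  It illustrates the resulting control flow and is not the verbatim kernel source: syscall_dispatch_sketch() and tracing_or_auditing() are made-up names, the latter standing in for the SYSTRACE_ENABLED() and AUDIT_SYSCALL_ENTER() checks, and ptrace/ktrace handling is omitted.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sysent.h>

/* Hypothetical stand-in for the tracing and auditing predicates. */
static bool tracing_or_auditing(void);

/*
 * Condensed sketch (not the actual FreeBSD code) of how syscallenter()
 * dispatches a syscall after this change.  Statically compiled syscalls
 * (SY_THR_STATIC) stay on the fast path and never touch the per-sysent
 * thread count; only module-provided syscalls pay for
 * syscall_thread_enter()/syscall_thread_exit(), and only on the slow
 * path already taken for tracing and auditing.
 */
static int
syscall_dispatch_sketch(struct thread *td, struct sysent *se,
    struct syscall_args *sa)
{
	bool sy_thr_static;
	int error;

	sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0;

	if (__predict_false(tracing_or_auditing() || !sy_thr_static)) {
		/* Slow path: tracing, auditing, or a module syscall. */
		if (!sy_thr_static) {
			error = syscall_thread_enter(td, se);
			if (error != 0)
				return (error);	/* Module is being unloaded. */
		}
		error = (se->sy_call)(td, sa->args);
		if (!sy_thr_static)
			syscall_thread_exit(td, se);
	} else {
		/* Fast path: static syscall, no thread-count accounting. */
		error = (se->sy_call)(td, sa->args);
	}
	return (error);
}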

--- a/sys/kern/kern_syscalls.c
+++ b/sys/kern/kern_syscalls.c

@@ -80,10 +80,13 @@ syscall_thread_drain(struct sysent *se)
 }
 
 int
-_syscall_thread_enter(struct thread *td, struct sysent *se)
+syscall_thread_enter(struct thread *td, struct sysent *se)
 {
 	u_int32_t cnt, oldcnt;
 
+	KASSERT((se->sy_thrcnt & SY_THR_STATIC) == 0,
+	    ("%s: not a static syscall", __func__));
+
 	do {
 		oldcnt = se->sy_thrcnt;
 		if ((oldcnt & (SY_THR_DRAINING | SY_THR_ABSENT)) != 0)
@@ -94,10 +97,13 @@ _syscall_thread_enter(struct thread *td, struct sysent *se)
 }
 
 void
-_syscall_thread_exit(struct thread *td, struct sysent *se)
+syscall_thread_exit(struct thread *td, struct sysent *se)
 {
 	u_int32_t cnt, oldcnt;
 
+	KASSERT((se->sy_thrcnt & SY_THR_STATIC) == 0,
+	    ("%s: not a static syscall", __func__));
+
 	do {
 		oldcnt = se->sy_thrcnt;
 		cnt = oldcnt - SY_THR_INCR;

--- a/sys/kern/subr_syscall.c
+++ b/sys/kern/subr_syscall.c

@@ -61,6 +61,7 @@ syscallenter(struct thread *td)
 	struct syscall_args *sa;
 	struct sysent *se;
 	int error, traced;
+	bool sy_thr_static;
 
 	VM_CNT_INC(v_syscall);
 	p = td->td_proc;
@@ -128,12 +129,6 @@ syscallenter(struct thread *td)
 	}
 #endif
 
-	error = syscall_thread_enter(td, se);
-	if (error != 0) {
-		td->td_errno = error;
-		goto retval;
-	}
-
 	/*
 	 * Fetch fast sigblock value at the time of syscall entry to
 	 * handle sleepqueue primitives which might call cursig().
@@ -145,8 +140,19 @@ syscallenter(struct thread *td)
 	KASSERT((td->td_pflags & TDP_NERRNO) == 0,
 	    ("%s: TDP_NERRNO set", __func__));
 
+	sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0;
+
 	if (__predict_false(SYSTRACE_ENABLED() ||
-	    AUDIT_SYSCALL_ENTER(sa->code, td))) {
+	    AUDIT_SYSCALL_ENTER(sa->code, td) ||
+	    !sy_thr_static)) {
+		if (!sy_thr_static) {
+			error = syscall_thread_enter(td, se);
+			if (error != 0) {
+				td->td_errno = error;
+				goto retval;
+			}
+		}
+
 #ifdef KDTRACE_HOOKS
 		/* Give the syscall:::entry DTrace probe a chance to fire. */
 		if (__predict_false(se->sy_entry != 0))
@@ -176,6 +182,9 @@ syscallenter(struct thread *td)
 			(*systrace_probe_func)(sa, SYSTRACE_RETURN,
 			    error ? -1 : td->td_retval[0]);
 #endif
+
+		if (!sy_thr_static)
+			syscall_thread_exit(td, se);
 	} else {
 		error = (se->sy_call)(td, sa->args);
 		/* Save the latest error return value. */
@@ -184,7 +193,6 @@ syscallenter(struct thread *td)
 		else
 			td->td_errno = error;
 	}
-	syscall_thread_exit(td, se);
 
 retval:
 	KTR_STOP4(KTR_SYSC, "syscall", syscallname(p, sa->code),

--- a/sys/sys/sysent.h
+++ b/sys/sys/sysent.h

@@ -294,26 +294,8 @@ struct nosys_args;
 int	lkmnosys(struct thread *, struct nosys_args *);
 int	lkmressys(struct thread *, struct nosys_args *);
 
-int	_syscall_thread_enter(struct thread *td, struct sysent *se);
-void	_syscall_thread_exit(struct thread *td, struct sysent *se);
-
-static inline int
-syscall_thread_enter(struct thread *td, struct sysent *se)
-{
-
-	if (__predict_true((se->sy_thrcnt & SY_THR_STATIC) != 0))
-		return (0);
-	return (_syscall_thread_enter(td, se));
-}
-
-static inline void
-syscall_thread_exit(struct thread *td, struct sysent *se)
-{
-
-	if (__predict_true((se->sy_thrcnt & SY_THR_STATIC) != 0))
-		return;
-	_syscall_thread_exit(td, se);
-}
+int	syscall_thread_enter(struct thread *td, struct sysent *se);
+void	syscall_thread_exit(struct thread *td, struct sysent *se);
 
 int	shared_page_alloc(int size, int align);
 int	shared_page_fill(int size, int align, const void *data);
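
The reason statically compiled syscalls can skip this accounting entirely is that se->sy_thrcnt exists only to keep a module's syscall handler from being unloaded while a thread is still executing it: unregistering the syscall drains the entry through syscall_thread_drain(), which marks it draining and waits for the count to reach zero.  The sketch below shows roughly what the enter side looks like, assembled from the fragments visible in the kern_syscalls.c hunk above; the lines that hunk elides (the ENOSYS return and the atomic_cmpset loop) are filled in by assumption rather than copied from the source.

int
syscall_thread_enter(struct thread *td, struct sysent *se)
{
	u_int32_t cnt, oldcnt;

	KASSERT((se->sy_thrcnt & SY_THR_STATIC) == 0,
	    ("%s: not a static syscall", __func__));

	do {
		oldcnt = se->sy_thrcnt;
		/* Refuse entry once the handler is draining or gone. */
		if ((oldcnt & (SY_THR_DRAINING | SY_THR_ABSENT)) != 0)
			return (ENOSYS);
		cnt = oldcnt + SY_THR_INCR;
	} while (atomic_cmpset_acq_32(&se->sy_thrcnt, oldcnt, cnt) == 0);
	return (0);
}

The exit side decrements the count by SY_THR_INCR in the same way and, if the count drops to zero while the entry is draining, wakes the thread waiting in syscall_thread_drain().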