Refactor sleepq_catch_signals().

- Extract the suspension check into a sig_ast_checksusp() helper.
- Extract the signal check and the calculation of the interruption errno
  into a sig_ast_needsigchk() helper.

The helpers are moved to kern_sig.c, which is the proper place for
signal-related code.

Improve control flow in sleepq_catch_signals() so that ret == 0
(can sleep) and ret != 0 (interrupted) are handled only once, by
separating the checking code into sleepq_check_ast_sc_locked(), whose
return value is interpreted at a single location (sketched below).

Reviewed by:	markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D26628
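
For illustration only (not part of the diff): a minimal sketch of how the two
new helpers compose for a hypothetical caller that already holds the process
lock. example_interrupt_check() is invented for this sketch; only
sig_ast_checksusp() and sig_ast_needsigchk() come from this commit.

/*
 * Hypothetical caller: mirror the order used by
 * sleepq_check_ast_sc_locked() below -- check for a suspension request
 * first, then for a pending signal, all under PROC_LOCK.
 */
static int
example_interrupt_check(struct thread *td)
{
	struct proc *p;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	error = sig_ast_checksusp(td);		/* 0, EINTR, or ERESTART */
	if (error == 0)
		error = sig_ast_needsigchk(td);	/* 0, EINTR, or ERESTART */
	PROC_UNLOCK(p);
	return (error);		/* 0 means it is safe to go to sleep */
}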
Konstantin Belousov  2020-10-04 16:30:05 +00:00
commit 0c82fb267b (parent 9f2d6263bb)
3 changed files with 151 additions and 99 deletions

sys/kern/kern_sig.c

@@ -3139,6 +3139,71 @@ postsig(int sig)
	return (1);
}

int
sig_ast_checksusp(struct thread *td)
{
	struct proc *p;
	int ret;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);

	ret = thread_suspend_check(1);
	MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	return (ret);
}

int
sig_ast_needsigchk(struct thread *td)
{
	struct proc *p;
	struct sigacts *ps;
	int ret, sig;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
		return (0);

	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == -1) {
		mtx_unlock(&ps->ps_mtx);
		KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
		KASSERT(TD_SBDRY_INTR(td),
		    ("lost TDF_SERESTART of TDF_SEINTR"));
		KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
		    (TDF_SEINTR | TDF_SERESTART),
		    ("both TDF_SEINTR and TDF_SERESTART"));
		ret = TD_SBDRY_ERRNO(td);
	} else if (sig != 0) {
		ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
		mtx_unlock(&ps->ps_mtx);
	} else {
		mtx_unlock(&ps->ps_mtx);
		ret = 0;
	}

	/*
	 * Do not go into sleep if this thread was the ptrace(2)
	 * attach leader.  cursig() consumed SIGSTOP from PT_ATTACH,
	 * but we usually act on the signal by interrupting sleep, and
	 * should do that here as well.
	 */
	if ((td->td_dbgflags & TDB_FSTP) != 0) {
		if (ret == 0)
			ret = EINTR;
		td->td_dbgflags &= ~TDB_FSTP;
	}

	return (ret);
}

void
proc_wkilled(struct proc *p)
{

sys/kern/subr_sleepqueue.c

@@ -433,6 +433,68 @@ sleepq_sleepcnt(const void *wchan, int queue)
	return (sq->sq_blockedcnt[queue]);
}

static int
sleepq_check_ast_sc_locked(struct thread *td, struct sleepqueue_chain *sc)
{
	struct proc *p;
	int ret;

	mtx_assert(&sc->sc_lock, MA_OWNED);

	ret = 0;
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		return (ret);
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
		return (0);

	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);

	p = td->td_proc;
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, td->td_name);

	PROC_LOCK(p);
	/*
	 * Check for suspension first.  Checking for signals and then
	 * suspending could result in a missed signal, since a signal
	 * can be delivered while this thread is suspended.
	 */
	ret = sig_ast_checksusp(td);
	if (ret != 0) {
		PROC_UNLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		thread_lock(td);
		return (ret);
	}

	ret = sig_ast_needsigchk(td);

	/*
	 * Lock the per-process spinlock prior to dropping the
	 * PROC_LOCK to avoid a signal delivery race.
	 * PROC_LOCK, PROC_SLOCK, and thread_lock() are
	 * currently held in tdsendsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	return (ret);
}
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
@@ -442,116 +504,39 @@ sleepq_sleepcnt(const void *wchan, int queue)
static int
sleepq_catch_signals(const void *wchan, int pri)
{
	struct thread *td;
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int ret;

	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	td = curthread;

	ret = sleepq_check_ast_sc_locked(td, sc);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	if (ret == 0) {
		/*
		 * No pending signals and no suspension requests found.
		 * Switch the thread off the cpu.
		 */
		sleepq_switch(wchan, pri);
		return (0);
	}

	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
	}
	MPASS(td->td_lock != &sc->sc_lock);
	mtx_unlock_spin(&sc->sc_lock);
	thread_unlock(td);
	return (ret);
}

sys/sys/signalvar.h

@@ -399,6 +399,8 @@ void sigacts_copy(struct sigacts *dest, struct sigacts *src);
void sigacts_free(struct sigacts *ps);
struct sigacts *sigacts_hold(struct sigacts *ps);
int sigacts_shared(struct sigacts *ps);
int sig_ast_checksusp(struct thread *td);
int sig_ast_needsigchk(struct thread *td);
void sig_drop_caught(struct proc *p);
void sigexit(struct thread *td, int sig) __dead2;
int sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **);
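
For background on the EINTR vs. ERESTART choice in sig_ast_needsigchk():
a signal is a member of ps_sigintr when its handler was installed without
SA_RESTART, so delivery interrupts the sleep with EINTR; handlers installed
with SA_RESTART yield ERESTART and the interrupted system call is restarted.
A minimal userland illustration (not part of the diff; the signal numbers
are chosen arbitrarily):

#include <signal.h>

static void
handler(int sig)
{
	(void)sig;
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);

	/* Without SA_RESTART the signal lands in ps_sigintr, so an
	 * interrupted sleep reports EINTR to the blocked system call. */
	sa.sa_flags = 0;
	sigaction(SIGUSR1, &sa, NULL);

	/* With SA_RESTART the sleep returns ERESTART inside the kernel
	 * and the system call is restarted transparently. */
	sa.sa_flags = SA_RESTART;
	sigaction(SIGUSR2, &sa, NULL);

	return (0);
}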