fork: avoid endless wait with PTRACE_FORK and RFSTOPPED.
An RFSTOPPED thread cannot clear TDB_STOPATFORK, which is done in
fork_return() in its context, so the parent is stuck forever.  This is
triggered when trying to ptrace a Linux process.

Instead of waiting for the new thread to clear TDB_STOPATFORK, tag it
as traced and reparent it to the debugger in do_fork(), and let it
notify the debugger only when it is actually run.

Submitted by:		Yanko Yankulov <yanko.yankulov@gmail.com>
Reviewed by:		jhb
MFC after:		1 week
X-MFC-Note:		keep p_dbgwait placeholder intact
Differential Revision:	https://reviews.freebsd.org/D15857
commit 6e22bbf66e
parent ac4bc0c171
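
For context only (not part of this commit): a minimal tracer-side sketch of the path the change affects. It enables fork-event reporting with PT_FOLLOW_FORK, which sets PTRACE_FORK in the parent's p_ptevents, and then picks up PL_FLAG_FORKED stops via PT_LWPINFO. It assumes a FreeBSD host, uses only documented ptrace(2) requests, and trims the error handling and signal bookkeeping a real debugger would need.

/*
 * Illustrative only -- not part of the commit.  Attach to a target,
 * ask for fork events, and print each fork the kernel reports.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	struct ptrace_lwpinfo pl;
	pid_t pid;
	int status;

	if (argc != 2)
		errx(1, "usage: tracer <pid>");
	pid = (pid_t)atoi(argv[1]);

	/* Attach; the target stops and we collect the stop. */
	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
		err(1, "PT_ATTACH");
	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");

	/* Enable fork-event reporting (data != 0 turns it on). */
	if (ptrace(PT_FOLLOW_FORK, pid, NULL, 1) == -1)
		err(1, "PT_FOLLOW_FORK");

	/* Resume and report each fork event we observe. */
	if (ptrace(PT_CONTINUE, pid, (caddr_t)1, 0) == -1)
		err(1, "PT_CONTINUE");
	while (waitpid(pid, &status, 0) != -1 && WIFSTOPPED(status)) {
		if (ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl)) == 0 &&
		    (pl.pl_flags & PL_FLAG_FORKED) != 0)
			printf("pid %d forked child %d\n",
			    (int)pid, (int)pl.pl_child_pid);
		if (ptrace(PT_CONTINUE, pid, (caddr_t)1, 0) == -1)
			err(1, "PT_CONTINUE");
	}
	return (0);
}

With this change, such a sequence no longer hangs when the traced parent creates its child RFSTOPPED (as the Linuxulator does), because do_fork() marks the child traced and reparents it to the debugger itself instead of sleeping on p_dbgwait until the child runs.
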
@@ -721,18 +721,6 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *
 	 * but before we wait for the debugger.
 	 */
 	_PHOLD(p2);
-	if (p1->p_ptevents & PTRACE_FORK) {
-		/*
-		 * Arrange for debugger to receive the fork event.
-		 *
-		 * We can report PL_FLAG_FORKED regardless of
-		 * P_FOLLOWFORK settings, but it does not make a sense
-		 * for runaway child.
-		 */
-		td->td_dbgflags |= TDB_FORK;
-		td->td_dbg_forked = p2->p_pid;
-		td2->td_dbgflags |= TDB_STOPATFORK;
-	}
 	if (fr->fr_flags & RFPPWAIT) {
 		td->td_pflags |= TDP_RFPPWAIT;
 		td->td_rfppwait_p = p2;
@@ -756,7 +744,42 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *
 		procdesc_finit(p2->p_procdesc, fp_procdesc);
 		fdrop(fp_procdesc, td);
 	}
 
+	/*
+	 * Speculative check for PTRACE_FORK. PTRACE_FORK is not
+	 * synced with forks in progress so it is OK if we miss it
+	 * if being set atm.
+	 */
+	if ((p1->p_ptevents & PTRACE_FORK) != 0) {
+		sx_xlock(&proctree_lock);
+		PROC_LOCK(p2);
+
+		/*
+		 * p1->p_ptevents & p1->p_pptr are protected by both
+		 * process and proctree locks for modifications,
+		 * so owning proctree_lock allows the race-free read.
+		 */
+		if ((p1->p_ptevents & PTRACE_FORK) != 0) {
+			/*
+			 * Arrange for debugger to receive the fork event.
+			 *
+			 * We can report PL_FLAG_FORKED regardless of
+			 * P_FOLLOWFORK settings, but it does not make a sense
+			 * for runaway child.
+			 */
+			td->td_dbgflags |= TDB_FORK;
+			td->td_dbg_forked = p2->p_pid;
+			td2->td_dbgflags |= TDB_STOPATFORK;
+			proc_set_traced(p2, true);
+			CTR2(KTR_PTRACE,
+			    "do_fork: attaching to new child pid %d: oppid %d",
+			    p2->p_pid, p2->p_oppid);
+			proc_reparent(p2, p1->p_pptr);
+		}
+		PROC_UNLOCK(p2);
+		sx_xunlock(&proctree_lock);
+	}
+
 	if ((fr->fr_flags & RFSTOPPED) == 0) {
 		/*
 		 * If RFSTOPPED not requested, make child runnable and
@@ -773,11 +796,6 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *
 	}
 
 	PROC_LOCK(p2);
-	/*
-	 * Wait until debugger is attached to child.
-	 */
-	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
-		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
 	_PRELE(p2);
 	racct_proc_fork_done(p2);
 	PROC_UNLOCK(p2);
@@ -1063,24 +1081,15 @@ fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
 void
 fork_return(struct thread *td, struct trapframe *frame)
 {
-	struct proc *p, *dbg;
+	struct proc *p;
 
 	p = td->td_proc;
 	if (td->td_dbgflags & TDB_STOPATFORK) {
-		sx_xlock(&proctree_lock);
 		PROC_LOCK(p);
-		if (p->p_pptr->p_ptevents & PTRACE_FORK) {
+		if ((p->p_flag & P_TRACED) != 0) {
 			/*
-			 * If debugger still wants auto-attach for the
-			 * parent's children, do it now.
+			 * Inform the debugger if one is still present.
 			 */
-			dbg = p->p_pptr->p_pptr;
-			proc_set_traced(p, true);
-			CTR2(KTR_PTRACE,
-			    "fork_return: attaching to new child pid %d: oppid %d",
-			    p->p_pid, p->p_oppid);
-			proc_reparent(p, dbg);
-			sx_xunlock(&proctree_lock);
 			td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP;
 			ptracestop(td, SIGSTOP, NULL);
 			td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX);
@@ -1088,9 +1097,7 @@ fork_return(struct thread *td, struct trapframe *frame)
 			/*
 			 * ... otherwise clear the request.
 			 */
-			sx_xunlock(&proctree_lock);
 			td->td_dbgflags &= ~TDB_STOPATFORK;
-			cv_broadcast(&p->p_dbgwait);
 		}
 		PROC_UNLOCK(p);
 	} else if (p->p_flag & P_TRACED || td->td_dbgflags & TDB_BORN) {
@@ -266,7 +266,6 @@ proc_init(void *mem, int size, int flags)
 	mtx_init(&p->p_itimmtx, "pitiml", NULL, MTX_SPIN | MTX_NEW);
 	mtx_init(&p->p_profmtx, "pprofl", NULL, MTX_SPIN | MTX_NEW);
 	cv_init(&p->p_pwait, "ppwait");
-	cv_init(&p->p_dbgwait, "dbgwait");
 	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
 	EVENTHANDLER_DIRECT_INVOKE(process_init, p);
 	p->p_stats = pstats_alloc();
@@ -2581,7 +2581,6 @@ ptracestop(struct thread *td, int sig, ksiginfo_t *si)
 		}
 		if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
 			td->td_dbgflags &= ~TDB_STOPATFORK;
-			cv_broadcast(&p->p_dbgwait);
 		}
 stopme:
 		thread_suspend_switch(td, p);
@@ -685,8 +685,6 @@ struct proc {
 	LIST_HEAD(, mqueue_notifier)	p_mqnotifier; /* (c) mqueue notifiers.*/
 	struct kdtrace_proc	*p_dtrace; /* (*) DTrace-specific data. */
 	struct cv	p_pwait;	/* (*) wait cv for exit/exec. */
-	struct cv	p_dbgwait;	/* (*) wait cv for debugger attach
-					   after fork. */
 	uint64_t	p_prev_runtime;	/* (c) Resource usage accounting. */
 	struct racct	*p_racct;	/* (b) Resource accounting. */
 	int		p_throttled;	/* (c) Flag for racct pcpu throttling */