The current rusage code shows peculiar problems:

- Unsafe use of ruadd() in thread_exit()
- Non-atomicity of thread_exit() with respect to the exit1() operations

This patch addresses these problems by allocating p_ru as part of the
process structure and by changing the way it is accessed; a minimal
sketch of the idea follows the commit metadata below.

A small chunk of this patch also resolves a race on p_state in kern_wait(),
since we have to be sure that the process has really finished being zombified.

Submitted by: jeff
Approved by: jeff (mentor)
Attilio Rao 2007-06-09 18:56:11 +00:00
parent 65d32cd8fb
commit a140976eb4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170466
6 changed files with 26 additions and 48 deletions
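The core idea, visible in the struct proc hunk at the end of this diff, is that the exit rusage is now embedded in struct proc rather than malloc'd in exit1() and freed in kern_wait(). Below is a rough userland sketch of that pattern; the fake_proc/fake_thread types and the fold_thread_usage() helper are simplified, illustrative stand-ins for the kernel structures and for rucollect(), not the kernel API itself.

#include <stdio.h>
#include <string.h>
#include <sys/resource.h>

struct fake_thread {
	struct rusage td_ru;		/* per-thread usage counters */
};

struct fake_proc {
	struct rusage p_ru;		/* embedded: no separate allocation to manage */
};

/* Fold one exiting thread's counters into the process totals (cf. rucollect()). */
static void
fold_thread_usage(struct fake_proc *p, const struct fake_thread *td)
{
	p->p_ru.ru_nvcsw += td->td_ru.ru_nvcsw;
	p->p_ru.ru_nivcsw += td->td_ru.ru_nivcsw;
	p->p_ru.ru_minflt += td->td_ru.ru_minflt;
	p->p_ru.ru_majflt += td->td_ru.ru_majflt;
}

int
main(void)
{
	struct fake_proc p;
	struct fake_thread t1 = { .td_ru = { .ru_nvcsw = 3, .ru_minflt = 10 } };
	struct fake_thread t2 = { .td_ru = { .ru_nvcsw = 5, .ru_majflt = 1 } };

	memset(&p, 0, sizeof(p));
	fold_thread_usage(&p, &t1);	/* in the kernel this happens under the proc lock */
	fold_thread_usage(&p, &t2);

	/* wait() side: copy the embedded struct by value, nothing to free. */
	struct rusage snapshot = p.p_ru;
	printf("nvcsw=%ld minflt=%ld majflt=%ld\n",
	    snapshot.ru_nvcsw, snapshot.ru_minflt, snapshot.ru_majflt);
	return (0);
}

Because the rusage is embedded, an exiting thread only needs to hold the process lock to fold its counters into p_ru, and the wait() side copies the struct by value, which is why the MALLOC/FREE pair and the p_ru = NULL bookkeeping disappear from exit1() and kern_wait() in the hunks below.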


@@ -1226,19 +1226,21 @@ svr4_sys_waitsys(td, uap)
nfound++;
PROC_SLOCK(p);
/*
* See if we have a zombie. If so, WNOWAIT should be set,
* as otherwise we should have called kern_wait() up above.
*/
if ((p->p_state == PRS_ZOMBIE) &&
((uap->options & (SVR4_WEXITED|SVR4_WTRAPPED)))) {
PROC_SUNLOCK(p);
KASSERT(uap->options & SVR4_WNOWAIT,
("WNOWAIT is clear"));
/* Found a zombie, so cache info in local variables. */
pid = p->p_pid;
status = p->p_xstat;
ru = *p->p_ru;
ru = p->p_ru;
calcru(p, &ru.ru_utime, &ru.ru_stime);
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
@@ -1253,7 +1255,6 @@ svr4_sys_waitsys(td, uap)
* See if we have a stopped or continued process.
* XXX: This duplicates the same code in kern_wait().
*/
PROC_SLOCK(p);
if ((p->p_flag & P_STOPPED_SIG) &&
(p->p_suspcount == p->p_numthreads) &&
(p->p_flag & P_WAITED) == 0 &&
@@ -1264,7 +1265,7 @@ svr4_sys_waitsys(td, uap)
sx_sunlock(&proctree_lock);
pid = p->p_pid;
status = W_STOPCODE(p->p_xstat);
ru = *p->p_ru;
ru = p->p_ru;
calcru(p, &ru.ru_utime, &ru.ru_stime);
PROC_UNLOCK(p);
@@ -1285,7 +1286,7 @@ svr4_sys_waitsys(td, uap)
if (((uap->options & SVR4_WNOWAIT)) == 0)
p->p_flag &= ~P_CONTINUED;
pid = p->p_pid;
ru = *p->p_ru;
ru = p->p_ru;
status = SIGCONT;
calcru(p, &ru.ru_utime, &ru.ru_stime);
PROC_UNLOCK(p);


@@ -500,6 +500,7 @@ proc0_post(void *dummy __unused)
{
struct timespec ts;
struct proc *p;
struct rusage ru;
/*
* Now we can look at the time, having had a chance to verify the
@@ -508,7 +509,11 @@ proc0_post(void *dummy __unused)
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
microuptime(&p->p_stats->p_start);
rufetch(p, &ru); /* Clears thread stats */
p->p_rux.rux_runtime = 0;
p->p_rux.rux_uticks = 0;
p->p_rux.rux_sticks = 0;
p->p_rux.rux_iticks = 0;
}
sx_sunlock(&allproc_lock);
PCPU_SET(switchtime, cpu_ticks());


@@ -116,7 +116,6 @@ exit1(struct thread *td, int rv)
struct ucred *tracecred;
#endif
struct plimit *plim;
struct rusage *ru;
int locked;
/*
@@ -233,8 +232,6 @@ exit1(struct thread *td, int rv)
*/
EVENTHANDLER_INVOKE(process_exit, p);
MALLOC(ru, struct rusage *, sizeof(struct rusage),
M_ZOMBIE, M_WAITOK);
/*
* If parent is waiting for us to exit or exec,
* P_PPWAIT is set; we will wakeup the parent below.
@@ -446,16 +443,6 @@ exit1(struct thread *td, int rv)
PROC_LOCK(p);
p->p_xstat = rv;
p->p_xthread = td;
/*
* All statistics have been aggregated into the final td_ru by
* thread_exit(). Copy these into the proc here where wait*()
* can find them.
* XXX We will miss any statistics gathered between here and
* thread_exit() except for those related to clock ticks.
*/
*ru = td->td_ru;
ru->ru_nvcsw++;
p->p_ru = ru;
/*
* Notify interested parties of our demise.
*/
@@ -536,6 +523,11 @@ exit1(struct thread *td, int rv)
*/
knlist_destroy(&p->p_klist);
/*
* Save our children's rusage information in our exit rusage.
*/
ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
/*
* Make sure the scheduler takes this thread out of its tables etc.
* This will also release this thread's reference to the ucred.
@@ -711,27 +703,15 @@ kern_wait(struct thread *td, pid_t pid, int *status, int options,
}
nfound++;
PROC_SLOCK(p);
if (p->p_state == PRS_ZOMBIE) {
/*
* It is possible that the last thread of this
* process is still running on another CPU
* in thread_exit() after having dropped the process
* lock via PROC_UNLOCK() but before it has completed
* cpu_throw(). In that case, the other thread must
* still hold the proc slock, so simply by acquiring
* proc slock once we will wait long enough for the
* thread to exit in that case.
* XXX This is questionable.
*/
PROC_SLOCK(p);
PROC_SUNLOCK(p);
td->td_retval[0] = p->p_pid;
if (status)
*status = p->p_xstat; /* convert to int */
if (rusage) {
*rusage = *p->p_ru;
*rusage = p->p_ru;
calcru(p, &rusage->ru_utime, &rusage->ru_stime);
}
@@ -776,11 +756,9 @@ kern_wait(struct thread *td, pid_t pid, int *status, int options,
p->p_xstat = 0; /* XXX: why? */
PROC_UNLOCK(p);
PROC_LOCK(q);
ruadd(&q->p_stats->p_cru, &q->p_crux, p->p_ru,
ruadd(&q->p_stats->p_cru, &q->p_crux, &p->p_ru,
&p->p_rux);
PROC_UNLOCK(q);
FREE(p->p_ru, M_ZOMBIE);
p->p_ru = NULL;
/*
* Decrement the count of procs running with this uid.
@@ -819,7 +797,6 @@ kern_wait(struct thread *td, pid_t pid, int *status, int options,
sx_xunlock(&allproc_lock);
return (0);
}
PROC_SLOCK(p);
if ((p->p_flag & P_STOPPED_SIG) &&
(p->p_suspcount == p->p_numthreads) &&
(p->p_flag & P_WAITED) == 0 &&


@@ -1039,19 +1039,16 @@ rufetch(struct proc *p, struct rusage *ru)
{
struct thread *td;
memset(ru, 0, sizeof(*ru));
PROC_SLOCK(p);
if (p->p_ru == NULL) {
KASSERT(p->p_numthreads > 0,
("rufetch: No threads or ru in proc %p", p));
*ru = p->p_ru;
if (p->p_numthreads > 0) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
ruxagg(&p->p_rux, td);
thread_unlock(td);
rucollect(ru, &td->td_ru);
}
} else
*ru = *p->p_ru;
}
PROC_SUNLOCK(p);
}


@@ -391,9 +391,9 @@ thread_exit(void)
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
PCPU_INC(cnt.v_swtch);
/* Add the child usage to our own when the final thread exits. */
if (p->p_numthreads == 1)
ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
/* Save our resource usage in our process. */
td->td_ru.ru_nvcsw++;
rucollect(&p->p_ru, &td->td_ru);
/*
* The last thread is left attached to the process
* So that the whole bundle gets recycled. Skip
@@ -411,9 +411,7 @@ thread_exit(void)
thread_unlink(td);
#endif
thread_unlock(td);
/* Impart our resource usage on another thread */
td2 = FIRST_THREAD_IN_PROC(p);
rucollect(&td2->td_ru, &td->td_ru);
sched_exit_thread(td2, td);
/*
@@ -462,7 +460,7 @@ thread_exit(void)
}
PROC_UNLOCK(p);
thread_lock(td);
/* Aggregate our tick statistics into our parents rux. */
/* Save our tick information with both the thread and proc locked */
ruxagg(&p->p_rux, td);
PROC_SUNLOCK(p);
td->td_state = TDS_INACTIVE;


@@ -571,7 +571,7 @@ struct proc {
struct mdproc p_md; /* Any machine-dependent fields. */
struct callout p_itcallout; /* (h + c) Interval timer callout. */
u_short p_acflag; /* (c) Accounting flags. */
struct rusage *p_ru; /* (a) Exit information. */
struct rusage p_ru; /* (a) Exit information. */
struct proc *p_peers; /* (r) */
struct proc *p_leader; /* (b) */
void *p_emuldata; /* (c) Emulator state data. */