- Move p_cpulimit to struct proc from struct plimit and protect it with
  sched_lock.  This means that we no longer access p_limit in mi_switch()
  and the p_limit pointer can be protected by the proc lock.
- Remove the PRS_ZOMBIE check from the CPU limit test in mi_switch().
  PRS_ZOMBIE processes don't call mi_switch(), and even if they did there
  is no longer the danger of p_limit being NULL (which is what the
  original zombie check was added for).
- When we bump the current process's soft CPU limit in ast(), just bump
  the private p_cpulimit instead of the shared rlimit.  This fixes an XXX
  for some value of fix.  There is still a (probably benign) bug in that
  this code doesn't check that the new soft limit exceeds the hard limit.

Inspired by: bde (2)
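Not part of the commit, but for context: a minimal userland sketch of the RLIMIT_CPU behavior these kernel paths implement. The soft limit delivers SIGXCPU (which ast() then bumps by 5 seconds so the process keeps running), and the hard limit gets the process killed. The handler name and limit values are illustrative only.

#include <sys/resource.h>

#include <err.h>
#include <signal.h>
#include <unistd.h>

static void
xcpu_handler(int sig)
{

        (void)sig;
        /* Soft limit hit; write(2) is async-signal-safe. */
        (void)write(STDOUT_FILENO, "SIGXCPU\n", 8);
}

int
main(void)
{
        struct rlimit rl;

        (void)signal(SIGXCPU, xcpu_handler);
        rl.rlim_cur = 1;        /* soft limit: SIGXCPU after ~1s of CPU */
        rl.rlim_max = 10;       /* hard limit: killed after ~10s of CPU */
        if (setrlimit(RLIMIT_CPU, &rl) == -1)
                err(1, "setrlimit");
        for (;;)
                ;               /* burn CPU until the limits fire */
}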
@@ -424,8 +424,8 @@ proc0_init(void *dummy __unused)
         limit0.pl_rlimit[RLIMIT_RSS].rlim_max = i;
         limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
         limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
-        limit0.p_cpulimit = RLIM_INFINITY;
         limit0.p_refcnt = 1;
+        p->p_cpulimit = RLIM_INFINITY;
 
         /* Allocate a prototype map so we have something to fork. */
         pmap_pinit0(vmspace_pmap(&vmspace0));
@@ -565,10 +565,9 @@ dosetrlimit(td, which, limp)
         switch (which) {
 
         case RLIMIT_CPU:
-                if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
-                        p->p_limit->p_cpulimit = RLIM_INFINITY;
-                else
-                        p->p_limit->p_cpulimit = limp->rlim_cur;
+                mtx_lock_spin(&sched_lock);
+                p->p_cpulimit = limp->rlim_cur;
+                mtx_unlock_spin(&sched_lock);
                 break;
         case RLIMIT_DATA:
                 if (limp->rlim_cur > maxdsiz)
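With p_cpulimit now covered by sched_lock (the "(j)" annotation added to struct proc below), code outside mi_switch() that wants a consistent snapshot takes the spin lock around the access, as dosetrlimit() does above. A hedged sketch of that convention; the helper name is hypothetical and not part of this change:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>

/* Hypothetical helper: snapshot a process's private CPU limit. */
static __inline rlim_t
proc_cpulimit(struct proc *p)
{
        rlim_t lim;

        mtx_lock_spin(&sched_lock);
        lim = p->p_cpulimit;
        mtx_unlock_spin(&sched_lock);
        return (lim);
}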
@@ -786,15 +786,9 @@ mi_switch(void)
         /*
          * Check if the process exceeds its cpu resource allocation.  If
          * over max, arrange to kill the process in ast().
-         *
-         * XXX The checking for p_limit being NULL here is totally bogus,
-         * but hides something easy to trip over, as a result of us switching
-         * after the limit has been freed/set-to-NULL.  A KASSERT() will be
-         * appropriate once this is no longer a bug, to watch for regression.
          */
-        if (p->p_state != PRS_ZOMBIE && p->p_limit != NULL &&
-            p->p_limit->p_cpulimit != RLIM_INFINITY &&
-            p->p_runtime.sec > p->p_limit->p_cpulimit) {
+        if (p->p_cpulimit != RLIM_INFINITY &&
+            p->p_runtime.sec > p->p_cpulimit) {
                 p->p_sflag |= PS_XCPU;
                 ke->ke_flags |= KEF_ASTPENDING;
         }
@@ -241,9 +241,10 @@ ast(struct trapframe *framep)
                         killproc(p, "exceeded maximum CPU limit");
                 else {
                         psignal(p, SIGXCPU);
-                        if (rlim->rlim_cur < rlim->rlim_max)
-                                /* XXX: we should make a private copy. */
-                                rlim->rlim_cur += 5;
+                        mtx_lock_spin(&sched_lock);
+                        if (p->p_cpulimit < rlim->rlim_max)
+                                p->p_cpulimit += 5;
+                        mtx_unlock_spin(&sched_lock);
                 }
                 PROC_UNLOCK(p);
         }
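As the commit message notes, a (probably benign) bug remains: the "+= 5" bump above does not ensure the new soft limit stays at or below the hard limit. A hedged sketch of what a clamped variant of that block could look like (illustrative only, not part of the change):

                        mtx_lock_spin(&sched_lock);
                        if (p->p_cpulimit < rlim->rlim_max) {
                                p->p_cpulimit += 5;
                                /* Clamp the private soft limit to the hard limit. */
                                if (p->p_cpulimit > rlim->rlim_max)
                                        p->p_cpulimit = rlim->rlim_max;
                        }
                        mtx_unlock_spin(&sched_lock);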
@@ -573,6 +573,7 @@ struct proc {
         struct pgrp     *p_pgrp;        /* (c + e) Pointer to process group. */
         struct sysentvec *p_sysent;     /* (b) Syscall dispatch info. */
         struct pargs    *p_args;        /* (c) Process arguments. */
+        rlim_t          p_cpulimit;     /* (j) Current CPU limit in seconds. */
 /* End area that is copied on creation. */
 #define p_endcopy       p_xstat
 
@@ -82,7 +82,6 @@ struct plimit {
 #define PL_SHAREMOD     0x01            /* modifications are shared */
         int     p_lflags;
         int     p_refcnt;               /* number of references */
-        rlim_t  p_cpulimit;             /* current cpu limit in sec */
 };
 
 #ifdef _KERNEL