Locking for the per-process resource limits structure.

- struct plimit includes a mutex to protect a reference count.  The plimit
  structure is treated similarly to struct ucred in that it is always copy
  on write, so having a reference to a structure is sufficient to read from
  it without needing a further lock.
- The proc lock protects the p_limit pointer and must be held while reading
  limits from a process to keep the limit structure from being swapped out
  from under the reader.
- Various global limits that are ints are not protected by a lock since
  int writes are atomic on all the archs we support and thus a lock
  wouldn't buy us anything.
- All accesses to individual resource limits from a process are abstracted
  behind a simple lim_rlimit(), lim_max(), and lim_cur() API that returns
  either an rlimit structure or the current or max individual limit of the
  specified resource for a process (a brief usage sketch follows this list).
- dosetrlimit() was renamed to kern_setrlimit() to match the existing style of
  other similar syscall helper functions.
- The alpha OSF/1 compat layer no longer calls getrlimit() and setrlimit()
  (it didn't use the stackgap when it should have) but uses lim_rlimit()
  and kern_setrlimit() instead.
- The svr4 compat no longer uses the stackgap for resource limits calls,
  but uses lim_rlimit() and kern_setrlimit() instead.
- The ibcs2 compat no longer uses the stackgap for resource limits.  It
  also no longer uses the stackgap for accessing sysctls for the
  ibcs2_sysconf() syscall but uses kernel_sysctl() instead.  As a result,
  ibcs2_sysconf() no longer needs Giant.
- The p_rlimit macro no longer exists.
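
For reference, here is a minimal sketch of the two access patterns this change
establishes: reading a limit under the proc lock via the lim_*() API, and the
copy-on-write update path (the same shape used by kern_setrlimit() and the
ia32/acct changes below).  The example_*() helper names are illustrative only;
the lim_*() calls and locking macros are the real API.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>

/*
 * Reader side: the proc lock pins p->p_limit while the limit is copied
 * out; since plimit contents are never modified in place, the snapshot
 * stays valid after the lock is dropped.
 */
static rlim_t
example_read_fsize(struct proc *p)	/* illustrative helper */
{
	struct rlimit rl;

	PROC_LOCK(p);
	lim_rlimit(p, RLIMIT_FSIZE, &rl);	/* or lim_cur()/lim_max() */
	PROC_UNLOCK(p);
	return (rl.rlim_cur);
}

/*
 * Writer side: never modify a shared plimit.  Allocate a new one, copy
 * the old contents, adjust, swap the p_limit pointer under the proc
 * lock, and drop the reference on the old structure.
 */
static void
example_clamp_data(struct proc *p, rlim_t maxdsiz)	/* illustrative helper */
{
	struct plimit *newlim, *oldlim;

	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	lim_copy(newlim, oldlim);
	if (newlim->pl_rlimit[RLIMIT_DATA].rlim_cur > maxdsiz)
		newlim->pl_rlimit[RLIMIT_DATA].rlim_cur = maxdsiz;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);
}

Consumers that only need a stable read-only snapshot beyond the proc lock
(fork and procfs in this change) instead take an extra reference with
lim_hold() and release it later with lim_free().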

Submitted by:	mtm (mostly, I only did a few cleanups and catchups)
Tested on:	i386
Compiled on:	alpha, amd64
Author:	John Baldwin
Date:	2004-02-04 21:52:57 +00:00
Commit:	91d5354a2c (parent 890cefab5b)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=125454
42 changed files with 581 additions and 397 deletions

View File

@ -382,7 +382,7 @@ linux_setrlimit(td, uap)
if ((error =
copyin(uap->rlim, &rlim, sizeof (struct rlimit))))
return (error);
return dosetrlimit(td, which, &rlim);
return (kern_setrlimit(td, which, &rlim));
}
int
@ -390,7 +390,9 @@ linux_getrlimit(td, uap)
struct thread *td;
struct linux_getrlimit_args *uap;
{
struct rlimit rlim;
u_int which;
int error;
#ifdef DEBUG
if (ldebug(getrlimit))
@ -405,6 +407,9 @@ linux_getrlimit(td, uap)
if (which == -1)
return EINVAL;
return (copyout(&td->td_proc->p_rlimit[which],
uap->rlim, sizeof (struct rlimit)));
PROC_LOCK(td->td_proc);
lim_rlimit(td->td_proc, which, &rlim);
PROC_UNLOCK(td->td_proc);
error = copyout(&rlim, uap->rlim, sizeof (struct rlimit));
return (error);
}

View File

@ -390,23 +390,25 @@ osf1_getrlimit(td, uap)
struct thread *td;
struct osf1_getrlimit_args *uap;
{
struct __getrlimit_args /* {
syscallarg(u_int) which;
syscallarg(struct rlimit *) rlp;
} */ a;
struct rlimit bsd_rlim;
struct proc *p;
int which;
if (uap->which >= OSF1_RLIMIT_NLIMITS)
return (EINVAL);
if (uap->which <= OSF1_RLIMIT_LASTCOMMON)
a.which = uap->which;
which = uap->which;
else if (uap->which == OSF1_RLIMIT_NOFILE)
a.which = RLIMIT_NOFILE;
which = RLIMIT_NOFILE;
else
return (0);
a.rlp = (struct rlimit *)uap->rlp;
return getrlimit(td, &a);
p = td->td_proc;
PROC_LOCK(p);
lim_rlimit(p, which, &bsd_rlim);
PROC_UNLOCK(p);
return (copyout(&bsd_rlim, uap->rlp, sizeof(bsd_rlim)));
}
@ -415,23 +417,24 @@ osf1_setrlimit(td, uap)
struct thread *td;
struct osf1_setrlimit_args *uap;
{
struct __setrlimit_args /* {
syscallarg(u_int) which;
syscallarg(struct rlimit *) rlp;
} */ a;
struct rlimit bsd_rlim;
int error, which;
if (uap->which >= OSF1_RLIMIT_NLIMITS)
return (EINVAL);
if (uap->which <= OSF1_RLIMIT_LASTCOMMON)
a.which = uap->which;
which = uap->which;
else if (uap->which == OSF1_RLIMIT_NOFILE)
a.which = RLIMIT_NOFILE;
which = RLIMIT_NOFILE;
else
return (0);
a.rlp = (struct rlimit *)uap->rlp;
return setrlimit(td, &a);
error = copyin(uap->rlp, &bsd_rlim, sizeof(bsd_rlim));
if (error)
return (error);
return (kern_setrlimit(td, which, &bsd_rlim));
}

View File

@ -273,50 +273,33 @@ static void
ia32_fixlimits(struct image_params *imgp)
{
struct proc *p = imgp->proc;
struct plimit *oldlim, *newlim;
if (ia32_maxdsiz == 0 && ia32_maxssiz == 0 && ia32_maxvmem == 0)
return;
newlim = lim_alloc();
PROC_LOCK(p);
oldlim = p->p_limit;
lim_copy(newlim, oldlim);
if (ia32_maxdsiz != 0) {
if (p->p_rlimit[RLIMIT_DATA].rlim_cur > ia32_maxdsiz ||
p->p_rlimit[RLIMIT_DATA].rlim_max > ia32_maxdsiz) {
if (p->p_limit->p_refcnt > 1) {
p->p_limit->p_refcnt--;
p->p_limit = limcopy(p->p_limit);
}
if (p->p_rlimit[RLIMIT_DATA].rlim_cur > ia32_maxdsiz)
p->p_rlimit[RLIMIT_DATA].rlim_cur =
ia32_maxdsiz;
if (p->p_rlimit[RLIMIT_DATA].rlim_max > ia32_maxdsiz)
p->p_rlimit[RLIMIT_DATA].rlim_max =
ia32_maxdsiz;
}
if (newlim->pl_rlimit[RLIMIT_DATA].rlim_cur > ia32_maxdsiz)
newlim->pl_rlimit[RLIMIT_DATA].rlim_cur = ia32_maxdsiz;
if (newlim->pl_rlimit[RLIMIT_DATA].rlim_max > ia32_maxdsiz)
newlim->pl_rlimit[RLIMIT_DATA].rlim_max = ia32_maxdsiz;
}
if (ia32_maxssiz != 0) {
if (p->p_rlimit[RLIMIT_STACK].rlim_cur > ia32_maxssiz ||
p->p_rlimit[RLIMIT_STACK].rlim_max > ia32_maxssiz) {
if (p->p_limit->p_refcnt > 1) {
p->p_limit->p_refcnt--;
p->p_limit = limcopy(p->p_limit);
}
if (p->p_rlimit[RLIMIT_STACK].rlim_cur > ia32_maxssiz)
p->p_rlimit[RLIMIT_STACK].rlim_cur =
ia32_maxssiz;
if (p->p_rlimit[RLIMIT_STACK].rlim_max > ia32_maxssiz)
p->p_rlimit[RLIMIT_STACK].rlim_max =
ia32_maxssiz;
}
if (newlim->pl_rlimit[RLIMIT_STACK].rlim_cur > ia32_maxssiz)
newlim->pl_rlimit[RLIMIT_STACK].rlim_cur = ia32_maxssiz;
if (newlim->pl_rlimit[RLIMIT_STACK].rlim_max > ia32_maxssiz)
newlim->pl_rlimit[RLIMIT_STACK].rlim_max = ia32_maxssiz;
}
if (ia32_maxvmem != 0) {
if (p->p_rlimit[RLIMIT_VMEM].rlim_cur > ia32_maxvmem ||
p->p_rlimit[RLIMIT_VMEM].rlim_max > ia32_maxvmem) {
if (p->p_limit->p_refcnt > 1) {
p->p_limit->p_refcnt--;
p->p_limit = limcopy(p->p_limit);
}
if (p->p_rlimit[RLIMIT_VMEM].rlim_cur > ia32_maxvmem)
p->p_rlimit[RLIMIT_VMEM].rlim_cur =
ia32_maxvmem;
if (p->p_rlimit[RLIMIT_VMEM].rlim_max > ia32_maxvmem)
p->p_rlimit[RLIMIT_VMEM].rlim_max =
ia32_maxvmem;
}
if (newlim->pl_rlimit[RLIMIT_VMEM].rlim_cur > ia32_maxvmem)
newlim->pl_rlimit[RLIMIT_VMEM].rlim_cur = ia32_maxvmem;
if (newlim->pl_rlimit[RLIMIT_VMEM].rlim_max > ia32_maxvmem)
newlim->pl_rlimit[RLIMIT_VMEM].rlim_max = ia32_maxvmem;
}
p->p_limit = newlim;
PROC_UNLOCK(p);
lim_free(oldlim);
}

View File

@ -378,20 +378,19 @@ linux_uselib(struct thread *td, struct linux_uselib_args *args)
goto cleanup;
}
/* To protect td->td_proc->p_rlimit in the if condition. */
mtx_assert(&Giant, MA_OWNED);
/*
* text/data/bss must not exceed limits
* XXX - this is not complete. it should check current usage PLUS
* the resources needed by this library.
*/
PROC_LOCK(td->td_proc);
if (a_out->a_text > maxtsiz ||
a_out->a_data + bss_size >
td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur) {
a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA)) {
PROC_UNLOCK(td->td_proc);
error = ENOMEM;
goto cleanup;
}
PROC_UNLOCK(td->td_proc);
mp_fixme("Unlocked vflags access.");
/* prevent more writers */
@ -1111,7 +1110,7 @@ linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
return (dosetrlimit(td, which, &bsd_rlim));
return (kern_setrlimit(td, which, &bsd_rlim));
}
int
@ -1119,7 +1118,7 @@ linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
struct l_rlimit rlim;
struct proc *p = td->td_proc;
struct rlimit *bsd_rlp;
struct rlimit bsd_rlim;
u_int which;
#ifdef DEBUG
@ -1134,12 +1133,15 @@ linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
which = linux_to_bsd_resource[args->resource];
if (which == -1)
return (EINVAL);
bsd_rlp = &p->p_rlimit[which];
rlim.rlim_cur = (unsigned long)bsd_rlp->rlim_cur;
PROC_LOCK(p);
lim_rlimit(p, which, &bsd_rlim);
PROC_UNLOCK(p);
rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
if (rlim.rlim_cur == ULONG_MAX)
rlim.rlim_cur = LONG_MAX;
rlim.rlim_max = (unsigned long)bsd_rlp->rlim_max;
rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
if (rlim.rlim_max == ULONG_MAX)
rlim.rlim_max = LONG_MAX;
return (copyout(&rlim, args->rlim, sizeof(rlim)));
@ -1150,7 +1152,7 @@ linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
struct l_rlimit rlim;
struct proc *p = td->td_proc;
struct rlimit *bsd_rlp;
struct rlimit bsd_rlim;
u_int which;
#ifdef DEBUG
@ -1165,10 +1167,13 @@ linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
which = linux_to_bsd_resource[args->resource];
if (which == -1)
return (EINVAL);
bsd_rlp = &p->p_rlimit[which];
rlim.rlim_cur = (l_ulong)bsd_rlp->rlim_cur;
rlim.rlim_max = (l_ulong)bsd_rlp->rlim_max;
PROC_LOCK(p);
lim_rlimit(p, which, &bsd_rlim);
PROC_UNLOCK(p);
rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /*!__alpha__*/

View File

@ -104,14 +104,16 @@ exec_svr4_imgact(imgp)
/* text + data can't exceed file size */
if (a_out->a_data + a_out->a_text > imgp->attr->va_size)
return (EFAULT);
/* For p_rlimit below. */
mtx_assert(&Giant, MA_OWNED);
/*
* text/data/bss must not exceed limits
*/
PROC_LOCK(imgp->proc);
if (a_out->a_text > maxtsiz ||
a_out->a_data + bss_size > imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur)
a_out->a_data + bss_size > lim_cur(imgp->proc, RLIMIT_DATA)) {
PROC_UNLOCK(imgp->proc);
return (ENOMEM);
}
PROC_UNLOCK(imgp->proc);
VOP_UNLOCK(imgp->vp, 0, td);

View File

@ -66,10 +66,13 @@ svr4_sys_poll(td, uap)
int idx = 0, cerr;
u_long siz;
mtx_assert(&Giant, MA_OWNED);
if (uap->nfds > td->td_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur &&
uap->nfds > FD_SETSIZE)
return (EINVAL);
PROC_LOCK(td->td_proc);
if (uap->nfds > lim_cur(td->td_proc, RLIMIT_NOFILE) &&
uap->nfds > FD_SETSIZE) {
PROC_UNLOCK(td->td_proc);
return (EINVAL);
}
PROC_UNLOCK(td->td_proc);
pa.fds = uap->fds;
pa.nfds = uap->nfds;

View File

@ -820,15 +820,15 @@ svr4_sys_break(td, uap)
base = round_page((vm_offset_t) vm->vm_daddr);
ns = (vm_offset_t)uap->nsize;
new = round_page(ns);
/* For p_rlimit. */
mtx_assert(&Giant, MA_OWNED);
if (new > base) {
if ((new - base) > (unsigned) td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur) {
PROC_LOCK(p);
if ((new - base) > (unsigned)lim_cur(p, RLIMIT_DATA)) {
PROC_UNLOCK(p);
return ENOMEM;
}
if (new >= VM_MAXUSER_ADDRESS) {
return (ENOMEM);
}
}
PROC_UNLOCK(p);
if (new >= VM_MAXUSER_ADDRESS)
return (ENOMEM);
} else if (new < base) {
/*
* This is simply an invalid value. If someone wants to
@ -843,8 +843,12 @@ svr4_sys_break(td, uap)
if (new > old) {
vm_size_t diff;
diff = new - old;
if (vm->vm_map.size + diff > p->p_rlimit[RLIMIT_VMEM].rlim_cur)
PROC_LOCK(p);
if (vm->vm_map.size + diff > lim_cur(p, RLIMIT_VMEM)) {
PROC_UNLOCK(p);
return(ENOMEM);
}
PROC_UNLOCK(p);
rv = vm_map_find(&vm->vm_map, NULL, 0, &old, diff, FALSE,
VM_PROT_ALL, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
@ -922,42 +926,33 @@ svr4_sys_ulimit(td, uap)
struct svr4_sys_ulimit_args *uap;
{
int *retval = td->td_retval;
int error;
switch (uap->cmd) {
case SVR4_GFILLIM:
/* For p_rlimit below. */
mtx_assert(&Giant, MA_OWNED);
*retval = td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur / 512;
PROC_LOCK(td->td_proc);
*retval = lim_cur(td->td_proc, RLIMIT_FSIZE) / 512;
PROC_UNLOCK(td->td_proc);
if (*retval == -1)
*retval = 0x7fffffff;
return 0;
case SVR4_SFILLIM:
{
int error;
struct __setrlimit_args srl;
struct rlimit krl;
caddr_t sg = stackgap_init();
struct rlimit *url = (struct rlimit *)
stackgap_alloc(&sg, sizeof *url);
krl.rlim_cur = uap->newlimit * 512;
mtx_assert(&Giant, MA_OWNED);
krl.rlim_max = td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_max;
PROC_LOCK(td->td_proc);
krl.rlim_max = lim_max(td->td_proc, RLIMIT_FSIZE);
PROC_UNLOCK(td->td_proc);
error = copyout(&krl, url, sizeof(*url));
error = kern_setrlimit(td, RLIMIT_FSIZE, &krl);
if (error)
return error;
srl.which = RLIMIT_FSIZE;
srl.rlp = url;
error = setrlimit(td, &srl);
if (error)
return error;
mtx_assert(&Giant, MA_OWNED);
*retval = td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur;
PROC_LOCK(td->td_proc);
*retval = lim_cur(td->td_proc, RLIMIT_FSIZE);
PROC_UNLOCK(td->td_proc);
if (*retval == -1)
*retval = 0x7fffffff;
return 0;
@ -968,12 +963,15 @@ svr4_sys_ulimit(td, uap)
struct vmspace *vm = td->td_proc->p_vmspace;
register_t r;
mtx_assert(&Giant, MA_OWNED);
r = td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur;
PROC_LOCK(td->td_proc);
r = lim_cur(td->td_proc, RLIMIT_DATA);
PROC_UNLOCK(td->td_proc);
if (r == -1)
r = 0x7fffffff;
mtx_lock(&Giant); /* XXX */
r += (long) vm->vm_daddr;
mtx_unlock(&Giant);
if (r < 0)
r = 0x7fffffff;
*retval = r;
@ -981,8 +979,9 @@ svr4_sys_ulimit(td, uap)
}
case SVR4_GDESLIM:
mtx_assert(&Giant, MA_OWNED);
*retval = td->td_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur;
PROC_LOCK(td->td_proc);
*retval = lim_cur(td->td_proc, RLIMIT_NOFILE);
PROC_UNLOCK(td->td_proc);
if (*retval == -1)
*retval = 0x7fffffff;
return 0;

View File

@ -136,9 +136,9 @@ svr4_sys_getrlimit(td, uap)
if (rl == -1)
return EINVAL;
/* For p_rlimit. */
mtx_assert(&Giant, MA_OWNED);
blim = td->td_proc->p_rlimit[rl];
PROC_LOCK(td->td_proc);
lim_rlimit(td->td_proc, rl, &blim);
PROC_UNLOCK(td->td_proc);
/*
* Our infinity, is their maxfiles.
@ -177,20 +177,20 @@ svr4_sys_setrlimit(td, uap)
struct svr4_sys_setrlimit_args *uap;
{
int rl = svr4_to_native_rl(uap->which);
struct rlimit blim, *limp;
struct rlimit blim, curlim;
struct svr4_rlimit slim;
int error;
if (rl == -1)
return EINVAL;
/* For p_rlimit. */
mtx_assert(&Giant, MA_OWNED);
limp = &td->td_proc->p_rlimit[rl];
if ((error = copyin(uap->rlp, &slim, sizeof(slim))) != 0)
return error;
PROC_LOCK(td->td_proc);
lim_rlimit(td->td_proc, rl, &curlim);
PROC_UNLOCK(td->td_proc);
/*
* if the limit is SVR4_RLIM_INFINITY, then we set it to our
* unlimited.
@ -205,20 +205,20 @@ svr4_sys_setrlimit(td, uap)
else if (OKLIMIT(slim.rlim_max))
blim.rlim_max = (rlim_t) slim.rlim_max;
else if (slim.rlim_max == SVR4_RLIM_SAVED_MAX)
blim.rlim_max = limp->rlim_max;
blim.rlim_max = curlim.rlim_max;
else if (slim.rlim_max == SVR4_RLIM_SAVED_CUR)
blim.rlim_max = limp->rlim_cur;
blim.rlim_max = curlim.rlim_cur;
if (slim.rlim_cur == SVR4_RLIM_INFINITY)
blim.rlim_cur = RLIM_INFINITY;
else if (OKLIMIT(slim.rlim_cur))
blim.rlim_cur = (rlim_t) slim.rlim_cur;
else if (slim.rlim_cur == SVR4_RLIM_SAVED_MAX)
blim.rlim_cur = limp->rlim_max;
blim.rlim_cur = curlim.rlim_max;
else if (slim.rlim_cur == SVR4_RLIM_SAVED_CUR)
blim.rlim_cur = limp->rlim_cur;
blim.rlim_cur = curlim.rlim_cur;
return dosetrlimit(td, rl, &blim);
return (kern_setrlimit(td, rl, &blim));
}
@ -234,9 +234,9 @@ svr4_sys_getrlimit64(td, uap)
if (rl == -1)
return EINVAL;
/* For p_rlimit. */
mtx_assert(&Giant, MA_OWNED);
blim = td->td_proc->p_rlimit[rl];
PROC_LOCK(td->td_proc);
lim_rlimit(td->td_proc, rl, &blim);
PROC_UNLOCK(td->td_proc);
/*
* Our infinity, is their maxfiles.
@ -275,20 +275,20 @@ svr4_sys_setrlimit64(td, uap)
struct svr4_sys_setrlimit64_args *uap;
{
int rl = svr4_to_native_rl(uap->which);
struct rlimit blim, *limp;
struct rlimit blim, curlim;
struct svr4_rlimit64 slim;
int error;
if (rl == -1)
return EINVAL;
/* For p_rlimit. */
mtx_assert(&Giant, MA_OWNED);
limp = &td->td_proc->p_rlimit[rl];
if ((error = copyin(uap->rlp, &slim, sizeof(slim))) != 0)
return error;
PROC_LOCK(td->td_proc);
lim_rlimit(td->td_proc, rl, &curlim);
PROC_UNLOCK(td->td_proc);
/*
* if the limit is SVR4_RLIM64_INFINITY, then we set it to our
* unlimited.
@ -303,18 +303,18 @@ svr4_sys_setrlimit64(td, uap)
else if (OKLIMIT64(slim.rlim_max))
blim.rlim_max = (rlim_t) slim.rlim_max;
else if (slim.rlim_max == SVR4_RLIM64_SAVED_MAX)
blim.rlim_max = limp->rlim_max;
blim.rlim_max = curlim.rlim_max;
else if (slim.rlim_max == SVR4_RLIM64_SAVED_CUR)
blim.rlim_max = limp->rlim_cur;
blim.rlim_max = curlim.rlim_cur;
if (slim.rlim_cur == SVR4_RLIM64_INFINITY)
blim.rlim_cur = RLIM_INFINITY;
else if (OKLIMIT64(slim.rlim_cur))
blim.rlim_cur = (rlim_t) slim.rlim_cur;
else if (slim.rlim_cur == SVR4_RLIM64_SAVED_MAX)
blim.rlim_cur = limp->rlim_max;
blim.rlim_cur = curlim.rlim_max;
else if (slim.rlim_cur == SVR4_RLIM64_SAVED_CUR)
blim.rlim_cur = limp->rlim_cur;
blim.rlim_cur = curlim.rlim_cur;
return dosetrlimit(td, rl, &blim);
return (kern_setrlimit(td, rl, &blim));
}

View File

@ -169,7 +169,9 @@ fdesc_statfs(mp, sbp, td)
* limit is ever reduced below the current number
* of open files... ]
*/
lim = td->td_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur;
PROC_LOCK(td->td_proc);
lim = lim_cur(td->td_proc, RLIMIT_NOFILE);
PROC_UNLOCK(td->td_proc);
fdp = td->td_proc->p_fd;
FILEDESC_LOCK(fdp);
last = min(fdp->fd_nfiles, lim);

View File

@ -646,13 +646,15 @@ msdosfs_write(ap)
/*
* If they've exceeded their filesize limit, tell them about it.
*/
if (td &&
((uoff_t)uio->uio_offset + uio->uio_resid >
td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
if (td != NULL) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGXFSZ);
if ((uoff_t)uio->uio_offset + uio->uio_resid >
lim_cur(td->td_proc, RLIMIT_FSIZE)) {
psignal(td->td_proc, SIGXFSZ);
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
if ((uoff_t)uio->uio_offset + uio->uio_resid > DOS_FILESIZE_MAX)

View File

@ -235,12 +235,15 @@ nwfs_writevnode(vp, uiop, cred, ioflag)
}
}
if (uiop->uio_resid == 0) return 0;
if (td && uiop->uio_offset + uiop->uio_resid
> td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
if (td != NULL) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGXFSZ);
if (uiop->uio_offset + uiop->uio_resid >
lim_cur(td->td_proc, RLIMIT_FSIZE)) {
psignal(td->td_proc, SIGXFSZ);
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
NCPVNDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);

View File

@ -46,11 +46,15 @@
#define _RLIMIT_IDENT
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/resource.h>
#include <sys/sbuf.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <fs/pseudofs/pseudofs.h>
#include <fs/procfs/procfs.h>
@ -59,8 +63,17 @@
int
procfs_doprocrlimit(PFS_FILL_ARGS)
{
struct plimit *limp;
int i;
/*
* Obtain a private reference to resource limits
*/
PROC_LOCK(p);
limp = lim_hold(p->p_limit);
PROC_UNLOCK(p);
for (i = 0; i < RLIM_NLIMITS; i++) {
/*
@ -77,24 +90,25 @@ procfs_doprocrlimit(PFS_FILL_ARGS)
* current limit
*/
if (p->p_rlimit[i].rlim_cur == RLIM_INFINITY) {
if (limp->pl_rlimit[i].rlim_cur == RLIM_INFINITY) {
sbuf_printf(sb, "-1 ");
} else {
sbuf_printf(sb, "%llu ",
(unsigned long long)p->p_rlimit[i].rlim_cur);
(unsigned long long)limp->pl_rlimit[i].rlim_cur);
}
/*
* maximum limit
*/
if (p->p_rlimit[i].rlim_max == RLIM_INFINITY) {
if (limp->pl_rlimit[i].rlim_max == RLIM_INFINITY) {
sbuf_printf(sb, "-1\n");
} else {
sbuf_printf(sb, "%llu\n",
(unsigned long long)p->p_rlimit[i].rlim_max);
(unsigned long long)limp->pl_rlimit[i].rlim_max);
}
}
lim_free(limp);
return (0);
}

View File

@ -277,11 +277,15 @@ smbfs_writevnode(struct vnode *vp, struct uio *uiop,
}
if (uiop->uio_resid == 0)
return 0;
if (p && uiop->uio_offset + uiop->uio_resid > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGXFSZ);
PROC_UNLOCK(td->td_proc);
return EFBIG;
if (p != NULL) {
PROC_LOCK(p);
if (uiop->uio_offset + uiop->uio_resid >
lim_cur(p, RLIMIT_FSIZE)) {
psignal(p, SIGXFSZ);
PROC_UNLOCK(p);
return EFBIG;
}
PROC_UNLOCK(p);
}
smb_makescred(&scred, td, cred);
error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

View File

@ -217,15 +217,15 @@ WRITE(ap)
* file servers have no limits, I don't think it matters.
*/
td = uio->uio_td;
/* For p_rlimit. */
mtx_assert(&Giant, MA_OWNED);
if (vp->v_type == VREG && td &&
uio->uio_offset + uio->uio_resid >
td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
if (vp->v_type == VREG && td != NULL) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGXFSZ);
if (uio->uio_offset + uio->uio_resid >
lim_cur(td->td_proc, RLIMIT_FSIZE)) {
psignal(td->td_proc, SIGXFSZ);
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
resid = uio->uio_resid;

View File

@ -217,15 +217,15 @@ WRITE(ap)
* file servers have no limits, I don't think it matters.
*/
td = uio->uio_td;
/* For p_rlimit. */
mtx_assert(&Giant, MA_OWNED);
if (vp->v_type == VREG && td &&
uio->uio_offset + uio->uio_resid >
td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
if (vp->v_type == VREG && td != NULL) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGXFSZ);
if (uio->uio_offset + uio->uio_resid >
lim_cur(td->td_proc, RLIMIT_FSIZE)) {
psignal(td->td_proc, SIGXFSZ);
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
resid = uio->uio_resid;

View File

@ -93,42 +93,41 @@ ibcs2_ulimit(td, uap)
struct thread *td;
struct ibcs2_ulimit_args *uap;
{
#ifdef notyet
int error;
struct rlimit rl;
struct setrlimit_args {
int resource;
struct rlimit *rlp;
} sra;
#endif
struct proc *p;
int error;
#define IBCS2_GETFSIZE 1
#define IBCS2_SETFSIZE 2
#define IBCS2_GETPSIZE 3
#define IBCS2_GETDTABLESIZE 4
p = td->td_proc;
switch (uap->cmd) {
case IBCS2_GETFSIZE:
td->td_retval[0] = td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur;
if (td->td_retval[0] == -1) td->td_retval[0] = 0x7fffffff;
PROC_LOCK(p);
td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE);
PROC_UNLOCK(p);
if (td->td_retval[0] == -1)
td->td_retval[0] = 0x7fffffff;
return 0;
case IBCS2_SETFSIZE: /* XXX - fix this */
#ifdef notyet
case IBCS2_SETFSIZE:
PROC_LOCK(p);
rl.rlim_max = lim_max(p, RLIMIT_FSIZE);
PROC_UNLOCK(p);
rl.rlim_cur = uap->newlimit;
sra.resource = RLIMIT_FSIZE;
sra.rlp = &rl;
error = setrlimit(td, &sra);
if (!error)
td->td_retval[0] = td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur;
else
error = kern_setrlimit(td, RLIMIT_FSIZE, &rl);
if (!error) {
PROC_LOCK(p);
td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE);
PROC_UNLOCK(p);
} else {
DPRINTF(("failed "));
}
return error;
#else
td->td_retval[0] = uap->newlimit;
return 0;
#endif
case IBCS2_GETPSIZE:
mtx_assert(&Giant, MA_OWNED);
td->td_retval[0] = td->td_proc->p_rlimit[RLIMIT_RSS].rlim_cur; /* XXX */
PROC_LOCK(p);
td->td_retval[0] = lim_cur(p, RLIMIT_RSS); /* XXX */
PROC_UNLOCK(p);
return 0;
case IBCS2_GETDTABLESIZE:
uap->cmd = IBCS2_SC_OPEN_MAX;
@ -775,25 +774,19 @@ ibcs2_sysconf(td, uap)
struct ibcs2_sysconf_args *uap;
{
int mib[2], value, len, error;
struct sysctl_args sa;
struct __getrlimit_args ga;
struct proc *p;
p = td->td_proc;
switch(uap->name) {
case IBCS2_SC_ARG_MAX:
mib[1] = KERN_ARGMAX;
break;
case IBCS2_SC_CHILD_MAX:
{
caddr_t sg = stackgap_init();
ga.which = RLIMIT_NPROC;
ga.rlp = stackgap_alloc(&sg, sizeof(struct rlimit *));
if ((error = getrlimit(td, &ga)) != 0)
return error;
td->td_retval[0] = ga.rlp->rlim_cur;
PROC_LOCK(p);
td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NPROC);
PROC_UNLOCK(p);
return 0;
}
case IBCS2_SC_CLK_TCK:
td->td_retval[0] = hz;
@ -804,16 +797,10 @@ ibcs2_sysconf(td, uap)
break;
case IBCS2_SC_OPEN_MAX:
{
caddr_t sg = stackgap_init();
ga.which = RLIMIT_NOFILE;
ga.rlp = stackgap_alloc(&sg, sizeof(struct rlimit *));
if ((error = getrlimit(td, &ga)) != 0)
return error;
td->td_retval[0] = ga.rlp->rlim_cur;
PROC_LOCK(p);
td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NOFILE);
PROC_UNLOCK(p);
return 0;
}
case IBCS2_SC_JOB_CONTROL:
mib[1] = KERN_JOB_CONTROL;
@ -841,13 +828,8 @@ ibcs2_sysconf(td, uap)
mib[0] = CTL_KERN;
len = sizeof(value);
sa.name = mib;
sa.namelen = 2;
sa.old = &value;
sa.oldlenp = &len;
sa.new = NULL;
sa.newlen = 0;
if ((error = __sysctl(td, &sa)) != 0)
error = kernel_sysctl(td, mib, 2, &value, &len, NULL, 0, NULL);
if (error)
return error;
td->td_retval[0] = value;
return 0;

View File

@ -106,10 +106,13 @@ exec_linux_imgact(struct image_params *imgp)
/*
* text/data/bss must not exceed limits
*/
mtx_assert(&Giant, MA_OWNED);
PROC_LOCK(imgp->proc);
if (a_out->a_text > maxtsiz ||
a_out->a_data + bss_size > imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur)
a_out->a_data + bss_size > lim_cur(imgp->proc, RLIMIT_DATA)) {
PROC_UNLOCK(imgp->proc);
return (ENOMEM);
}
PROC_UNLOCK(imgp->proc);
VOP_UNLOCK(imgp->vp, 0, td);

View File

@ -500,9 +500,10 @@ linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
* mmap'ed region, but some apps do not check
* mmap's return value.
*/
mtx_assert(&Giant, MA_OWNED);
PROC_LOCK(p);
p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
p->p_rlimit[RLIMIT_STACK].rlim_cur;
lim_cur(p, RLIMIT_STACK);
PROC_UNLOCK(p);
}
/* This gives us our maximum stack size */

View File

@ -177,14 +177,16 @@ exec_aout_imgact(imgp)
/*
* text/data/bss must not exceed limits
*/
mtx_assert(&Giant, MA_OWNED);
PROC_LOCK(imgp->proc);
if (/* text can't exceed maximum text size */
a_out->a_text > maxtsiz ||
/* data + bss can't exceed rlimit */
a_out->a_data + bss_size >
imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur)
a_out->a_data + bss_size > lim_cur(imgp->proc, RLIMIT_DATA)) {
PROC_UNLOCK(imgp->proc);
return (ENOMEM);
}
PROC_UNLOCK(imgp->proc);
/* copy in arguments and/or environment from old process */
error = exec_extract_strings(imgp);

View File

@ -794,11 +794,11 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
* limits after loading the segments since we do
* not actually fault in all the segments pages.
*/
if (data_size >
imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
PROC_LOCK(imgp->proc);
if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
text_size > maxtsiz ||
total_size >
imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
PROC_UNLOCK(imgp->proc);
error = ENOMEM;
goto fail;
}
@ -815,7 +815,8 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
* its maximum allowed size.
*/
addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
imgp->proc->p_rlimit[RLIMIT_DATA].rlim_max);
lim_max(imgp->proc, RLIMIT_DATA));
PROC_UNLOCK(imgp->proc);
imgp->entry_addr = entry;

View File

@ -209,16 +209,18 @@ do_aout_hdr(struct imgact_gzip * gz)
/*
* text/data/bss must not exceed limits
*/
mtx_assert(&Giant, MA_OWNED);
PROC_LOCK(gz->ip->proc);
if ( /* text can't exceed maximum text size */
gz->a_out.a_text > maxtsiz ||
/* data + bss can't exceed rlimit */
gz->a_out.a_data + gz->bss_size >
gz->ip->proc->p_rlimit[RLIMIT_DATA].rlim_cur) {
lim_cur(gz->ip->proc, RLIMIT_DATA)) {
PROC_UNLOCK(gz->ip->proc);
gz->where = __LINE__;
return (ENOMEM);
}
PROC_UNLOCK(gz->ip->proc);
/* Find out how far we should go */
gz->file_end = gz->file_offset + gz->a_out.a_text + gz->a_out.a_data;

View File

@ -93,7 +93,6 @@ struct thread thread0;
struct kse kse0;
struct ksegrp ksegrp0;
static struct filedesc0 filedesc0;
static struct plimit limit0;
struct vmspace vmspace0;
struct proc *initproc;
@ -419,19 +418,18 @@ proc0_init(void *dummy __unused)
fdp->fd_fd.fd_map = fdp->fd_dmap;
/* Create the limits structures. */
p->p_limit = &limit0;
for (i = 0; i < sizeof(p->p_rlimit)/sizeof(p->p_rlimit[0]); i++)
limit0.pl_rlimit[i].rlim_cur =
limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur =
limit0.pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur =
limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
p->p_limit = lim_alloc();
for (i = 0; i < RLIM_NLIMITS; i++)
p->p_limit->pl_rlimit[i].rlim_cur =
p->p_limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
i = ptoa(cnt.v_free_count);
limit0.pl_rlimit[RLIMIT_RSS].rlim_max = i;
limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
limit0.p_refcnt = 1;
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = i;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
p->p_cpulimit = RLIM_INFINITY;
/* Allocate a prototype map so we have something to fork. */

View File

@ -228,6 +228,7 @@ acct_process(td)
int t, ret;
struct vnode *vp;
struct ucred *uc;
struct plimit *newlim, *oldlim;
mtx_lock(&acct_mtx);
@ -300,11 +301,14 @@ acct_process(td)
/*
* Eliminate any file size rlimit.
*/
if (p->p_limit->p_refcnt > 1) {
p->p_limit->p_refcnt--;
p->p_limit = limcopy(p->p_limit);
}
p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
newlim = lim_alloc();
PROC_LOCK(p);
oldlim = p->p_limit;
lim_copy(newlim, oldlim);
newlim->pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
p->p_limit = newlim;
PROC_UNLOCK(p);
lim_free(oldlim);
VOP_LEASE(vp, td, uc, LEASE_WRITE);
ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&acct, sizeof (acct),

View File

@ -224,10 +224,10 @@ getdtablesize(td, uap)
{
struct proc *p = td->td_proc;
mtx_lock(&Giant);
PROC_LOCK(p);
td->td_retval[0] =
min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
mtx_unlock(&Giant);
min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
PROC_UNLOCK(p);
return (0);
}
@ -353,11 +353,14 @@ kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
case F_DUPFD:
FILEDESC_UNLOCK(fdp);
newmin = arg;
if (newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
PROC_LOCK(p);
if (newmin >= lim_cur(p, RLIMIT_NOFILE) ||
newmin >= maxfilesperproc) {
PROC_UNLOCK(p);
error = EINVAL;
break;
}
PROC_UNLOCK(p);
error = do_dup(td, DUP_VARIABLE, fd, newmin, td->td_retval);
break;
@ -572,7 +575,9 @@ do_dup(td, type, old, new, retval)
*/
if (old < 0 || new < 0)
return (EBADF);
maxfd = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
PROC_LOCK(p);
maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
PROC_UNLOCK(p);
if (new >= maxfd)
return (EMFILE);
@ -1213,7 +1218,9 @@ fdalloc(struct thread *td, int minfd, int *result)
FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
maxfd = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
PROC_LOCK(p);
maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
PROC_UNLOCK(p);
/*
* Search the bitmap for a free descriptor. If none is found, try
@ -1261,7 +1268,9 @@ fdavail(td, n)
FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
PROC_LOCK(p);
lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
PROC_UNLOCK(p);
if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
return (1);
last = min(fdp->fd_nfiles, lim);

View File

@ -119,6 +119,7 @@ exit1(struct thread *td, int rv)
struct vnode *tracevp;
struct ucred *tracecred;
#endif
struct plimit *plim;
GIANT_REQUIRED;
@ -373,11 +374,11 @@ exit1(struct thread *td, int rv)
/*
* Release our limits structure.
*/
mtx_assert(&Giant, MA_OWNED);
if (--p->p_limit->p_refcnt == 0) {
FREE(p->p_limit, M_SUBPROC);
p->p_limit = NULL;
}
PROC_LOCK(p);
plim = p->p_limit;
p->p_limit = NULL;
PROC_UNLOCK(p);
lim_free(plim);
/*
* Release this thread's reference to the ucred. The actual proc

View File

@ -308,7 +308,7 @@ fork1(td, flags, pages, procp)
*/
PROC_LOCK(p1);
ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
(uid != 0) ? lim_cur(p1, RLIMIT_NPROC) : 0);
PROC_UNLOCK(p1);
if (!ok) {
error = EAGAIN;
@ -528,14 +528,13 @@ fork1(td, flags, pages, procp)
VREF(p2->p_textvp);
p2->p_fd = fd;
p2->p_fdtol = fdtol;
PROC_UNLOCK(p1);
PROC_UNLOCK(p2);
/*
* p_limit is copy-on-write, bump refcnt,
*/
p2->p_limit = p1->p_limit;
p2->p_limit->p_refcnt++;
p2->p_limit = lim_hold(p1->p_limit);
PROC_UNLOCK(p1);
PROC_UNLOCK(p2);
/*
* Setup linkage for kernel based threading

View File

@ -65,6 +65,7 @@ __FBSDID("$FreeBSD$");
static int donice(struct thread *td, struct proc *chgp, int n);
static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
@ -459,9 +460,7 @@ osetrlimit(td, uap)
return (error);
lim.rlim_cur = olim.rlim_cur;
lim.rlim_max = olim.rlim_max;
mtx_lock(&Giant);
error = dosetrlimit(td, uap->which, &lim);
mtx_unlock(&Giant);
error = kern_setrlimit(td, uap->which, &lim);
return (error);
}
@ -482,19 +481,17 @@ ogetrlimit(td, uap)
{
struct proc *p = td->td_proc;
struct orlimit olim;
struct rlimit rl;
int error;
if (uap->which >= RLIM_NLIMITS)
return (EINVAL);
mtx_lock(&Giant);
olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
if (olim.rlim_cur == -1)
olim.rlim_cur = 0x7fffffff;
olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
if (olim.rlim_max == -1)
olim.rlim_max = 0x7fffffff;
PROC_LOCK(p);
lim_rlimit(p, uap->which, &rl);
PROC_UNLOCK(p);
olim.rlim_cur = rl.rlim_cur == -1 ? 0x7fffffff : rl.rlim_cur;
olim.rlim_max = rl.rlim_max == -1 ? 0x7fffffff : rl.rlim_max;
error = copyout(&olim, uap->rlp, sizeof(olim));
mtx_unlock(&Giant);
return (error);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
@ -519,27 +516,24 @@ setrlimit(td, uap)
if ((error = copyin(uap->rlp, &alim, sizeof (struct rlimit))))
return (error);
mtx_lock(&Giant);
error = dosetrlimit(td, uap->which, &alim);
mtx_unlock(&Giant);
error = kern_setrlimit(td, uap->which, &alim);
return (error);
}
int
dosetrlimit(td, which, limp)
kern_setrlimit(td, which, limp)
struct thread *td;
u_int which;
struct rlimit *limp;
{
struct proc *p = td->td_proc;
struct plimit *newlim, *oldlim;
register struct rlimit *alimp;
rlim_t oldssiz;
int error;
GIANT_REQUIRED;
if (which >= RLIM_NLIMITS)
return (EINVAL);
alimp = &p->p_rlimit[which];
/*
* Preserve historical bugs by treating negative limits as unsigned.
@ -549,17 +543,22 @@ dosetrlimit(td, which, limp)
if (limp->rlim_max < 0)
limp->rlim_max = RLIM_INFINITY;
oldssiz = 0;
newlim = lim_alloc();
PROC_LOCK(p);
oldlim = p->p_limit;
alimp = &oldlim->pl_rlimit[which];
if (limp->rlim_cur > alimp->rlim_max ||
limp->rlim_max > alimp->rlim_max)
if ((error = suser_cred(td->td_ucred, PRISON_ROOT)))
if ((error = suser_cred(td->td_ucred, PRISON_ROOT))) {
PROC_UNLOCK(p);
lim_free(newlim);
return (error);
}
if (limp->rlim_cur > limp->rlim_max)
limp->rlim_cur = limp->rlim_max;
if (p->p_limit->p_refcnt > 1) {
p->p_limit->p_refcnt--;
p->p_limit = limcopy(p->p_limit);
alimp = &p->p_rlimit[which];
}
lim_copy(newlim, oldlim);
alimp = &newlim->pl_rlimit[which];
switch (which) {
@ -580,32 +579,7 @@ dosetrlimit(td, which, limp)
limp->rlim_cur = maxssiz;
if (limp->rlim_max > maxssiz)
limp->rlim_max = maxssiz;
/*
* Stack is allocated to the max at exec time with only
* "rlim_cur" bytes accessible. If stack limit is going
* up make more accessible, if going down make inaccessible.
*/
if (limp->rlim_cur != alimp->rlim_cur) {
vm_offset_t addr;
vm_size_t size;
vm_prot_t prot;
if (limp->rlim_cur > alimp->rlim_cur) {
prot = p->p_sysent->sv_stackprot;
size = limp->rlim_cur - alimp->rlim_cur;
addr = p->p_sysent->sv_usrstack -
limp->rlim_cur;
} else {
prot = VM_PROT_NONE;
size = alimp->rlim_cur - limp->rlim_cur;
addr = p->p_sysent->sv_usrstack -
alimp->rlim_cur;
}
addr = trunc_page(addr);
size = round_page(size);
(void) vm_map_protect(&p->p_vmspace->vm_map,
addr, addr+size, prot, FALSE);
}
oldssiz = alimp->rlim_cur;
break;
case RLIMIT_NOFILE:
@ -627,6 +601,40 @@ dosetrlimit(td, which, limp)
break;
}
*alimp = *limp;
p->p_limit = newlim;
PROC_UNLOCK(p);
lim_free(oldlim);
if (which == RLIMIT_STACK) {
/*
* Stack is allocated to the max at exec time with only
* "rlim_cur" bytes accessible. If stack limit is going
* up make more accessible, if going down make inaccessible.
*/
if (limp->rlim_cur != oldssiz) {
vm_offset_t addr;
vm_size_t size;
vm_prot_t prot;
mtx_lock(&Giant);
if (limp->rlim_cur > oldssiz) {
prot = p->p_sysent->sv_stackprot;
size = limp->rlim_cur - oldssiz;
addr = p->p_sysent->sv_usrstack -
limp->rlim_cur;
} else {
prot = VM_PROT_NONE;
size = oldssiz - limp->rlim_cur;
addr = p->p_sysent->sv_usrstack -
oldssiz;
}
addr = trunc_page(addr);
size = round_page(size);
(void) vm_map_protect(&p->p_vmspace->vm_map,
addr, addr+size, prot, FALSE);
mtx_unlock(&Giant);
}
}
return (0);
}
@ -647,13 +655,14 @@ getrlimit(td, uap)
{
int error;
struct proc *p = td->td_proc;
struct rlimit rlim;
if (uap->which >= RLIM_NLIMITS)
return (EINVAL);
mtx_lock(&Giant);
error = copyout(&p->p_rlimit[uap->which], uap->rlp,
sizeof (struct rlimit));
mtx_unlock(&Giant);
PROC_LOCK(p);
lim_rlimit(p, uap->which, &rlim);
PROC_UNLOCK(p);
error = copyout(&rlim, uap->rlp, sizeof (struct rlimit));
return(error);
}
@ -814,21 +823,107 @@ ruadd(ru, ru2)
}
/*
* Make a copy of the plimit structure.
* We share these structures copy-on-write after fork,
* and copy when a limit is changed.
* Allocate a new resource limits structure, initialize its
* reference count and mutex.
*/
struct plimit *
limcopy(lim)
struct plimit *lim;
lim_alloc(void)
{
register struct plimit *copy;
struct plimit *limp;
MALLOC(copy, struct plimit *, sizeof(struct plimit),
M_SUBPROC, M_WAITOK);
bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
copy->p_refcnt = 1;
return (copy);
limp = (struct plimit *)malloc(sizeof(struct plimit), M_PLIMIT,
M_WAITOK | M_ZERO);
limp->pl_refcnt = 1;
mtx_init(&limp->pl_mtx, "plimit lock", NULL, MTX_DEF);
return (limp);
}
/*
* NOTE: The caller must own the proc lock this limit is associated with.
*/
struct plimit *
lim_hold(limp)
struct plimit *limp;
{
LIM_LOCK(limp);
limp->pl_refcnt++;
LIM_UNLOCK(limp);
return (limp);
}
/*
* NOTE: The caller must own the proc lock this plimit belongs to.
*/
void
lim_free(limp)
struct plimit *limp;
{
LIM_LOCK(limp);
KASSERT(limp->pl_refcnt > 0, ("bad plimit refcnt: %d", limp->pl_refcnt));
if (--limp->pl_refcnt == 0) {
mtx_destroy(&limp->pl_mtx);
free((void *)limp, M_PLIMIT);
return;
}
LIM_UNLOCK(limp);
}
/*
* Make a copy of the plimit structure.
* We share these structures copy-on-write after fork.
*/
void
lim_copy(dst, src)
struct plimit *dst, *src;
{
KASSERT(dst->pl_refcnt == 1, ("lim_copy of shared limit"));
bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}
/*
* Obtain the hard limit for a particular system resource.
* which - index into the rlimit array
* Note: callers must hold proc lock.
*/
rlim_t
lim_max(struct proc *p, int which)
{
struct rlimit rl;
lim_rlimit(p, which, &rl);
return (rl.rlim_max);
}
/*
* Obtain the current (soft) limit for a particular system resource.
* which - index into the rlimit array
* Note: callers must hold proc lock.
*/
rlim_t
lim_cur(struct proc *p, int which)
{
struct rlimit rl;
lim_rlimit(p, which, &rl);
return (rl.rlim_cur);
}
/*
* Obtain the entire rlimit structure for a particular system limit.
* which - index into the rlimit array
* rlp - address into which the rlimit structure will be placed
*/
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{
PROC_LOCK_ASSERT(p, MA_OWNED);
KASSERT(which >= 0 && which < RLIM_NLIMITS,
("request for invalid resource limit"));
*rlp = p->p_limit->pl_rlimit[which];
}
/*

View File

@ -2554,12 +2554,10 @@ coredump(struct thread *td)
* a corefile is truncated instead of not being created,
* if it is larger than the limit.
*/
limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
if (limit == 0) {
PROC_UNLOCK(p);
return EFBIG;
}
limit = (off_t)lim_cur(p, RLIMIT_CORE);
PROC_UNLOCK(p);
if (limit == 0)
return (EFBIG);
restart:
name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);

View File

@ -143,7 +143,7 @@ ast(struct trapframe *framep)
struct proc *p;
struct kse *ke;
struct ksegrp *kg;
struct rlimit *rlim;
struct rlimit rlim;
u_int prticks, sticks;
int sflag;
int flags;
@ -223,13 +223,13 @@ ast(struct trapframe *framep)
}
if (sflag & PS_XCPU) {
PROC_LOCK(p);
rlim = &p->p_rlimit[RLIMIT_CPU];
lim_rlimit(p, RLIMIT_CPU, &rlim);
mtx_lock_spin(&sched_lock);
if (p->p_runtime.sec >= rlim->rlim_max) {
if (p->p_runtime.sec >= rlim.rlim_max) {
mtx_unlock_spin(&sched_lock);
killproc(p, "exceeded maximum CPU limit");
} else {
if (p->p_cpulimit < rlim->rlim_max)
if (p->p_cpulimit < rlim.rlim_max)
p->p_cpulimit += 5;
mtx_unlock_spin(&sched_lock);
psignal(p, SIGXCPU);

View File

@ -961,11 +961,14 @@ poll(td, uap)
* least enough for the current limits. We want to be reasonably
* safe, but not overly restrictive.
*/
if ((nfds > td->td_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur) &&
PROC_LOCK(td->td_proc);
if ((nfds > lim_cur(td->td_proc, RLIMIT_NOFILE)) &&
(nfds > FD_SETSIZE)) {
PROC_UNLOCK(td->td_proc);
error = EINVAL;
goto done2;
}
PROC_UNLOCK(td->td_proc);
ni = nfds * sizeof(struct pollfd);
if (ni > sizeof(smallbits))
bits = malloc(ni, M_TEMP, M_WAITOK);

View File

@ -411,6 +411,7 @@ sbreserve(sb, cc, so, td)
struct socket *so;
struct thread *td;
{
rlim_t sbsize_limit;
/*
* td will only be NULL when we're in an interrupt
@ -418,10 +419,15 @@ sbreserve(sb, cc, so, td)
*/
if (cc > sb_max_adj)
return (0);
if (td != NULL) {
PROC_LOCK(td->td_proc);
sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
PROC_UNLOCK(td->td_proc);
} else
sbsize_limit = RLIM_INFINITY;
if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
td ? td->td_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur : RLIM_INFINITY)) {
sbsize_limit))
return (0);
}
sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
if (sb->sb_lowat > sb->sb_hiwat)
sb->sb_lowat = sb->sb_hiwat;

View File

@ -411,6 +411,7 @@ sbreserve(sb, cc, so, td)
struct socket *so;
struct thread *td;
{
rlim_t sbsize_limit;
/*
* td will only be NULL when we're in an interrupt
@ -418,10 +419,15 @@ sbreserve(sb, cc, so, td)
*/
if (cc > sb_max_adj)
return (0);
if (td != NULL) {
PROC_LOCK(td->td_proc);
sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
PROC_UNLOCK(td->td_proc);
} else
sbsize_limit = RLIM_INFINITY;
if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
td ? td->td_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur : RLIM_INFINITY)) {
sbsize_limit))
return (0);
}
sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
if (sb->sb_lowat > sb->sb_hiwat)
sb->sb_lowat = sb->sb_hiwat;

View File

@ -809,14 +809,17 @@ nfs_write(struct vop_write_args *ap)
* Maybe this should be above the vnode op call, but so long as
* file servers have no limits, i don't think it matters
*/
if (p && uio->uio_offset + uio->uio_resid >
p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
if (p != NULL) {
PROC_LOCK(p);
psignal(p, SIGXFSZ);
if (uio->uio_offset + uio->uio_resid >
lim_cur(p, RLIMIT_FSIZE)) {
psignal(p, SIGXFSZ);
PROC_UNLOCK(p);
if (haverslock)
nfs_rsunlock(np, td);
return (EFBIG);
}
PROC_UNLOCK(p);
if (haverslock)
nfs_rsunlock(np, td);
return (EFBIG);
}
biosize = vp->v_mount->mnt_stat.f_iosize;

View File

@ -531,7 +531,7 @@ struct proc {
struct filedesc_to_leader *p_fdtol; /* (b) Ptr to tracking node */
/* Accumulated stats for all KSEs? */
struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
struct plimit *p_limit; /* (c*) Process limits. */
struct plimit *p_limit; /* (c) Process limits. */
struct vm_object *p_upages_obj; /* (a) Upages object. */
struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */
/*
@ -616,7 +616,6 @@ struct proc {
struct p_sched *p_sched; /* (*) Scheduler-specific data. */
};
#define p_rlimit p_limit->pl_rlimit
#define p_session p_pgrp->pg_session
#define p_pgid p_pgrp->pg_id

View File

@ -142,7 +142,7 @@ struct loadavg {
extern struct loadavg averunnable;
extern long cp_time[CPUSTATES];
int dosetrlimit(struct thread *, u_int, struct rlimit *);
int kern_setrlimit(struct thread *, u_int, struct rlimit *);
#else
#include <sys/cdefs.h>

View File

@ -75,11 +75,22 @@ struct pstats {
*/
struct plimit {
struct rlimit pl_rlimit[RLIM_NLIMITS];
int p_refcnt; /* number of references */
int pl_refcnt; /* number of references */
struct mtx pl_mtx;
};
#ifdef _KERNEL
/*
* Lock order for operations involving the plimit lock:
* filedesc <important to avoid deadlocks in the descriptor code>
* proc
* plimit
*/
#define LIM_LOCK(lim) mtx_lock(&(lim)->pl_mtx)
#define LIM_UNLOCK(lim) mtx_unlock(&(lim)->pl_mtx)
#define LIM_LOCK_ASSERT(lim, f) mtx_assert(&(lim)->pl_mtx, (f))
/*
* Per uid resource consumption
*/
@ -107,8 +118,15 @@ int chgproccnt(struct uidinfo *uip, int diff, int max);
int chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to,
rlim_t max);
int fuswintr(void *base);
rlim_t lim_cur(struct proc *p, int which);
rlim_t lim_max(struct proc *p, int which);
void lim_rlimit(struct proc *p, int which, struct rlimit *rlp);
void lim_copy(struct plimit *dst, struct plimit *src);
void lim_free(struct plimit *limp);
struct plimit
*limcopy(struct plimit *lim);
*lim_alloc(void);
struct plimit
*lim_hold(struct plimit *limp);
void ruadd(struct rusage *ru, struct rusage *ru2);
int suswintr(void *base, int word);
struct uidinfo

View File

@ -634,13 +634,15 @@ ffs_write(ap)
* file servers have no limits, I don't think it matters.
*/
td = uio->uio_td;
if (vp->v_type == VREG && td &&
uio->uio_offset + uio->uio_resid >
td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
if (vp->v_type == VREG && td != NULL) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGXFSZ);
if (uio->uio_offset + uio->uio_resid >
lim_cur(td->td_proc, RLIMIT_FSIZE)) {
psignal(td->td_proc, SIGXFSZ);
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
resid = uio->uio_resid;

View File

@ -682,6 +682,7 @@ vm_init_limits(udata)
void *udata;
{
struct proc *p = udata;
struct plimit *limp;
int rss_limit;
/*
@ -691,14 +692,15 @@ vm_init_limits(udata)
* of memory - half of main memory helps to favor smaller processes,
* and reduces thrashing of the object cache.
*/
p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
limp = p->p_limit;
limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
/* limit the limit to no less than 2MB */
rss_limit = max(cnt.v_free_count, 512);
p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
void

View File

@ -2467,6 +2467,7 @@ vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
vm_offset_t bot, top;
vm_size_t init_ssize;
int orient, rv;
rlim_t vmemlim;
/*
* The stack orientation is piggybacked with the cow argument.
@ -2483,6 +2484,10 @@ vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
PROC_LOCK(curthread->td_proc);
vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
PROC_UNLOCK(curthread->td_proc);
vm_map_lock(map);
/* If addr is already mapped, no go */
@ -2492,8 +2497,7 @@ vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
}
/* If we would blow our VMEM resource limit, no go */
if (map->size + init_ssize >
curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
if (map->size + init_ssize > vmemlim) {
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
@ -2566,11 +2570,17 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
vm_map_t map = &vm->vm_map;
vm_offset_t end;
size_t grow_amount, max_grow;
rlim_t stacklim, vmemlim;
int is_procstack, rv;
GIANT_REQUIRED;
Retry:
PROC_LOCK(p);
stacklim = lim_cur(p, RLIMIT_STACK);
vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
PROC_UNLOCK(p);
vm_map_lock_read(map);
/* If addr is already in the entry range, no need to grow.*/
@ -2658,8 +2668,7 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
* If this is the main process stack, see if we're over the stack
* limit.
*/
if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
vm_map_unlock_read(map);
return (KERN_NO_SPACE);
}
@ -2668,14 +2677,12 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
grow_amount = roundup (grow_amount, sgrowsiz);
if (grow_amount > stack_entry->avail_ssize)
grow_amount = stack_entry->avail_ssize;
if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
ctob(vm->vm_ssize);
if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
grow_amount = stacklim - ctob(vm->vm_ssize);
}
/* If we would blow our VMEM resource limit, no go */
if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
if (map->size + grow_amount > vmemlim) {
vm_map_unlock_read(map);
return (KERN_NO_SPACE);
}

View File

@ -266,7 +266,7 @@ mmap(td, uap)
return (EINVAL);
if (addr + size < addr)
return (EINVAL);
}
} else {
/*
* XXX for non-fixed mappings where no hint is provided or
* the hint would fall in the potential heap space,
@ -275,13 +275,15 @@ mmap(td, uap)
* There should really be a pmap call to determine a reasonable
* location.
*/
else if (addr == 0 ||
(addr >= round_page((vm_offset_t)vms->vm_taddr) &&
addr < round_page((vm_offset_t)vms->vm_daddr +
td->td_proc->p_rlimit[RLIMIT_DATA].rlim_max)))
addr = round_page((vm_offset_t)vms->vm_daddr +
td->td_proc->p_rlimit[RLIMIT_DATA].rlim_max);
PROC_LOCK(td->td_proc);
if (addr == 0 ||
(addr >= round_page((vm_offset_t)vms->vm_taddr) &&
addr < round_page((vm_offset_t)vms->vm_daddr +
lim_max(td->td_proc, RLIMIT_DATA))))
addr = round_page((vm_offset_t)vms->vm_daddr +
lim_max(td->td_proc, RLIMIT_DATA));
PROC_UNLOCK(td->td_proc);
}
mtx_lock(&Giant); /* syscall marked mp-safe but isn't */
do {
if (flags & MAP_ANON) {
@ -1002,9 +1004,13 @@ mlock(td, uap)
return (EAGAIN);
#if 0
PROC_LOCK(td->td_proc);
if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
PROC_UNLOCK(td->td_proc);
return (ENOMEM);
}
PROC_UNLOCK(td->td_proc);
#else
error = suser(td);
if (error)
@ -1044,9 +1050,13 @@ mlockall(td, uap)
* If wiring all pages in the process would cause it to exceed
* a hard resource limit, return ENOMEM.
*/
PROC_LOCK(td->td_proc);
if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map)) >
td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur))
lim_cur(td->td_proc, RLIMIT_MEMLOCK))) {
PROC_UNLOCK(td->td_proc);
return (ENOMEM);
}
PROC_UNLOCK(td->td_proc);
#else
error = suser(td);
if (error)
@ -1176,10 +1186,13 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
objsize = size = round_page(size);
PROC_LOCK(td->td_proc);
if (td->td_proc->p_vmspace->vm_map.size + size >
td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
lim_cur(td->td_proc, RLIMIT_VMEM)) {
PROC_UNLOCK(td->td_proc);
return(ENOMEM);
}
PROC_UNLOCK(td->td_proc);
/*
* We currently can only deal with page aligned file offsets.

View File

@ -1486,6 +1486,7 @@ vm_daemon()
struct proc *p;
int breakout;
struct thread *td;
struct rlimit rsslim;
mtx_lock(&Giant);
while (TRUE) {
@ -1511,6 +1512,7 @@ vm_daemon()
PROC_UNLOCK(p);
continue;
}
lim_rlimit(p, RLIMIT_RSS, &rsslim);
/*
* if the process is in a non-running type state,
* don't touch it.
@ -1534,8 +1536,7 @@ vm_daemon()
* get a limit
*/
limit = OFF_TO_IDX(
qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
p->p_rlimit[RLIMIT_RSS].rlim_max));
qmin(rsslim.rlim_cur, rsslim.rlim_max));
/*
* let processes that are swapped out really be

View File

@ -77,10 +77,16 @@ obreak(td, uap)
{
struct vmspace *vm = td->td_proc->p_vmspace;
vm_offset_t new, old, base;
rlim_t datalim, vmemlim;
int rv;
int error = 0;
boolean_t do_map_wirefuture;
PROC_LOCK(td->td_proc);
datalim = lim_cur(td->td_proc, RLIMIT_DATA);
vmemlim = lim_cur(td->td_proc, RLIMIT_VMEM);
PROC_UNLOCK(td->td_proc);
do_map_wirefuture = FALSE;
new = round_page((vm_offset_t)uap->nsize);
vm_map_lock(&vm->vm_map);
@ -92,8 +98,7 @@ obreak(td, uap)
* Check the resource limit, but allow a process to reduce
* its usage, even if it remains over the limit.
*/
if (new - base > td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur &&
new > old) {
if (new - base > datalim && new > old) {
error = ENOMEM;
goto done;
}
@ -111,8 +116,7 @@ obreak(td, uap)
goto done;
}
if (new > old) {
if (vm->vm_map.size + (new - old) >
td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
if (vm->vm_map.size + (new - old) > vmemlim) {
error = ENOMEM;
goto done;
}