/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct mtx uihashtbl_mtx;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);

/*
 * Resource controls and accounting.
 */
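
/*
 * Return the lowest nice value among the processes selected by
 * uap->which and uap->who (a single process, a process group, or
 * all processes owned by a user).
 */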
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			/* Do not bother to check PRS_NEW processes */
			if (p->p_state == PRS_NEW)
				continue;
			PROC_LOCK(p);
			if (!p_cansee(td, p) &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
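
/*
 * Set the nice value for every process selected by uap->which and
 * uap->who that the caller is allowed to see; the per-process work
 * is done in donice() below.
 */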
#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
setpriority(td, uap)
	struct thread *td;
	struct setpriority_args *uap;
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (!p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_cansee(td, p)) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	PROC_SLOCK(p);
	sched_nice(p, n);
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *curp;
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	curp = td->td_proc;
	/*
	 * Though lwpid is unique, only current process is supported
	 * since there is no efficient way to look up a LWP yet.
	 */
	p = curp;
	PROC_LOCK(p);

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		PROC_SLOCK(p);
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			pri_to_rtp(td1, &rtp);
		else
			error = ESRCH;
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which should be
		 * obvious.  However, for idle priority, there is a potential for
		 * system deadlock if an idleprio process gains a lock on a resource
		 * that other processes need (and the idleprio process can't run
		 * due to a CPU-bound normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		PROC_SLOCK(p);
		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
			td1 = td;
		else
			td1 = thread_find(p, uap->lwpid);
		if (td1 != NULL)
			error = rtp_to_pri(&rtp, td1);
		else
			error = ESRCH;
		PROC_SUNLOCK(p);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
rtprio(td, uap)
	struct thread *td;		/* curthread */
	register struct rtprio_args *uap;
{
	struct proc *curp;
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	curp = td->td_proc;
	if (uap->pid == 0) {
		p = curp;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		PROC_SLOCK(p);
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * XXXKSE: maybe need a new interface to report
		 * priorities of multiple system scope threads.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
		/*
		 * Realtime priority has to be restricted for reasons which should be
		 * obvious.  However, for idle priority, there is a potential for
		 * system deadlock if an idleprio process gains a lock on a resource
		 * that other processes need (and the idleprio process can't run
		 * due to a CPU-bound normal process).  Fix me!  XXX
		 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread but if we are doing another process,
		 * do all the threads on that process. If we
		 * specify our own pid we do the latter.
		 */
		PROC_SLOCK(p);
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		PROC_SUNLOCK(p);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
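
/*
 * Apply the scheduling class and priority described by rtp to the
 * given thread.
 */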
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;

	if (rtp->prio > RTP_PRIO_MAX)
		return (EINVAL);
	thread_lock(td);
	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		thread_unlock(td);
		return (EINVAL);
	}
	sched_class(td, rtp->type);	/* XXX fix */
	sched_user_prio(td, newpri);
	if (curthread == td)
		sched_prio(curthread, td->td_user_pri); /* XXX dubious */
	thread_unlock(td);
	return (0);
}
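
/*
 * Translate the thread's scheduling class and base user priority back
 * into rtprio form.
 */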
void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
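/*
 * Old-style setrlimit(): convert the caller's orlimit into a struct
 * rlimit and hand it to kern_setrlimit().
 */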
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */
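
/*
 * Copy in the new resource limits and apply them via kern_setrlimit().
 */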
#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}
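
/*
 * Callout handler that enforces RLIMIT_CPU: aggregate the per-thread
 * tick counts, then signal or kill the process once it has exceeded
 * its CPU limits, and reschedule itself.
 */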
static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		ruxagg(&p->p_rux, td);
		thread_unlock(td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			psignal(p, SIGXCPU);
		}
	}
	callout_reset(&p->p_limco, hz, lim_cb, p);
}
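
/*
 * Install a new resource limit for the calling process, clamping the
 * request to the system maxima and adjusting the stack mapping when
 * RLIMIT_STACK changes.
 */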
int
kern_setrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	register struct rlimit *alimp;
	rlim_t oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		PROC_SLOCK(p);
		p->p_cpulimit = limp->rlim_cur;
		PROC_SUNLOCK(p);
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = alimp->rlim_cur;
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (td->td_proc->p_sysent->sv_fixlimit != NULL)
		td->td_proc->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack - oldssiz;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}
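
/*
 * Copy the requested resource limit out to user space.
 */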
#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(p, up, sp)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
		PCPU_SET(switchtime, u);
	}
	calcru1(p, &p->p_rux, up, sp);
}
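
/*
 * Common back end for calcru() and calccru(): split the accumulated
 * runtime in ruxp into user and system time according to the recorded
 * tick counts, keeping the reported values monotonic.
 */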
static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	u_int64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
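
/*
 * Report resource usage for the current process or its children; the
 * real work is done in kern_getrusage().
 */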
#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(td, who, rup)
	struct thread *td;
	int who;
	struct rusage *rup;
{
	struct proc *p;

	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {

	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	default:
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);
	return (0);
}
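
/*
 * Add the resource usage counters in ru2 into ru.
 */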
void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
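
/*
 * Add both the rusage counters and the extended runtime/tick counters
 * of the second pair into the first.
 */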
void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Aggregate tick counts into the proc's rusage_ext.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ruxagg(struct rusage_ext *rux, struct thread *td)
|
|
|
|
{
|
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
sychronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-05 00:00:57 +00:00
|
|
|
|
|
|
|
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
|
|
|
PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
|
2007-06-01 01:12:45 +00:00
|
|
|
rux->rux_runtime += td->td_runtime;
|
|
|
|
rux->rux_uticks += td->td_uticks;
|
|
|
|
rux->rux_sticks += td->td_sticks;
|
|
|
|
rux->rux_iticks += td->td_iticks;
|
|
|
|
td->td_runtime = 0;
|
|
|
|
td->td_uticks = 0;
|
|
|
|
td->td_iticks = 0;
|
|
|
|
td->td_sticks = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the rusage_ext structure and fetch a valid aggregate rusage
|
|
|
|
* for proc p if storage for one is supplied.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
rufetch(struct proc *p, struct rusage *ru)
|
|
|
|
{
|
|
|
|
struct thread *td;
|
|
|
|
|
2007-06-09 21:48:44 +00:00
|
|
|
PROC_SLOCK_ASSERT(p, MA_OWNED);
|
|
|
|
|
2007-06-09 18:56:11 +00:00
|
|
|
*ru = p->p_ru;
|
|
|
|
if (p->p_numthreads > 0) {
|
2007-06-01 01:12:45 +00:00
|
|
|
FOREACH_THREAD_IN_PROC(p, td) {
|
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
sychronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-05 00:00:57 +00:00
|
|
|
thread_lock(td);
|
2007-06-01 01:12:45 +00:00
|
|
|
ruxagg(&p->p_rux, td);
|
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
sychronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-05 00:00:57 +00:00
|
|
|
thread_unlock(td);
|
2007-06-01 01:12:45 +00:00
|
|
|
rucollect(ru, &td->td_ru);
|
|
|
|
}
|
2007-06-09 18:56:11 +00:00
|
|
|
}
|
2007-06-09 21:48:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Atomically perform a rufetch and a calcru together.
|
|
|
|
* Consumers, can safely assume the calcru is executed only once
|
|
|
|
* rufetch is completed.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
|
|
|
|
struct timeval *sp)
|
|
|
|
{
|
|
|
|
|
|
|
|
PROC_SLOCK(p);
|
|
|
|
rufetch(p, ru);
|
|
|
|
calcru(p, up, sp);
|
2007-06-05 00:00:57 +00:00
|
|
|
PROC_SUNLOCK(p);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
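
/*
* Illustrative sketch, not part of this file: a consumer that needs the
* rusage and the calcru()-derived times as one consistent snapshot calls
* rufetchcalc() rather than rufetch() and calcru() separately.  Holding
* the proc lock here is an assumption based on calcru()'s requirements;
* the helper name is hypothetical.
*/
static void
rusage_snapshot_example(struct proc *p, struct rusage *ru)
{

        PROC_LOCK(p);
        rufetchcalc(p, ru, &ru->ru_utime, &ru->ru_stime);
        PROC_UNLOCK(p);
}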
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Allocate a new resource limits structure and initialize its
|
|
|
|
* reference count and mutex pointer.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
struct plimit *
|
2004-02-05 20:53:25 +00:00
|
|
|
lim_alloc()
|
2004-02-04 21:52:57 +00:00
|
|
|
{
|
|
|
|
struct plimit *limp;
|
|
|
|
|
2004-09-24 00:38:15 +00:00
|
|
|
limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
|
2005-09-27 18:07:05 +00:00
|
|
|
refcount_init(&limp->pl_refcnt, 1);
|
2004-02-04 21:52:57 +00:00
|
|
|
return (limp);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct plimit *
|
|
|
|
lim_hold(limp)
|
|
|
|
struct plimit *limp;
|
|
|
|
{
|
|
|
|
|
2005-09-27 18:07:05 +00:00
|
|
|
refcount_acquire(&limp->pl_refcnt);
|
2004-02-04 21:52:57 +00:00
|
|
|
return (limp);
|
|
|
|
}
|
|
|
|
|
2007-06-01 01:12:45 +00:00
|
|
|
void
|
|
|
|
lim_fork(struct proc *p1, struct proc *p2)
|
|
|
|
{
|
|
|
|
p2->p_limit = lim_hold(p1->p_limit);
|
|
|
|
callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
|
|
|
|
if (p1->p_cpulimit != RLIM_INFINITY)
|
|
|
|
callout_reset(&p2->p_limco, hz, lim_cb, p2);
|
|
|
|
}
|
|
|
|
|
2004-02-04 21:52:57 +00:00
|
|
|
void
|
|
|
|
lim_free(limp)
|
|
|
|
struct plimit *limp;
|
|
|
|
{
|
|
|
|
|
2004-02-05 20:53:25 +00:00
|
|
|
KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
|
2005-09-27 18:07:05 +00:00
|
|
|
if (refcount_release(&limp->pl_refcnt))
|
2004-02-04 21:52:57 +00:00
|
|
|
free((void *)limp, M_PLIMIT);
|
|
|
|
}
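
/*
* Illustrative sketch, not part of this file: since lim_fork() above shares
* the parent's plimit by reference, teardown code only drops the process's
* own reference and lets lim_free() reclaim the structure once the last
* sharer is gone.  The helper name is hypothetical.
*/
static void
limit_drop_example(struct proc *p)
{
        struct plimit *limp;

        limp = p->p_limit;
        p->p_limit = NULL;
        lim_free(limp);
}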
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a copy of the plimit structure.
|
|
|
|
* We share these structures copy-on-write after fork.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
lim_copy(dst, src)
|
|
|
|
struct plimit *dst, *src;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
|
2004-02-05 20:53:25 +00:00
|
|
|
KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
|
2004-02-04 21:52:57 +00:00
|
|
|
bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
|
|
|
|
}
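
/*
* Illustrative sketch, not part of this file: because the plimit is shared
* copy-on-write, a writer builds a private copy with lim_alloc()/lim_copy(),
* swaps it in under the proc lock, and drops the reference on the old,
* possibly shared, structure.  The helper name is hypothetical.
*/
static void
limit_cow_update_example(struct proc *p, int which, struct rlimit *new_rlim)
{
        struct plimit *newlim, *oldlim;

        newlim = lim_alloc();
        PROC_LOCK(p);
        oldlim = p->p_limit;
        lim_copy(newlim, oldlim);       /* Requires an unshared destination. */
        newlim->pl_rlimit[which] = *new_rlim;
        p->p_limit = newlim;
        PROC_UNLOCK(p);
        lim_free(oldlim);
}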
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Return the hard limit for a particular system resource. The
|
|
|
|
* which parameter specifies the index into the rlimit array.
|
2004-02-04 21:52:57 +00:00
|
|
|
*/
|
|
|
|
rlim_t
|
|
|
|
lim_max(struct proc *p, int which)
|
|
|
|
{
|
|
|
|
struct rlimit rl;
|
|
|
|
|
|
|
|
lim_rlimit(p, which, &rl);
|
|
|
|
return (rl.rlim_max);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Return the current (soft) limit for a particular system resource.
|
|
|
|
* The which parameter specifies the index into the rlimit array.
|
2004-02-04 21:52:57 +00:00
|
|
|
*/
|
|
|
|
rlim_t
|
|
|
|
lim_cur(struct proc *p, int which)
|
|
|
|
{
|
|
|
|
struct rlimit rl;
|
|
|
|
|
|
|
|
lim_rlimit(p, which, &rl);
|
2004-02-11 18:04:13 +00:00
|
|
|
return (rl.rlim_cur);
|
2004-02-04 21:52:57 +00:00
|
|
|
}
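
/*
* Illustrative sketch, not part of this file: a typical consumer reads the
* soft limit under the proc lock and compares a requested size against it.
* RLIMIT_DATA is used only as an example resource; the helper name is
* hypothetical.
*/
static int
limit_check_example(struct proc *p, rlim_t request)
{
        rlim_t datalim;

        PROC_LOCK(p);
        datalim = lim_cur(p, RLIMIT_DATA);
        PROC_UNLOCK(p);
        return (request <= datalim);
}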
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Return a copy of the entire rlimit structure for the system limit
|
|
|
|
* specified by 'which' in the rlimit structure pointed to by 'rlp'.
|
2004-02-04 21:52:57 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
|
|
|
|
{
|
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
KASSERT(which >= 0 && which < RLIM_NLIMITS,
|
|
|
|
("request for invalid resource limit"));
|
|
|
|
*rlp = p->p_limit->pl_rlimit[which];
|
2007-05-14 22:40:04 +00:00
|
|
|
if (p->p_sysent->sv_fixlimit != NULL)
|
|
|
|
p->p_sysent->sv_fixlimit(rlp, which);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
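
/*
* Illustrative sketch, not part of this file: callers that need both the
* soft and hard values read the whole rlimit under the proc lock, with any
* ABI-specific adjustment applied by lim_rlimit() via sv_fixlimit.
* RLIMIT_STACK is only an example resource; the helper name is hypothetical.
*/
static void
stack_limits_example(struct proc *p, rlim_t *softp, rlim_t *hardp)
{
        struct rlimit rl;

        PROC_LOCK(p);
        lim_rlimit(p, RLIMIT_STACK, &rl);
        PROC_UNLOCK(p);
        *softp = rl.rlim_cur;
        *hardp = rl.rlim_max;
}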
|
2000-09-05 22:11:13 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the uidinfo structure for a uid. This structure is used to
|
|
|
|
* track the total resource consumption (process count, socket buffer
|
|
|
|
* size, etc.) for the uid and impose limits.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uihashinit()
|
|
|
|
{
|
2000-11-26 12:08:17 +00:00
|
|
|
|
2000-09-05 22:11:13 +00:00
|
|
|
uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
|
2002-04-04 21:03:38 +00:00
|
|
|
mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
|
2000-09-05 22:11:13 +00:00
|
|
|
}
|
|
|
|
|
2000-11-26 12:08:17 +00:00
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Look up a uidinfo struct for the parameter uid.
|
2000-11-26 12:08:17 +00:00
|
|
|
* uihashtbl_mtx must be locked.
|
|
|
|
*/
|
2000-09-05 22:11:13 +00:00
|
|
|
static struct uidinfo *
|
|
|
|
uilookup(uid)
|
|
|
|
uid_t uid;
|
|
|
|
{
|
2004-02-05 20:53:25 +00:00
|
|
|
struct uihashhead *uipp;
|
|
|
|
struct uidinfo *uip;
|
2000-09-05 22:11:13 +00:00
|
|
|
|
2000-11-26 12:08:17 +00:00
|
|
|
mtx_assert(&uihashtbl_mtx, MA_OWNED);
|
2000-09-05 22:11:13 +00:00
|
|
|
uipp = UIHASH(uid);
|
|
|
|
LIST_FOREACH(uip, uipp, ui_hash)
|
|
|
|
if (uip->ui_uid == uid)
|
|
|
|
break;
|
|
|
|
|
|
|
|
return (uip);
|
|
|
|
}
|
|
|
|
|
2000-11-26 12:08:17 +00:00
|
|
|
/*
|
2000-12-01 06:59:18 +00:00
|
|
|
* Find or allocate a struct uidinfo for a particular uid.
|
|
|
|
* Increase refcount on uidinfo struct returned.
|
2000-11-26 12:08:17 +00:00
|
|
|
* uifree() should be called on a struct uidinfo when released.
|
|
|
|
*/
|
2000-09-05 22:11:13 +00:00
|
|
|
struct uidinfo *
|
|
|
|
uifind(uid)
|
|
|
|
uid_t uid;
|
|
|
|
{
|
2004-02-05 20:53:25 +00:00
|
|
|
struct uidinfo *old_uip, *uip;
|
2000-09-05 22:11:13 +00:00
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&uihashtbl_mtx);
|
2000-09-05 22:11:13 +00:00
|
|
|
uip = uilookup(uid);
|
2001-03-09 18:40:34 +00:00
|
|
|
if (uip == NULL) {
|
|
|
|
mtx_unlock(&uihashtbl_mtx);
|
2003-02-19 05:47:46 +00:00
|
|
|
uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
|
2001-03-09 18:40:34 +00:00
|
|
|
mtx_lock(&uihashtbl_mtx);
|
|
|
|
/*
|
|
|
|
* There's a chance someone created our uidinfo while we
|
|
|
|
* were in malloc and not holding the lock, so we have to
|
2004-02-05 20:53:25 +00:00
|
|
|
* make sure we don't insert a duplicate uidinfo.
|
2001-03-09 18:40:34 +00:00
|
|
|
*/
|
|
|
|
if ((old_uip = uilookup(uid)) != NULL) {
|
2004-02-05 20:53:25 +00:00
|
|
|
/* Someone else beat us to it. */
|
2001-03-09 18:40:34 +00:00
|
|
|
free(uip, M_UIDINFO);
|
|
|
|
uip = old_uip;
|
|
|
|
} else {
|
2003-07-13 01:22:21 +00:00
|
|
|
uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
|
2001-03-09 18:40:34 +00:00
|
|
|
uip->ui_uid = uid;
|
|
|
|
LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
|
|
|
|
}
|
|
|
|
}
|
2000-11-26 12:08:17 +00:00
|
|
|
uihold(uip);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&uihashtbl_mtx);
|
2000-09-05 22:11:13 +00:00
|
|
|
return (uip);
|
|
|
|
}
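
/*
* Illustrative sketch, not part of this file: uifind() returns a referenced
* uidinfo, so every successful lookup must eventually be paired with
* uifree().  Moving the accounting from one uid to another looks roughly
* like this; the helper name is hypothetical.
*/
static struct uidinfo *
uid_switch_example(struct uidinfo *olduip, uid_t newuid)
{
        struct uidinfo *newuip;

        newuip = uifind(newuid);        /* Adds a reference. */
        if (olduip != NULL)
                uifree(olduip);         /* Drops the old reference. */
        return (newuip);
}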
|
|
|
|
|
2000-11-30 19:15:22 +00:00
|
|
|
/*
|
2000-12-01 06:59:18 +00:00
|
|
|
* Place another refcount on a uidinfo struct.
|
2000-11-30 19:15:22 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
uihold(uip)
|
2000-12-01 06:59:18 +00:00
|
|
|
struct uidinfo *uip;
|
2000-11-30 19:15:22 +00:00
|
|
|
{
|
|
|
|
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_LOCK(uip);
|
2000-11-30 19:15:22 +00:00
|
|
|
uip->ui_ref++;
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-11-30 19:15:22 +00:00
|
|
|
}
|
|
|
|
|
2000-12-01 06:59:18 +00:00
|
|
|
/*-
|
|
|
|
* Since uidinfo structs have a long lifetime, we use an
|
2000-11-30 19:15:22 +00:00
|
|
|
* opportunistic refcounting scheme to avoid locking the lookup hash
|
|
|
|
* for each release.
|
|
|
|
*
|
2000-12-01 06:59:18 +00:00
|
|
|
* If the refcount hits 0, we need to free the structure,
|
2000-11-30 19:15:22 +00:00
|
|
|
* which means we need to lock the hash.
|
2000-12-01 06:59:18 +00:00
|
|
|
* Optimal case:
|
|
|
|
* After locking the struct and lowering the refcount, if we find
|
|
|
|
* that we don't need to free, simply unlock and return.
|
|
|
|
* Suboptimal case:
|
|
|
|
* If lowering the refcount means the structure must be freed, bump the count
|
2007-05-27 20:50:23 +00:00
|
|
|
* back up, drop the lock, and reacquire the locks in the proper
|
2000-12-01 06:59:18 +00:00
|
|
|
* order to try again.
|
2000-11-26 12:08:17 +00:00
|
|
|
*/
|
|
|
|
void
|
2000-09-05 22:11:13 +00:00
|
|
|
uifree(uip)
|
2000-12-01 06:59:18 +00:00
|
|
|
struct uidinfo *uip;
|
2000-09-05 22:11:13 +00:00
|
|
|
{
|
|
|
|
|
2000-12-01 06:59:18 +00:00
|
|
|
/* Prepare for optimal case. */
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_LOCK(uip);
|
2000-12-01 06:59:18 +00:00
|
|
|
|
2000-11-30 19:15:22 +00:00
|
|
|
if (--uip->ui_ref != 0) {
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-11-30 19:15:22 +00:00
|
|
|
return;
|
|
|
|
}
|
2000-12-01 06:59:18 +00:00
|
|
|
|
|
|
|
/* Prepare for suboptimal case. */
|
2000-11-30 19:15:22 +00:00
|
|
|
uip->ui_ref++;
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&uihashtbl_mtx);
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_LOCK(uip);
|
2000-12-01 06:59:18 +00:00
|
|
|
|
2000-11-30 19:15:22 +00:00
|
|
|
/*
|
2000-12-01 06:59:18 +00:00
|
|
|
* We must subtract one from the count again because we backed out
|
|
|
|
* our initial subtraction before dropping the lock.
|
|
|
|
* Since another thread may have added a reference after we dropped the
|
|
|
|
* initial lock, we have to test for zero again.
|
2000-11-30 19:15:22 +00:00
|
|
|
*/
|
2000-09-05 22:11:13 +00:00
|
|
|
if (--uip->ui_ref == 0) {
|
2000-11-30 19:15:22 +00:00
|
|
|
LIST_REMOVE(uip, ui_hash);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&uihashtbl_mtx);
|
2000-09-05 22:11:13 +00:00
|
|
|
if (uip->ui_sbsize != 0)
|
2004-04-10 11:08:16 +00:00
|
|
|
printf("freeing uidinfo: uid = %d, sbsize = %jd\n",
|
|
|
|
uip->ui_uid, (intmax_t)uip->ui_sbsize);
|
2000-09-05 22:11:13 +00:00
|
|
|
if (uip->ui_proccnt != 0)
|
2000-09-18 17:03:03 +00:00
|
|
|
printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
|
2000-09-05 22:11:13 +00:00
|
|
|
uip->ui_uid, uip->ui_proccnt);
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
FREE(uip, M_UIDINFO);
|
2000-11-26 12:08:17 +00:00
|
|
|
return;
|
2000-09-05 22:11:13 +00:00
|
|
|
}
|
2000-12-01 06:59:18 +00:00
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&uihashtbl_mtx);
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change the count associated with the number of processes
|
|
|
|
* a given user is using. When 'max' is 0, don't enforce a limit.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
chgproccnt(uip, diff, max)
|
|
|
|
struct uidinfo *uip;
|
|
|
|
int diff;
|
|
|
|
int max;
|
|
|
|
{
|
2000-11-26 12:08:17 +00:00
|
|
|
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_LOCK(uip);
|
2004-02-05 20:53:25 +00:00
|
|
|
/* Don't allow them to exceed max, but allow subtraction. */
|
2000-11-26 12:08:17 +00:00
|
|
|
if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
return (0);
|
2000-11-26 12:08:17 +00:00
|
|
|
}
|
2000-09-05 22:11:13 +00:00
|
|
|
uip->ui_proccnt += diff;
|
|
|
|
if (uip->ui_proccnt < 0)
|
2000-09-18 17:03:03 +00:00
|
|
|
printf("negative proccnt for uid = %d\n", uip->ui_uid);
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
return (1);
|
|
|
|
}
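
/*
* Illustrative sketch, not part of this file: fork-style code charges the
* new process against a uid and backs out when the per-uid limit would be
* exceeded.  Using lim_cur(..., RLIMIT_NPROC) as the cap and returning
* EAGAIN are assumptions for illustration; the helper name is hypothetical.
*/
static int
proccnt_charge_example(struct proc *parent, struct uidinfo *uip)
{
        int ok;

        PROC_LOCK(parent);
        ok = chgproccnt(uip, 1, lim_cur(parent, RLIMIT_NPROC));
        PROC_UNLOCK(parent);
        return (ok ? 0 : EAGAIN);
}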
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change the total socket buffer size a user has used.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
chgsbsize(uip, hiwat, to, max)
|
|
|
|
struct uidinfo *uip;
|
2002-07-24 03:02:43 +00:00
|
|
|
u_int *hiwat;
|
|
|
|
u_int to;
|
2000-09-05 22:11:13 +00:00
|
|
|
rlim_t max;
|
|
|
|
{
|
|
|
|
rlim_t new;
|
|
|
|
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_LOCK(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
new = uip->ui_sbsize + to - *hiwat;
|
2004-09-24 00:38:15 +00:00
|
|
|
/* Don't allow them to exceed max, but allow subtraction. */
|
2000-09-05 22:11:13 +00:00
|
|
|
if (to > *hiwat && new > max) {
|
2002-01-20 22:48:49 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
uip->ui_sbsize = new;
|
2004-08-06 22:04:33 +00:00
|
|
|
UIDINFO_UNLOCK(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
*hiwat = to;
|
2004-08-06 22:04:33 +00:00
|
|
|
if (new < 0)
|
2000-09-18 17:03:03 +00:00
|
|
|
printf("negative sbsize for uid = %d\n", uip->ui_uid);
|
2000-09-05 22:11:13 +00:00
|
|
|
return (1);
|
|
|
|
}
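
/*
* Illustrative sketch, not part of this file: socket buffer code moves a
* buffer's high-water mark with chgsbsize(), charging the delta to the
* owning uid; a zero return means the per-uid sbsize limit would be
* exceeded and the resize must be refused.  The helper name is hypothetical.
*/
static int
sbsize_resize_example(struct uidinfo *uip, u_int *hiwat, u_int newhiwat,
    rlim_t sbsize_limit)
{

        /* Growing may fail against the limit; shrinking always succeeds. */
        if (!chgsbsize(uip, hiwat, newhiwat, sbsize_limit))
                return (0);
        /* On success *hiwat has been updated and the uid charged. */
        return (1);
}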
|