/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{

	return (kern_getpriority(td, uap->which, uap->who));
}

int
kern_getpriority(struct thread *td, int which, int who)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
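
/*
 * Usage note (not part of the original sources): the selected nice
 * value is returned in td_retval[0] and may legitimately be negative,
 * so a userland caller of getpriority(2) cannot tell an error from a
 * result of -1 by the return value alone; the usual idiom is to clear
 * errno before the call and test it afterwards.
 */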

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{

	return (kern_setpriority(td, uap->which, uap->who, uap->prio));
}

int
kern_setpriority(struct thread *td, int which, int who, int prio)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (which) {
	case PRIO_PROCESS:
		if (who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (who == 0)
			who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
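
/*
 * Illustration (assumed example, not from the original sources): since
 * lowering the nice value requires PRIV_SCHED_SETPRIORITY, an
 * unprivileged process that calls setpriority(PRIO_PROCESS, 0, 10) and
 * later asks for setpriority(PRIO_PROCESS, 0, 5) gets EACCES from
 * donice() below, even though it once ran at a lower nice value.
 */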

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");
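
/*
 * Example (illustrative): an administrator can allow unprivileged
 * idle-priority requests with
 *	sysctl security.bsd.unprivileged_idprio=1
 * which makes the RTP_PRIO_IDLE cases below skip the
 * PRIV_SCHED_RTPRIO check.
 */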

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		td1 = tdfind(uap->lwpid, -1);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there is
		 * a potential for system deadlock if an idleprio process gains
		 * a lock on a resource that other processes need (and the
		 * idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not only related to idleprio processes.
		 * A user level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it is
		 * still conceivable that a program with low priority will never
		 * get to run.  In short, allowing this feature might make it
		 * easier to lock a resource indefinitely, but it is not the
		 * only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    !unprivileged_idprio)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread but if we are doing another process,
		 * do all the threads on that process.  If we
		 * specify our own pid we do the latter.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
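
/*
 * Sketch of the mapping done by rtp_to_pri() below, in symbolic terms:
 * a user-supplied rtprio of { RTP_PRIO_REALTIME, 0 } becomes kernel
 * priority PRI_MIN_REALTIME, { RTP_PRIO_NORMAL, 0 } becomes
 * PRI_MIN_TIMESHARE, and { RTP_PRIO_IDLE, RTP_PRIO_MAX } becomes
 * PRI_MIN_IDLE + RTP_PRIO_MAX, the least favorable idle priority.
 */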

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri, oldclass, oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	oldclass = td->td_pri_class;
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
	    td->td_pri_class != RTP_PRIO_NORMAL))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rl);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}
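
/*
 * Behavioral note (derived from the code below): lim_cb() fires about
 * once per second.  Once the accumulated runtime passes the soft CPU
 * limit, the process receives SIGXCPU and p_cpulimit is advanced by 5
 * seconds, so the signal repeats after roughly every 5 further seconds
 * of CPU time until the hard limit is reached and the process is
 * killed.
 */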

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_STATLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_STATUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1));
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1));
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UPDATE_COW(p);
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK &&
	    /*
	     * Skip calls from exec_new_vmspace(), done when stack is
	     * not mapped yet.
	     */
	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	lim_rlimit(td, uap->which, &rlim);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_STATLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg_locked(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

/* XXX: the MI version is too slow to use: */
#ifndef __HAVE_INLINE_FLSLL
#define	flsll(x)	(fls((x) >> 32) != 0 ? fls((x) >> 32) + 32 : fls(x))
#endif
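
/*
 * Illustrative use of mul64_by_fraction() (values chosen only for the
 * example): with a = 10^12 (about 11.6 days in microseconds),
 * b = 3 * 10^7 and c = 10^8, the naive product a * b is about 3 * 10^19
 * and overflows 64 bits, while the function returns approximately the
 * exact quotient 3 * 10^11 with an error of at most a few units.
 */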

static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t acc, bh, bl;
	int i, s, sa, sb;

	/*
	 * Calculate (a * b) / c accurately enough without overflowing.  c
	 * must be nonzero, and its top bit must be 0.  a or b must be
	 * <= c, and the implementation is tuned for b <= c.
	 *
	 * The comments about times are for use in calcru1() with units of
	 * microseconds for 'a' and stathz ticks at 128 Hz for b and c.
	 *
	 * Let n be the number of top zero bits in c.  Each iteration
	 * either returns, or reduces b by right shifting it by at least n.
	 * The number of iterations is at most 1 + 64 / n, and the error is
	 * at most the number of iterations.
	 *
	 * It is very unusual to need even 2 iterations.  Previous
	 * implementations overflowed essentially by returning early in the
	 * first iteration, with n = 38 giving overflow at 105+ hours and
	 * n = 32 giving overflow at 388+ days despite a more careful
	 * calculation.  388 days is a reasonable uptime, and the calculation
	 * needs to work for the uptime times the number of CPUs since 'a'
	 * is per-process.
	 */
	if (a >= (uint64_t)1 << 63)
		return (0);		/* Unsupported arg -- can't happen. */
	acc = 0;
	for (i = 0; i < 128; i++) {
		sa = flsll(a);
		sb = flsll(b);
		if (sa + sb <= 64)
			/* Up to 105 hours on first iteration. */
			return (acc + (a * b) / c);
		if (a >= c) {
			/*
			 * This reduction is based on a = q * c + r, with the
			 * remainder r < c.  'a' may be large to start, and
			 * moving bits from b into 'a' at the end of the loop
			 * sets the top bit of 'a', so the reduction makes
			 * significant progress.
			 */
			acc += (a / c) * b;
			a %= c;
			sa = flsll(a);
			if (sa + sb <= 64)
				/* Up to 388 days on first iteration. */
				return (acc + (a * b) / c);
		}

		/*
		 * This step writes a * b as a * ((bh << s) + bl) =
		 * a * (bh << s) + a * bl = (a << s) * bh + a * bl.  The 2
		 * additive terms are handled separately.  Splitting in
		 * this way is linear except for rounding errors.
		 *
		 * s = 64 - sa is the maximum such that a << s fits in 64
		 * bits.  Since a < c and c has at least 1 zero top bit,
		 * sa < 64 and s > 0.  Thus this step makes progress by
		 * reducing b (it increases 'a', but taking remainders on
		 * the next iteration completes the reduction).
		 *
		 * Finally, the choice for s is just what is needed to keep
		 * a * bl from overflowing, so we don't need complications
		 * like a recursive call mul64_by_fraction(a, bl, c) to
		 * handle the second additive term.
		 */
		s = 64 - sa;
		bh = b >> s;
		bl = b - (bh << s);
		acc += (a * bl) / c;
		a <<= s;
		b = bh;
	}
	return (0);		/* Algorithm failure -- can't happen. */
}
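
/*
 * Arithmetic behind the fast path in calcru1() below: with tu in
 * microseconds and tt in stathz ticks, tu <= 2^38 us is roughly 76.4
 * hours and, at stathz = 128, tt <= 2^26 ticks is roughly 6 days.
 * Since 38 + 26 = 64, the intermediate products tu * ut and tu * st
 * stay within a 64-bit unsigned integer for these magnitudes, so the
 * simple formula is safe; larger values fall through to
 * mul64_by_fraction().
 */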
|
|
|
|
|
2006-02-21 21:47:46 +00:00
|
|
|
static void
|
|
|
|
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
|
|
|
|
struct timeval *sp)
|
|
|
|
{
|
2006-03-11 10:48:19 +00:00
|
|
|
/* {user, system, interrupt, total} {ticks, usec}: */
|
2010-06-21 09:55:56 +00:00
|
|
|
uint64_t ut, uu, st, su, it, tt, tu;
|
2006-02-11 09:33:07 +00:00
|
|
|
|
2004-10-05 18:51:11 +00:00
|
|
|
ut = ruxp->rux_uticks;
|
|
|
|
st = ruxp->rux_sticks;
|
|
|
|
it = ruxp->rux_iticks;
|
2003-02-17 02:19:58 +00:00
|
|
|
tt = ut + st + it;
|
|
|
|
if (tt == 0) {
|
2006-03-11 10:48:19 +00:00
|
|
|
/* Avoid divide by zero */
|
2003-02-17 02:19:58 +00:00
|
|
|
st = 1;
|
|
|
|
tt = 1;
|
|
|
|
}
|
2006-03-11 10:48:19 +00:00
|
|
|
tu = cputick2usec(ruxp->rux_runtime);
|
2004-06-21 17:46:27 +00:00
|
|
|
if ((int64_t)tu < 0) {
|
2006-03-11 10:48:19 +00:00
|
|
|
/* XXX: this should be an assert /phk */
|
2004-06-21 17:46:27 +00:00
|
|
|
printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
|
2004-02-06 19:30:12 +00:00
|
|
|
(intmax_t)tu, p->p_pid, p->p_comm);
|
2006-03-11 10:48:19 +00:00
|
|
|
tu = ruxp->rux_tu;
|
2003-02-17 02:19:58 +00:00
|
|
|
}
|
1999-03-13 19:46:13 +00:00
|
|
|
|
Finish the fix for overflow in calcru1().
The previous fix was unnecessarily very slow up to 105 hours where the
simple formula used previously worked, and unnecessarily slow by a factor
of about 5/3 up to 388 days, and didn't work above 388 days. 388 days is
not a long time, since it is a reasonable uptime, and for processes the
times being calculated are aggregated over all threads, so with N CPUs
running the same thread a runtime of 388 days is reachable after only
388 / N physical days.
The PRs document overflow at 388 days, but don't try to fix it.
Use the simple formula up to 76 hours. Then use a complicated general
method that reduces to the simple formula up to a bit less than 105
hours, then reduces to the previous method without its extra work up
to almost 388 days, then does more complicated reductions, usually
many bits at a time so that this is not slow. This works up to half
of maximum representable time (292271 years), with accumulated rounding
errors of at most 32 usec.
amd64 can do all this with no avoidable rounding errors in an inline
asm with 2 instructions, but this is too special to use. __uint128_t
can do the same with 100's of instructions on 64-bit arches. Long
doubles with at least 64 bits of precision are the easiest method to
use on i386 userland, but are hard to use in the kernel.
PR: 76972 and duplicates
Reviewed by: kib
2019-02-14 19:07:08 +00:00
|
|
|
/* Subdivide tu. Avoid overflow in the multiplications. */
|
|
|
|
if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
|
|
|
|
/* Up to 76 hours when stathz is 128. */
|
|
|
|
uu = (tu * ut) / tt;
|
|
|
|
su = (tu * st) / tt;
|
|
|
|
} else {
|
|
|
|
uu = mul64_by_fraction(tu, ut, tt);
|
2019-03-18 12:41:42 +00:00
|
|
|
su = mul64_by_fraction(tu, st, tt);
|
2019-02-14 19:07:08 +00:00
|
|
|
}
|
|
|
|
|
2006-03-11 10:48:19 +00:00
|
|
|
if (tu >= ruxp->rux_tu) {
|
|
|
|
/*
|
|
|
|
* The normal case, time increased.
|
|
|
|
* Enforce monotonicity of bucketed numbers.
|
|
|
|
*/
|
2004-10-05 18:51:11 +00:00
|
|
|
if (uu < ruxp->rux_uu)
|
|
|
|
uu = ruxp->rux_uu;
|
2006-03-11 10:48:19 +00:00
|
|
|
if (su < ruxp->rux_su)
|
2004-10-05 18:51:11 +00:00
|
|
|
su = ruxp->rux_su;
|
2006-03-11 10:48:19 +00:00
|
|
|
} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
|
2008-03-16 21:32:20 +00:00
|
|
|
/*
|
2006-03-11 10:48:19 +00:00
|
|
|
* When we calibrate the cputicker, it is not uncommon to
|
|
|
|
* see the presumably fixed frequency increase slightly over
|
|
|
|
* time as a result of thermal stabilization and NTP
|
|
|
|
* discipline (of the reference clock). We therefore ignore
|
|
|
|
* a bit of backwards slop because we expect to catch up
|
2008-03-16 21:32:20 +00:00
|
|
|
* shortly. We use a 3 microsecond limit to catch low
|
2006-03-11 10:48:19 +00:00
|
|
|
* counts and a 1% limit for high counts.
|
|
|
|
*/
|
|
|
|
uu = ruxp->rux_uu;
|
|
|
|
su = ruxp->rux_su;
|
|
|
|
tu = ruxp->rux_tu;
|
|
|
|
} else { /* tu < ruxp->rux_tu */
|
|
|
|
/*
|
2008-09-05 15:55:06 +00:00
|
|
|
* What happened here was likely that a laptop, which ran at
|
2006-03-11 10:48:19 +00:00
|
|
|
* a reduced clock frequency at boot, kicked into high gear.
|
|
|
|
* The wisdom of spamming this message in that case is
|
|
|
|
* dubious, but it might also be indicative of something
|
|
|
|
 * serious, so let's keep it and hope laptops can be made
|
|
|
|
* more truthful about their CPU speed via ACPI.
|
|
|
|
*/
|
|
|
|
printf("calcru: runtime went backwards from %ju usec "
|
|
|
|
"to %ju usec for pid %d (%s)\n",
|
|
|
|
(uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
|
|
|
|
p->p_pid, p->p_comm);
|
2003-02-17 02:19:58 +00:00
|
|
|
}
|
2006-03-11 10:48:19 +00:00
|
|
|
|
2004-10-05 18:51:11 +00:00
|
|
|
ruxp->rux_uu = uu;
|
|
|
|
ruxp->rux_su = su;
|
2006-03-11 10:48:19 +00:00
|
|
|
ruxp->rux_tu = tu;
|
2003-02-17 02:19:58 +00:00
|
|
|
|
|
|
|
up->tv_sec = uu / 1000000;
|
|
|
|
up->tv_usec = uu % 1000000;
|
|
|
|
sp->tv_sec = su / 1000000;
|
|
|
|
sp->tv_usec = su % 1000000;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
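The 3 usec / 1% "backwards slop" tolerance above is easiest to see with concrete numbers; the values in this standalone sketch are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Previously reported total runtime and a slightly smaller new one. */
	uint64_t prev_tu = 1000000;	/* 1 second, in usec */
	uint64_t tu = 999500;		/* 0.05% smaller after recalibration */

	if (tu >= prev_tu)
		printf("time advanced; enforce monotonic per-bucket values\n");
	else if (tu + 3 > prev_tu || 101 * tu > 100 * prev_tu)
		printf("small backwards slop; keep the previous values\n");
	else
		printf("runtime went backwards significantly\n");
	return (0);
}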
|
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct getrusage_args {
|
|
|
|
int who;
|
|
|
|
struct rusage *rusage;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
sys_getrusage(struct thread *td, struct getrusage_args *uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2004-02-06 19:30:12 +00:00
|
|
|
struct rusage ru;
|
2004-10-05 18:51:11 +00:00
|
|
|
int error;
|
|
|
|
|
|
|
|
error = kern_getrusage(td, uap->who, &ru);
|
|
|
|
if (error == 0)
|
|
|
|
error = copyout(&ru, uap->rusage, sizeof(struct rusage));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2010-05-01 14:46:17 +00:00
|
|
|
kern_getrusage(struct thread *td, int who, struct rusage *rup)
|
2004-10-05 18:51:11 +00:00
|
|
|
{
|
2004-02-06 19:30:12 +00:00
|
|
|
struct proc *p;
|
2008-03-19 06:19:01 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2008-03-19 06:19:01 +00:00
|
|
|
error = 0;
|
2004-02-06 19:30:12 +00:00
|
|
|
p = td->td_proc;
|
2004-10-05 18:51:11 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
switch (who) {
|
1994-05-24 10:09:53 +00:00
|
|
|
case RUSAGE_SELF:
|
2007-06-09 21:48:44 +00:00
|
|
|
rufetchcalc(p, rup, &rup->ru_utime,
|
|
|
|
&rup->ru_stime);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case RUSAGE_CHILDREN:
|
2004-10-05 18:51:11 +00:00
|
|
|
*rup = p->p_stats->p_cru;
|
|
|
|
calccru(p, &rup->ru_utime, &rup->ru_stime);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
2010-05-04 05:55:37 +00:00
|
|
|
case RUSAGE_THREAD:
|
2014-11-26 14:10:00 +00:00
|
|
|
PROC_STATLOCK(p);
|
2010-05-04 05:55:37 +00:00
|
|
|
thread_lock(td);
|
2011-07-18 17:33:08 +00:00
|
|
|
rufetchtd(td, rup);
|
2010-05-04 05:55:37 +00:00
|
|
|
thread_unlock(td);
|
2014-11-26 14:10:00 +00:00
|
|
|
PROC_STATUNLOCK(p);
|
2010-05-04 05:55:37 +00:00
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
default:
|
2008-03-19 06:19:01 +00:00
|
|
|
error = EINVAL;
|
2001-09-01 19:04:37 +00:00
|
|
|
}
|
2004-10-05 18:51:11 +00:00
|
|
|
PROC_UNLOCK(p);
|
2008-03-19 06:19:01 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
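For reference, the syscall implemented above is consumed from userland through the standard getrusage(2) interface; a minimal example (on FreeBSD, ru_maxrss is reported in kilobytes):

#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) != 0) {
		perror("getrusage");
		return (1);
	}
	printf("user   %jd.%06ld s\n", (intmax_t)ru.ru_utime.tv_sec,
	    (long)ru.ru_utime.tv_usec);
	printf("system %jd.%06ld s\n", (intmax_t)ru.ru_stime.tv_sec,
	    (long)ru.ru_stime.tv_usec);
	printf("maxrss %ld kB\n", ru.ru_maxrss);
	return (0);
}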
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
void
|
2007-06-01 01:12:45 +00:00
|
|
|
rucollect(struct rusage *ru, struct rusage *ru2)
|
|
|
|
{
|
|
|
|
long *ip, *ip2;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (ru->ru_maxrss < ru2->ru_maxrss)
|
|
|
|
ru->ru_maxrss = ru2->ru_maxrss;
|
|
|
|
ip = &ru->ru_first;
|
|
|
|
ip2 = &ru2->ru_first;
|
|
|
|
for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
|
|
|
|
*ip++ += *ip2++;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
|
|
|
|
struct rusage_ext *rux2)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
|
2006-02-07 21:22:02 +00:00
|
|
|
rux->rux_runtime += rux2->rux_runtime;
|
2004-10-05 18:51:11 +00:00
|
|
|
rux->rux_uticks += rux2->rux_uticks;
|
|
|
|
rux->rux_sticks += rux2->rux_sticks;
|
|
|
|
rux->rux_iticks += rux2->rux_iticks;
|
|
|
|
rux->rux_uu += rux2->rux_uu;
|
|
|
|
rux->rux_su += rux2->rux_su;
|
2006-03-11 10:48:19 +00:00
|
|
|
rux->rux_tu += rux2->rux_tu;
|
2007-06-01 01:12:45 +00:00
|
|
|
rucollect(ru, ru2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Aggregate tick counts into the proc's rusage_ext.
|
|
|
|
*/
|
2010-05-24 10:23:49 +00:00
|
|
|
static void
|
2019-12-15 21:11:15 +00:00
|
|
|
ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
|
2007-06-01 01:12:45 +00:00
|
|
|
{
|
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
sychronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-05 00:00:57 +00:00
|
|
|
|
2008-01-10 22:11:20 +00:00
|
|
|
rux->rux_runtime += td->td_incruntime;
|
2007-06-01 01:12:45 +00:00
|
|
|
rux->rux_uticks += td->td_uticks;
|
|
|
|
rux->rux_sticks += td->td_sticks;
|
|
|
|
rux->rux_iticks += td->td_iticks;
|
|
|
|
}
|
|
|
|
|
2010-05-24 10:23:49 +00:00
|
|
|
void
|
2019-12-15 21:11:15 +00:00
|
|
|
ruxagg_locked(struct proc *p, struct thread *td)
|
2010-05-01 14:46:17 +00:00
|
|
|
{
|
2019-12-15 21:11:15 +00:00
|
|
|
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
|
|
|
PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);
|
2010-05-01 14:46:17 +00:00
|
|
|
|
2019-12-15 21:11:15 +00:00
|
|
|
ruxagg_ext_locked(&p->p_rux, td);
|
|
|
|
ruxagg_ext_locked(&td->td_rux, td);
|
2010-05-04 05:55:37 +00:00
|
|
|
td->td_incruntime = 0;
|
|
|
|
td->td_uticks = 0;
|
|
|
|
td->td_iticks = 0;
|
|
|
|
td->td_sticks = 0;
|
2019-12-15 21:11:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ruxagg(struct proc *p, struct thread *td)
|
|
|
|
{
|
|
|
|
|
|
|
|
thread_lock(td);
|
|
|
|
ruxagg_locked(p, td);
|
2010-05-01 14:46:17 +00:00
|
|
|
thread_unlock(td);
|
|
|
|
}
|
|
|
|
|
2007-06-01 01:12:45 +00:00
|
|
|
/*
|
|
|
|
* Update the rusage_ext structure and fetch a valid aggregate rusage
|
|
|
|
* for proc p if storage for one is supplied.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
rufetch(struct proc *p, struct rusage *ru)
|
|
|
|
{
|
|
|
|
struct thread *td;
|
|
|
|
|
2014-11-26 14:10:00 +00:00
|
|
|
PROC_STATLOCK_ASSERT(p, MA_OWNED);
|
2007-06-09 21:48:44 +00:00
|
|
|
|
2007-06-09 18:56:11 +00:00
|
|
|
*ru = p->p_ru;
|
|
|
|
if (p->p_numthreads > 0) {
|
2007-06-01 01:12:45 +00:00
|
|
|
FOREACH_THREAD_IN_PROC(p, td) {
|
2010-05-04 05:55:37 +00:00
|
|
|
ruxagg(p, td);
|
2007-06-01 01:12:45 +00:00
|
|
|
rucollect(ru, &td->td_ru);
|
|
|
|
}
|
2007-06-09 18:56:11 +00:00
|
|
|
}
|
2007-06-09 21:48:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Atomically perform a rufetch and a calcru together.
|
|
|
|
 * Consumers can safely assume that calcru() is executed only once
|
|
|
|
 * rufetch() is completed.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
|
|
|
|
struct timeval *sp)
|
|
|
|
{
|
|
|
|
|
2014-11-26 14:10:00 +00:00
|
|
|
PROC_STATLOCK(p);
|
2007-06-09 21:48:44 +00:00
|
|
|
rufetch(p, ru);
|
|
|
|
calcru(p, up, sp);
|
2014-11-26 14:10:00 +00:00
|
|
|
PROC_STATUNLOCK(p);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Allocate a new resource limits structure and initialize its
|
|
|
|
* reference count and mutex pointer.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
struct plimit *
|
2004-02-05 20:53:25 +00:00
|
|
|
lim_alloc()
|
2004-02-04 21:52:57 +00:00
|
|
|
{
|
|
|
|
struct plimit *limp;
|
|
|
|
|
2004-09-24 00:38:15 +00:00
|
|
|
limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
|
2005-09-27 18:07:05 +00:00
|
|
|
refcount_init(&limp->pl_refcnt, 1);
|
2004-02-04 21:52:57 +00:00
|
|
|
return (limp);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct plimit *
|
2014-10-27 20:18:30 +00:00
|
|
|
lim_hold(struct plimit *limp)
|
2004-02-04 21:52:57 +00:00
|
|
|
{
|
|
|
|
|
2005-09-27 18:07:05 +00:00
|
|
|
refcount_acquire(&limp->pl_refcnt);
|
2004-02-04 21:52:57 +00:00
|
|
|
return (limp);
|
|
|
|
}
|
|
|
|
|
2007-06-01 01:12:45 +00:00
|
|
|
void
|
|
|
|
lim_fork(struct proc *p1, struct proc *p2)
|
|
|
|
{
|
2011-11-07 21:09:04 +00:00
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p1, MA_OWNED);
|
|
|
|
PROC_LOCK_ASSERT(p2, MA_OWNED);
|
|
|
|
|
2007-06-01 01:12:45 +00:00
|
|
|
p2->p_limit = lim_hold(p1->p_limit);
|
|
|
|
callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
|
|
|
|
if (p1->p_cpulimit != RLIM_INFINITY)
|
2013-03-04 16:25:12 +00:00
|
|
|
callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
|
|
|
|
lim_cb, p2, C_PREL(1));
|
2007-06-01 01:12:45 +00:00
|
|
|
}
|
|
|
|
|
2004-02-04 21:52:57 +00:00
|
|
|
void
|
2014-10-27 20:18:30 +00:00
|
|
|
lim_free(struct plimit *limp)
|
2004-02-04 21:52:57 +00:00
|
|
|
{
|
|
|
|
|
2005-09-27 18:07:05 +00:00
|
|
|
if (refcount_release(&limp->pl_refcnt))
|
2004-02-04 21:52:57 +00:00
|
|
|
free((void *)limp, M_PLIMIT);
|
|
|
|
}
|
|
|
|
|
2020-11-14 19:21:46 +00:00
|
|
|
void
|
|
|
|
lim_freen(struct plimit *limp, int n)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (refcount_releasen(&limp->pl_refcnt, n))
|
|
|
|
free((void *)limp, M_PLIMIT);
|
|
|
|
}
|
|
|
|
|
2004-02-04 21:52:57 +00:00
|
|
|
/*
|
|
|
|
* Make a copy of the plimit structure.
|
|
|
|
* We share these structures copy-on-write after fork.
|
|
|
|
*/
|
|
|
|
void
|
2014-10-27 20:18:30 +00:00
|
|
|
lim_copy(struct plimit *dst, struct plimit *src)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
|
2015-12-22 21:07:33 +00:00
|
|
|
KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
|
2004-02-04 21:52:57 +00:00
|
|
|
bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Return the hard limit for a particular system resource. The
|
|
|
|
* which parameter specifies the index into the rlimit array.
|
2004-02-04 21:52:57 +00:00
|
|
|
*/
|
|
|
|
rlim_t
|
2015-06-10 10:48:12 +00:00
|
|
|
lim_max(struct thread *td, int which)
|
2004-02-04 21:52:57 +00:00
|
|
|
{
|
|
|
|
struct rlimit rl;
|
|
|
|
|
2015-06-10 10:48:12 +00:00
|
|
|
lim_rlimit(td, which, &rl);
|
|
|
|
return (rl.rlim_max);
|
|
|
|
}
|
|
|
|
|
|
|
|
rlim_t
|
|
|
|
lim_max_proc(struct proc *p, int which)
|
|
|
|
{
|
|
|
|
struct rlimit rl;
|
|
|
|
|
|
|
|
lim_rlimit_proc(p, which, &rl);
|
2004-02-04 21:52:57 +00:00
|
|
|
return (rl.rlim_max);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Return the current (soft) limit for a particular system resource.
|
|
|
|
 * The which parameter specifies the index into the rlimit array.
|
2004-02-04 21:52:57 +00:00
|
|
|
*/
|
|
|
|
rlim_t
|
2018-12-11 12:01:46 +00:00
|
|
|
(lim_cur)(struct thread *td, int which)
|
2004-02-04 21:52:57 +00:00
|
|
|
{
|
|
|
|
struct rlimit rl;
|
|
|
|
|
2015-06-10 10:48:12 +00:00
|
|
|
lim_rlimit(td, which, &rl);
|
|
|
|
return (rl.rlim_cur);
|
|
|
|
}
|
|
|
|
|
|
|
|
rlim_t
|
|
|
|
lim_cur_proc(struct proc *p, int which)
|
|
|
|
{
|
|
|
|
struct rlimit rl;
|
|
|
|
|
|
|
|
lim_rlimit_proc(p, which, &rl);
|
2004-02-11 18:04:13 +00:00
|
|
|
return (rl.rlim_cur);
|
2004-02-04 21:52:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Return a copy of the entire rlimit structure for the system limit
|
|
|
|
* specified by 'which' in the rlimit structure pointed to by 'rlp'.
|
2004-02-04 21:52:57 +00:00
|
|
|
*/
|
|
|
|
void
|
2015-06-10 10:48:12 +00:00
|
|
|
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
|
|
|
|
{
|
|
|
|
struct proc *p = td->td_proc;
|
|
|
|
|
|
|
|
MPASS(td == curthread);
|
|
|
|
KASSERT(which >= 0 && which < RLIM_NLIMITS,
|
|
|
|
("request for invalid resource limit"));
|
|
|
|
*rlp = td->td_limit->pl_rlimit[which];
|
|
|
|
if (p->p_sysent->sv_fixlimit != NULL)
|
|
|
|
p->p_sysent->sv_fixlimit(rlp, which);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
|
2004-02-04 21:52:57 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
PROC_LOCK_ASSERT(p, MA_OWNED);
|
|
|
|
KASSERT(which >= 0 && which < RLIM_NLIMITS,
|
|
|
|
("request for invalid resource limit"));
|
|
|
|
*rlp = p->p_limit->pl_rlimit[which];
|
2007-05-14 22:40:04 +00:00
|
|
|
if (p->p_sysent->sv_fixlimit != NULL)
|
|
|
|
p->p_sysent->sv_fixlimit(rlp, which);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2000-09-05 22:11:13 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
uihashinit()
|
|
|
|
{
|
2000-11-26 12:08:17 +00:00
|
|
|
|
2000-09-05 22:11:13 +00:00
|
|
|
uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
|
2008-03-16 21:29:02 +00:00
|
|
|
rw_init(&uihashtbl_lock, "uidinfo hash");
|
2000-09-05 22:11:13 +00:00
|
|
|
}
|
|
|
|
|
2000-11-26 12:08:17 +00:00
|
|
|
/*
|
2004-02-05 20:53:25 +00:00
|
|
|
* Look up a uidinfo struct for the parameter uid.
|
2008-03-16 21:29:02 +00:00
|
|
|
* uihashtbl_lock must be locked.
|
2014-10-27 20:20:05 +00:00
|
|
|
* Increase refcount on uidinfo struct returned.
|
2000-11-26 12:08:17 +00:00
|
|
|
*/
|
2000-09-05 22:11:13 +00:00
|
|
|
static struct uidinfo *
|
2014-10-27 20:18:30 +00:00
|
|
|
uilookup(uid_t uid)
|
2000-09-05 22:11:13 +00:00
|
|
|
{
|
2004-02-05 20:53:25 +00:00
|
|
|
struct uihashhead *uipp;
|
|
|
|
struct uidinfo *uip;
|
2000-09-05 22:11:13 +00:00
|
|
|
|
2008-03-16 21:29:02 +00:00
|
|
|
rw_assert(&uihashtbl_lock, RA_LOCKED);
|
2000-09-05 22:11:13 +00:00
|
|
|
uipp = UIHASH(uid);
|
|
|
|
LIST_FOREACH(uip, uipp, ui_hash)
|
2014-10-27 20:20:05 +00:00
|
|
|
if (uip->ui_uid == uid) {
|
|
|
|
uihold(uip);
|
2000-09-05 22:11:13 +00:00
|
|
|
break;
|
2014-10-27 20:20:05 +00:00
|
|
|
}
|
2000-09-05 22:11:13 +00:00
|
|
|
|
|
|
|
return (uip);
|
|
|
|
}
|
|
|
|
|
2000-11-26 12:08:17 +00:00
|
|
|
/*
|
2000-12-01 06:59:18 +00:00
|
|
|
* Find or allocate a struct uidinfo for a particular uid.
|
2014-10-27 20:20:05 +00:00
|
|
|
* Returns with uidinfo struct referenced.
|
2000-11-26 12:08:17 +00:00
|
|
|
* uifree() should be called on a struct uidinfo when released.
|
|
|
|
*/
|
2000-09-05 22:11:13 +00:00
|
|
|
struct uidinfo *
|
2014-10-27 20:18:30 +00:00
|
|
|
uifind(uid_t uid)
|
2000-09-05 22:11:13 +00:00
|
|
|
{
|
2014-10-27 20:20:05 +00:00
|
|
|
struct uidinfo *new_uip, *uip;
|
2017-11-01 05:51:20 +00:00
|
|
|
struct ucred *cred;
|
|
|
|
|
|
|
|
cred = curthread->td_ucred;
|
|
|
|
if (cred->cr_uidinfo->ui_uid == uid) {
|
|
|
|
uip = cred->cr_uidinfo;
|
|
|
|
uihold(uip);
|
|
|
|
return (uip);
|
|
|
|
} else if (cred->cr_ruidinfo->ui_uid == uid) {
|
|
|
|
uip = cred->cr_ruidinfo;
|
|
|
|
uihold(uip);
|
|
|
|
return (uip);
|
|
|
|
}
|
2000-09-05 22:11:13 +00:00
|
|
|
|
2008-03-16 21:29:02 +00:00
|
|
|
rw_rlock(&uihashtbl_lock);
|
2000-09-05 22:11:13 +00:00
|
|
|
uip = uilookup(uid);
|
2014-10-27 20:20:05 +00:00
|
|
|
rw_runlock(&uihashtbl_lock);
|
|
|
|
if (uip != NULL)
|
|
|
|
return (uip);
|
|
|
|
|
|
|
|
new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
|
|
|
|
racct_create(&new_uip->ui_racct);
|
|
|
|
refcount_init(&new_uip->ui_ref, 1);
|
|
|
|
new_uip->ui_uid = uid;
|
|
|
|
|
|
|
|
rw_wlock(&uihashtbl_lock);
|
|
|
|
/*
|
|
|
|
* There's a chance someone created our uidinfo while we
|
|
|
|
* were in malloc and not holding the lock, so we have to
|
|
|
|
* make sure we don't insert a duplicate uidinfo.
|
|
|
|
*/
|
|
|
|
if ((uip = uilookup(uid)) == NULL) {
|
|
|
|
LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
|
|
|
|
rw_wunlock(&uihashtbl_lock);
|
|
|
|
uip = new_uip;
|
|
|
|
} else {
|
|
|
|
rw_wunlock(&uihashtbl_lock);
|
|
|
|
racct_destroy(&new_uip->ui_racct);
|
|
|
|
free(new_uip, M_UIDINFO);
|
2001-03-09 18:40:34 +00:00
|
|
|
}
|
2000-09-05 22:11:13 +00:00
|
|
|
return (uip);
|
|
|
|
}
|
|
|
|
|
2000-11-30 19:15:22 +00:00
|
|
|
/*
|
2000-12-01 06:59:18 +00:00
|
|
|
* Place another refcount on a uidinfo struct.
|
2000-11-30 19:15:22 +00:00
|
|
|
*/
|
|
|
|
void
|
2014-10-27 20:18:30 +00:00
|
|
|
uihold(struct uidinfo *uip)
|
2000-11-30 19:15:22 +00:00
|
|
|
{
|
|
|
|
|
2008-03-16 21:29:02 +00:00
|
|
|
refcount_acquire(&uip->ui_ref);
|
2000-11-30 19:15:22 +00:00
|
|
|
}
|
|
|
|
|
2010-07-18 20:57:53 +00:00
|
|
|
/*-
|
2000-12-01 06:59:18 +00:00
|
|
|
* Since uidinfo structs have a long lifetime, we use an
|
2000-11-30 19:15:22 +00:00
|
|
|
* opportunistic refcounting scheme to avoid locking the lookup hash
|
|
|
|
* for each release.
|
|
|
|
*
|
2000-12-01 06:59:18 +00:00
|
|
|
* If the refcount hits 0, we need to free the structure,
|
2000-11-30 19:15:22 +00:00
|
|
|
* which means we need to lock the hash.
|
2000-12-01 06:59:18 +00:00
|
|
|
* Optimal case:
|
|
|
|
* After locking the struct and lowering the refcount, if we find
|
|
|
|
* that we don't need to free, simply unlock and return.
|
|
|
|
* Suboptimal case:
|
|
|
|
* If refcount lowering results in need to free, bump the count
|
2007-05-27 20:50:23 +00:00
|
|
|
* back up, lose the lock and acquire the locks in the proper
|
2000-12-01 06:59:18 +00:00
|
|
|
* order to try again.
|
2000-11-26 12:08:17 +00:00
|
|
|
*/
|
|
|
|
void
|
2014-10-27 20:18:30 +00:00
|
|
|
uifree(struct uidinfo *uip)
|
2000-09-05 22:11:13 +00:00
|
|
|
{
|
|
|
|
|
2018-12-07 16:11:45 +00:00
|
|
|
if (refcount_release_if_not_last(&uip->ui_ref))
|
2000-11-30 19:15:22 +00:00
|
|
|
return;
|
2000-12-01 06:59:18 +00:00
|
|
|
|
2008-03-16 21:29:02 +00:00
|
|
|
rw_wlock(&uihashtbl_lock);
|
2014-10-27 20:20:05 +00:00
|
|
|
if (refcount_release(&uip->ui_ref) == 0) {
|
2008-03-16 21:29:02 +00:00
|
|
|
rw_wunlock(&uihashtbl_lock);
|
2000-11-26 12:08:17 +00:00
|
|
|
return;
|
2000-09-05 22:11:13 +00:00
|
|
|
}
|
2014-10-27 20:20:05 +00:00
|
|
|
|
|
|
|
racct_destroy(&uip->ui_racct);
|
|
|
|
LIST_REMOVE(uip, ui_hash);
|
2008-03-16 21:29:02 +00:00
|
|
|
rw_wunlock(&uihashtbl_lock);
|
2014-10-27 20:20:05 +00:00
|
|
|
|
|
|
|
if (uip->ui_sbsize != 0)
|
|
|
|
printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
|
|
|
|
uip->ui_uid, uip->ui_sbsize);
|
|
|
|
if (uip->ui_proccnt != 0)
|
|
|
|
printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
|
|
|
|
uip->ui_uid, uip->ui_proccnt);
|
|
|
|
if (uip->ui_vmsize != 0)
|
|
|
|
printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
|
|
|
|
uip->ui_uid, (unsigned long long)uip->ui_vmsize);
|
|
|
|
free(uip, M_UIDINFO);
|
2000-09-05 22:11:13 +00:00
|
|
|
}
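The comment above describes the opportunistic release scheme in prose; the following is a minimal userland sketch of the same idea, using C11 atomics and a pthread rwlock rather than the kernel's refcount(9) and rwlock(9) primitives. All names in it are illustrative, not kernel APIs.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	_Atomic unsigned refs;
	/* ... payload, hash linkage ... */
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Drop a reference without the table lock unless we might be the last holder. */
static bool
release_if_not_last(_Atomic unsigned *refs)
{
	unsigned old = atomic_load(refs);

	while (old > 1) {
		if (atomic_compare_exchange_weak(refs, &old, old - 1))
			return (true);	/* fast path, no lock needed */
	}
	return (false);
}

static void
obj_release(struct obj *o)
{

	if (release_if_not_last(&o->refs))
		return;
	/* Possibly the last reference: take the lock before unlinking. */
	pthread_rwlock_wrlock(&table_lock);
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		/* We were the last holder: unlink from the table and free. */
	}
	pthread_rwlock_unlock(&table_lock);
}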
|
|
|
|
|
2014-11-23 08:25:44 +00:00
|
|
|
#ifdef RACCT
|
2011-03-29 17:47:25 +00:00
|
|
|
void
|
|
|
|
ui_racct_foreach(void (*callback)(struct racct *racct,
|
2015-11-15 12:10:51 +00:00
|
|
|
void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
|
|
|
|
void *arg2, void *arg3)
|
2011-03-29 17:47:25 +00:00
|
|
|
{
|
|
|
|
struct uidinfo *uip;
|
|
|
|
struct uihashhead *uih;
|
|
|
|
|
|
|
|
rw_rlock(&uihashtbl_lock);
|
2015-11-15 12:10:51 +00:00
|
|
|
if (pre != NULL)
|
|
|
|
(pre)();
|
2011-03-29 17:47:25 +00:00
|
|
|
for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
|
|
|
|
LIST_FOREACH(uip, uih, ui_hash) {
|
|
|
|
(callback)(uip->ui_racct, arg2, arg3);
|
|
|
|
}
|
|
|
|
}
|
2015-11-15 12:10:51 +00:00
|
|
|
if (post != NULL)
|
|
|
|
(post)();
|
2011-03-29 17:47:25 +00:00
|
|
|
rw_runlock(&uihashtbl_lock);
|
|
|
|
}
|
2014-11-23 08:25:44 +00:00
|
|
|
#endif
|
2011-03-29 17:47:25 +00:00
|
|
|
|
2015-06-25 00:15:37 +00:00
|
|
|
static inline int
|
|
|
|
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max, const char *name)
|
2000-09-05 22:11:13 +00:00
|
|
|
{
|
2018-01-04 22:07:58 +00:00
|
|
|
long new;
|
2000-11-26 12:08:17 +00:00
|
|
|
|
2004-02-05 20:53:25 +00:00
|
|
|
/* Don't allow them to exceed max, but allow subtraction. */
|
2018-01-04 22:07:58 +00:00
|
|
|
new = atomic_fetchadd_long(limit, (long)diff) + diff;
|
2008-03-16 21:29:02 +00:00
|
|
|
if (diff > 0 && max != 0) {
|
2018-01-04 22:07:58 +00:00
|
|
|
if (new < 0 || new > max) {
|
2015-06-25 00:15:37 +00:00
|
|
|
atomic_subtract_long(limit, (long)diff);
|
2008-03-16 21:29:02 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2018-01-04 22:07:58 +00:00
|
|
|
} else if (new < 0)
|
|
|
|
printf("negative %s for uid = %d\n", name, uip->ui_uid);
|
2000-09-05 22:11:13 +00:00
|
|
|
return (1);
|
|
|
|
}
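chglimit()'s lock-free accounting (optimistically add the delta, then roll back if the limit would be exceeded) translates directly to C11 atomics. The function and values below are a hypothetical userland sketch of that pattern, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Optimistically charge 'diff' against 'counter'; undo if it would exceed 'max'. */
static bool
try_charge(_Atomic long *counter, long diff, long max)
{
	long new;

	new = atomic_fetch_add(counter, diff) + diff;
	if (diff > 0 && max != 0 && (new < 0 || new > max)) {
		atomic_fetch_sub(counter, diff);	/* roll back the charge */
		return (false);
	}
	return (true);
}

int
main(void)
{
	static _Atomic long proccnt;

	printf("first charge ok: %d\n", try_charge(&proccnt, 3, 4));
	printf("over limit:      %d\n", try_charge(&proccnt, 3, 4));
	return (0);
}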
|
|
|
|
|
2015-06-25 00:15:37 +00:00
|
|
|
/*
|
|
|
|
* Change the count associated with number of processes
|
|
|
|
* a given user is using. When 'max' is 0, don't enforce a limit
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
|
|
|
|
}
|
|
|
|
|
2000-09-05 22:11:13 +00:00
|
|
|
/*
|
|
|
|
* Change the total socket buffer size a user has used.
|
|
|
|
*/
|
|
|
|
int
|
2014-10-27 20:18:30 +00:00
|
|
|
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
|
2000-09-05 22:11:13 +00:00
|
|
|
{
|
2015-06-25 00:15:37 +00:00
|
|
|
int diff, rv;
|
2000-09-05 22:11:13 +00:00
|
|
|
|
2008-03-16 21:29:02 +00:00
|
|
|
diff = to - *hiwat;
|
2015-06-25 00:15:37 +00:00
|
|
|
if (diff > 0 && max == 0) {
|
|
|
|
rv = 0;
|
2008-03-16 21:29:02 +00:00
|
|
|
} else {
|
2015-06-25 00:15:37 +00:00
|
|
|
rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
|
|
|
|
if (rv != 0)
|
|
|
|
*hiwat = to;
|
2000-09-05 22:11:13 +00:00
|
|
|
}
|
2015-06-25 00:15:37 +00:00
|
|
|
return (rv);
|
2000-09-05 22:11:13 +00:00
|
|
|
}
|
Integrate the new MPSAFE TTY layer to the FreeBSD operating system.
The last half year I've been working on a replacement TTY layer for the
FreeBSD kernel. The new TTY layer was designed to improve the following:
- Improved driver model:
The old TTY layer has a driver model that is not abstract enough to
make it friendly to use. A good example is the output path, where the
device drivers directly access the output buffers. This means that an
in-kernel PPP implementation must always convert network buffers into
TTY buffers.
If a PPP implementation would be built on top of the new TTY layer
(still needs a hooks layer, though), it would allow the PPP
implementation to directly hand the data to the TTY driver.
- Improved hotplugging:
With the old TTY layer, it isn't entirely safe to destroy TTY's from
the system. This implementation has a two-step destructing design,
where the driver first abandons the TTY. After all threads have left
the TTY, the TTY layer calls a routine in the driver, which can be
used to free resources (unit numbers, etc).
The pts(4) driver also implements this feature, which means
posix_openpt() will now return PTY's that are created on the fly.
- Improved performance:
One of the major improvements is the per-TTY mutex, which is expected
to improve scalability when compared to the old Giant locking.
Another change is the unbuffered copying to userspace, which is both
used on TTY device nodes and PTY masters.
Upgrading should be quite straightforward. Unlike previous versions,
existing kernel configuration files do not need to be changed, except
when they reference device drivers that are listed in UPDATING.
Obtained from: //depot/projects/mpsafetty/...
Approved by: philip (ex-mentor)
Discussed: on the lists, at BSDCan, at the DevSummit
Sponsored by: Snow B.V., the Netherlands
dcons(4) fixed by: kan
2008-08-20 08:31:58 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Change the count associated with number of pseudo-terminals
|
|
|
|
* a given user is using. When 'max' is 0, don't enforce a limit
|
|
|
|
*/
|
|
|
|
int
|
2014-10-27 20:18:30 +00:00
|
|
|
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
|
2008-08-20 08:31:58 +00:00
|
|
|
{
|
|
|
|
|
2015-06-25 00:15:37 +00:00
|
|
|
return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
|
2008-08-20 08:31:58 +00:00
|
|
|
}
|
2013-10-21 16:44:53 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
|
|
|
|
{
|
|
|
|
|
2015-06-25 00:15:37 +00:00
|
|
|
return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
|
2013-10-21 16:44:53 +00:00
|
|
|
}
|
2016-02-28 17:52:33 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
|
|
|
|
}
|