/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2011-06-30 10:56:02 +00:00
|
|
|
#include "opt_capsicum.h"
|
2005-06-24 00:16:57 +00:00
|
|
|
#include "opt_hwpmc_hooks.h"
|
2002-07-01 19:49:04 +00:00
|
|
|
#include "opt_ktrace.h"
|
2007-12-29 19:53:04 +00:00
|
|
|
#include "opt_vm.h"
|
2002-07-01 19:49:04 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/acct.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/capsicum.h>
|
|
|
|
#include <sys/eventhandler.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <sys/exec.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/fcntl.h>
|
|
|
|
#include <sys/filedesc.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <sys/imgact.h>
|
1996-03-10 08:42:54 +00:00
|
|
|
#include <sys/imgact_elf.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/lock.h>
|
2001-07-09 19:01:42 +00:00
|
|
|
#include <sys/malloc.h>
|
2017-01-05 01:44:12 +00:00
|
|
|
#include <sys/mman.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/mount.h>
|
|
|
|
#include <sys/mutex.h>
|
|
|
|
#include <sys/namei.h>
|
2006-11-06 13:42:10 +00:00
|
|
|
#include <sys/priv.h>
|
1996-05-01 02:43:13 +00:00
|
|
|
#include <sys/proc.h>
|
2016-07-15 15:32:09 +00:00
|
|
|
#include <sys/ptrace.h>
|
2004-11-27 06:51:39 +00:00
|
|
|
#include <sys/resourcevar.h>
|
2013-03-09 02:32:23 +00:00
|
|
|
#include <sys/rwlock.h>
|
2012-03-08 19:41:05 +00:00
|
|
|
#include <sys/sched.h>
|
2008-05-24 06:22:16 +00:00
|
|
|
#include <sys/sdt.h>
|
2004-04-23 03:01:40 +00:00
|
|
|
#include <sys/sf_buf.h>
|
1994-10-02 17:35:40 +00:00
|
|
|
#include <sys/shm.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/signalvar.h>
|
2017-01-05 01:44:12 +00:00
|
|
|
#include <sys/smp.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/syscallsubr.h>
|
1996-02-24 14:32:53 +00:00
|
|
|
#include <sys/sysctl.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/sysent.h>
|
|
|
|
#include <sys/sysproto.h>
|
2020-11-21 21:43:36 +00:00
|
|
|
#include <sys/timers.h>
|
2020-11-21 10:32:40 +00:00
|
|
|
#include <sys/umtx.h>
|
1996-05-01 02:43:13 +00:00
|
|
|
#include <sys/vnode.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/wait.h>
|
2002-07-01 23:18:08 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
#include <sys/ktrace.h>
|
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
|
|
|
|
#include <vm/vm.h>
|
1995-12-07 12:48:31 +00:00
|
|
|
#include <vm/vm_param.h>
|
|
|
|
#include <vm/pmap.h>
|
1998-01-11 21:35:38 +00:00
|
|
|
#include <vm/vm_page.h>
|
1995-12-07 12:48:31 +00:00
|
|
|
#include <vm/vm_map.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <vm/vm_kern.h>
|
1995-12-07 12:48:31 +00:00
|
|
|
#include <vm/vm_extern.h>
|
1997-04-18 02:43:05 +00:00
|
|
|
#include <vm/vm_object.h>
|
1998-01-11 21:35:38 +00:00
|
|
|
#include <vm/vm_pager.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2005-04-19 04:01:25 +00:00
|
|
|
#ifdef HWPMC_HOOKS
|
|
|
|
#include <sys/pmckern.h>
|
|
|
|
#endif
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <machine/reg.h>
|
|
|
|
|
2006-09-01 11:45:40 +00:00
|
|
|
#include <security/audit/audit.h>
|
2006-10-22 11:52:19 +00:00
|
|
|
#include <security/mac/mac_framework.h>
|
2006-09-01 11:45:40 +00:00
|
|
|
|
2008-05-24 06:22:16 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
|
|
|
#include <sys/dtrace_bsd.h>
|
|
|
|
dtrace_execexit_func_t dtrace_fasttrap_exec;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
SDT_PROVIDER_DECLARE(proc);
|
2015-12-16 23:39:27 +00:00
|
|
|
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
|
|
|
|
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
|
|
|
|
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");
|
2008-05-24 06:22:16 +00:00
|
|
|
|
1999-11-16 20:31:58 +00:00
|
|
|
MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
|
|
|
|
|
2015-09-07 16:44:28 +00:00
|
|
|
int coredump_pack_fileinfo = 1;
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
|
|
|
|
&coredump_pack_fileinfo, 0,
|
|
|
|
"Enable file path packing in 'procstat -f' coredump notes");
|
|
|
|
|
Fix core corruption caused by race in note_procstat_vmmap
This fix is spiritually similar to r287442 and was discovered thanks to
the KASSERT added in that revision.
NT_PROCSTAT_VMMAP output length, when packing kinfo structs, is tied to
the length of filenames corresponding to vnodes in the process' vm map
via vn_fullpath. As vnodes may move during coredump, this is racy.
We do not remove the race, only prevent it from causing coredump
corruption.
- Add a sysctl, kern.coredump_pack_vmmapinfo, to allow users to disable
kinfo packing for PROCSTAT_VMMAP notes. This avoids VMMAP corruption
and truncation, even if names change, at the cost of up to PATH_MAX
bytes per mapped object. The new sysctl is documented in core.5.
- Fix note_procstat_vmmap to self-limit in the second pass. This
addresses corruption, at the cost of sometimes producing a truncated
result.
- Fix PROCSTAT_VMMAP consumers libutil (and libprocstat, via copy-paste)
to grok the new zero padding.
Reported by: pho (https://people.freebsd.org/~pho/stress/log/datamove4-2.txt)
Relnotes: yes
Sponsored by: EMC / Isilon Storage Division
Differential Revision: https://reviews.freebsd.org/D3824
2015-10-06 18:07:00 +00:00
|
|
|
int coredump_pack_vmmapinfo = 1;
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
|
|
|
|
&coredump_pack_vmmapinfo, 0,
|
|
|
|
"Enable file path packing in 'procstat -v' coredump notes");
|
|
|
|
|
2002-09-21 22:07:17 +00:00
|
|
|
static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
|
|
|
|
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
|
2003-01-04 07:54:23 +00:00
|
|
|
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
|
2005-01-29 23:12:00 +00:00
|
|
|
static int do_execve(struct thread *td, struct image_args *args,
|
2020-09-23 18:03:07 +00:00
|
|
|
struct mac *mac_p, struct vmspace *oldvmspace);
|
2002-09-21 22:07:17 +00:00
|
|
|
|
2000-07-05 07:46:41 +00:00
|
|
|
/* XXX This should be vm_size_t. */
|
2016-10-19 19:42:01 +00:00
|
|
|
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
|
2020-03-02 15:30:52 +00:00
|
|
|
CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
|
|
|
|
"Location of process' ps_strings structure");
|
1998-12-27 18:03:29 +00:00
|
|
|
|
2000-07-05 07:46:41 +00:00
|
|
|
/* XXX This should be vm_size_t. */
|
2011-07-17 23:05:24 +00:00
|
|
|
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
|
2020-03-02 15:30:52 +00:00
|
|
|
CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
|
|
|
|
"Top of process stack");
|
1996-02-24 14:32:53 +00:00
|
|
|
|
2016-10-19 19:42:01 +00:00
|
|
|
SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
|
2020-03-02 15:30:52 +00:00
|
|
|
NULL, 0, sysctl_kern_stackprot, "I",
|
|
|
|
"Stack memory permissions");
|
2003-01-04 07:54:23 +00:00
|
|
|
|
1999-11-16 20:31:58 +00:00
|
|
|
u_long ps_arg_cache_limit = PAGE_SIZE / 16;
|
2001-11-08 00:24:48 +00:00
|
|
|
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
|
2020-03-02 15:30:52 +00:00
|
|
|
&ps_arg_cache_limit, 0,
|
|
|
|
"Process' command line characters cache limit");
|
1999-11-16 20:31:58 +00:00
|
|
|
|
2013-10-15 06:38:40 +00:00
|
|
|
static int disallow_high_osrel;
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
|
|
|
|
&disallow_high_osrel, 0,
|
|
|
|
"Disallow execution of binaries built for higher version of the world");
|
|
|
|
|
2009-10-02 17:48:51 +00:00
|
|
|
static int map_at_zero = 0;
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
|
2009-10-02 17:48:51 +00:00
|
|
|
"Permit processes to map an object at virtual address 0.");
|
|
|
|
|
2002-09-21 22:07:17 +00:00
|
|
|
static int
|
|
|
|
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
2004-02-18 00:54:17 +00:00
|
|
|
int error;
|
2002-09-21 22:07:17 +00:00
|
|
|
|
|
|
|
p = curproc;
|
2004-10-11 22:04:16 +00:00
|
|
|
#ifdef SCTL_MASK32
|
|
|
|
if (req->flags & SCTL_MASK32) {
|
2004-02-18 00:54:17 +00:00
|
|
|
unsigned int val;
|
|
|
|
val = (unsigned int)p->p_sysent->sv_psstrings;
|
|
|
|
error = SYSCTL_OUT(req, &val, sizeof(val));
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
|
|
|
|
sizeof(p->p_sysent->sv_psstrings));
|
|
|
|
return error;
|
2002-09-21 22:07:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
2004-02-18 00:54:17 +00:00
|
|
|
int error;
|
2002-09-21 22:07:17 +00:00
|
|
|
|
|
|
|
p = curproc;
|
2004-10-11 22:04:16 +00:00
|
|
|
#ifdef SCTL_MASK32
|
|
|
|
if (req->flags & SCTL_MASK32) {
|
2004-02-18 00:54:17 +00:00
|
|
|
unsigned int val;
|
|
|
|
val = (unsigned int)p->p_sysent->sv_usrstack;
|
|
|
|
error = SYSCTL_OUT(req, &val, sizeof(val));
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
|
|
|
|
sizeof(p->p_sysent->sv_usrstack));
|
|
|
|
return error;
|
2002-09-21 22:07:17 +00:00
|
|
|
}
|
|
|
|
|
2003-01-04 07:54:23 +00:00
|
|
|
static int
|
|
|
|
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
p = curproc;
|
|
|
|
return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
|
|
|
|
sizeof(p->p_sysent->sv_stackprot)));
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
1998-10-16 03:55:01 +00:00
|
|
|
* Each of the items is a pointer to a `const struct execsw', hence the
|
|
|
|
* double pointer here.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
1998-10-16 03:55:01 +00:00
|
|
|
static const struct execsw **execsw;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2003-12-28 04:18:13 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct execve_args {
|
2004-07-24 04:57:41 +00:00
|
|
|
char *fname;
|
|
|
|
char **argv;
|
|
|
|
char **envv;
|
2003-12-28 04:18:13 +00:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
int
|
2015-05-10 09:00:40 +00:00
|
|
|
sys_execve(struct thread *td, struct execve_args *uap)
|
2003-12-28 04:18:13 +00:00
|
|
|
{
|
2005-01-29 23:12:00 +00:00
|
|
|
struct image_args args;
|
2015-05-10 09:00:40 +00:00
|
|
|
struct vmspace *oldvmspace;
|
|
|
|
int error;
|
2005-01-29 23:12:00 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
error = pre_execve(td, &oldvmspace);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
2005-01-29 23:12:00 +00:00
|
|
|
error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
|
|
|
|
uap->argv, uap->envv);
|
|
|
|
if (error == 0)
|
2020-09-23 18:03:07 +00:00
|
|
|
error = kern_execve(td, &args, NULL, oldvmspace);
|
2015-05-10 09:00:40 +00:00
|
|
|
post_execve(td, error, oldvmspace);
|
2020-10-24 14:39:17 +00:00
|
|
|
AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
|
2005-01-29 23:12:00 +00:00
|
|
|
return (error);
|
2003-12-28 04:18:13 +00:00
|
|
|
}
|
|
|
|
|
2008-03-31 12:05:52 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct fexecve_args {
|
|
|
|
int fd;
|
|
|
|
char **argv;
|
|
|
|
char **envv;
|
2020-05-02 18:54:25 +00:00
|
|
|
};
|
2008-03-31 12:05:52 +00:00
|
|
|
#endif
|
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_fexecve(struct thread *td, struct fexecve_args *uap)
|
2008-03-31 12:05:52 +00:00
|
|
|
{
|
|
|
|
struct image_args args;
|
2015-05-10 09:00:40 +00:00
|
|
|
struct vmspace *oldvmspace;
|
|
|
|
int error;
|
2008-03-31 12:05:52 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
error = pre_execve(td, &oldvmspace);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
2008-03-31 12:05:52 +00:00
|
|
|
error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
|
|
|
|
uap->argv, uap->envv);
|
|
|
|
if (error == 0) {
|
|
|
|
args.fd = uap->fd;
|
2020-09-23 18:03:07 +00:00
|
|
|
error = kern_execve(td, &args, NULL, oldvmspace);
|
2008-03-31 12:05:52 +00:00
|
|
|
}
|
2015-05-10 09:00:40 +00:00
|
|
|
post_execve(td, error, oldvmspace);
|
2020-10-24 14:39:17 +00:00
|
|
|
AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
|
2008-03-31 12:05:52 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2003-12-28 04:18:13 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct __mac_execve_args {
|
|
|
|
char *fname;
|
|
|
|
char **argv;
|
|
|
|
char **envv;
|
|
|
|
struct mac *mac_p;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
int
|
2015-05-10 09:00:40 +00:00
|
|
|
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
|
2003-12-28 04:18:13 +00:00
|
|
|
{
|
|
|
|
#ifdef MAC
|
2005-01-29 23:12:00 +00:00
|
|
|
struct image_args args;
|
2015-05-10 09:00:40 +00:00
|
|
|
struct vmspace *oldvmspace;
|
|
|
|
int error;
|
2005-01-29 23:12:00 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
error = pre_execve(td, &oldvmspace);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
2005-01-29 23:12:00 +00:00
|
|
|
error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
|
|
|
|
uap->argv, uap->envv);
|
|
|
|
if (error == 0)
|
2020-09-23 18:03:07 +00:00
|
|
|
error = kern_execve(td, &args, uap->mac_p, oldvmspace);
|
2015-05-10 09:00:40 +00:00
|
|
|
post_execve(td, error, oldvmspace);
|
2020-10-24 14:39:17 +00:00
|
|
|
AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
|
2005-01-29 23:12:00 +00:00
|
|
|
return (error);
|
2003-12-28 04:18:13 +00:00
|
|
|
#else
|
|
|
|
return (ENOSYS);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2004-10-07 13:50:10 +00:00
|
|
|
int
|
2015-05-10 09:00:40 +00:00
|
|
|
pre_execve(struct thread *td, struct vmspace **oldvmspace)
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
{
|
2015-05-10 09:00:40 +00:00
|
|
|
struct proc *p;
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
int error;
|
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
KASSERT(td == curthread, ("non-current thread %p", td));
|
|
|
|
error = 0;
|
|
|
|
p = td->td_proc;
|
|
|
|
if ((p->p_flag & P_HADTHREADS) != 0) {
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
PROC_LOCK(p);
|
2015-05-10 09:00:40 +00:00
|
|
|
if (thread_single(p, SINGLE_BOUNDARY) != 0)
|
|
|
|
error = ERESTART;
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
}
|
2015-05-10 09:00:40 +00:00
|
|
|
KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
|
|
|
|
("nested execve"));
|
|
|
|
*oldvmspace = p->p_vmspace;
|
|
|
|
return (error);
|
|
|
|
}
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
void
|
|
|
|
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
KASSERT(td == curthread, ("non-current thread %p", td));
|
|
|
|
p = td->td_proc;
|
|
|
|
if ((p->p_flag & P_HADTHREADS) != 0) {
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
/*
|
|
|
|
* If success, we upgrade to SINGLE_EXIT state to
|
|
|
|
* force other threads to suicide.
|
|
|
|
*/
|
2017-11-24 07:35:08 +00:00
|
|
|
if (error == EJUSTRETURN)
|
2014-12-13 16:18:29 +00:00
|
|
|
thread_single(p, SINGLE_EXIT);
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
else
|
2014-12-13 16:18:29 +00:00
|
|
|
thread_single_end(p, SINGLE_BOUNDARY);
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
}
|
2020-09-23 18:03:07 +00:00
|
|
|
exec_cleanup(td, oldvmspace);
|
2015-05-10 09:00:40 +00:00
|
|
|
}
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
/*
|
2020-09-23 18:03:07 +00:00
|
|
|
* kern_execve() has the astonishing property of not always returning to
|
2015-05-10 09:00:40 +00:00
|
|
|
* the caller. If sufficiently bad things happen during the call to
|
|
|
|
* do_execve(), it can end up calling exit1(); as a result, callers must
|
|
|
|
* avoid doing anything which they might need to undo (e.g., allocating
|
|
|
|
* memory).
|
|
|
|
*/
|
|
|
|
int
|
2020-09-23 18:03:07 +00:00
|
|
|
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
|
|
|
|
struct vmspace *oldvmspace)
|
2015-05-10 09:00:40 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
AUDIT_ARG_ARGV(args->begin_argv, args->argc,
|
2018-11-29 21:00:56 +00:00
|
|
|
exec_args_get_begin_envv(args) - args->begin_argv);
|
|
|
|
AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
|
|
|
|
args->endp - exec_args_get_begin_envv(args));
|
2020-09-23 18:03:07 +00:00
|
|
|
return (do_execve(td, args, mac_p, oldvmspace));
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
Remove reference to struct execve_args from struct imgact, which
describes an image activation instance. Instead, make use of the
existing fname structure entry, and introduce two new entries,
userspace_argv, and userspace_envv. With the addition of
mac_execve(), this divorces the image structure from the specifics
of the execve() system call, removes a redundant pointer, etc.
No semantic change from current behavior, but it means that the
structure doesn't depend on syscalls.master-generated includes.
There seems to be some redundant initialization of imgact entries,
which I have maintained, but which could probably use some cleaning
up at some point.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2002-11-05 01:59:56 +00:00
|
|
|
* In-kernel implementation of execve(). All arguments are assumed to be
|
|
|
|
* userspace pointers from the passed thread.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
Remove reference to struct execve_args from struct imgact, which
describes an image activation instance. Instead, make use of the
existing fname structure entry, and introduce two new entries,
userspace_argv, and userspace_envv. With the addition of
mac_execve(), this divorces the image structure from the specifics
of the execve() system call, removes a redundant pointer, etc.
No semantic change from current behavior, but it means that the
structure doesn't depend on syscalls.master-generated includes.
There seems to be some redundant initialization of imgact entries,
which I have maintained, but which could probably use some cleaning
up at some point.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2002-11-05 01:59:56 +00:00
|
|
|
static int
|
2020-09-23 18:03:07 +00:00
|
|
|
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
|
|
|
|
struct vmspace *oldvmspace)
|
1994-05-25 09:21:21 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2009-02-26 16:32:48 +00:00
|
|
|
struct nameidata nd;
|
2016-05-26 23:18:54 +00:00
|
|
|
struct ucred *oldcred;
|
2014-07-01 09:21:32 +00:00
|
|
|
struct uidinfo *euip = NULL;
|
2019-12-03 23:17:54 +00:00
|
|
|
uintptr_t stack_base;
|
1995-11-06 12:52:37 +00:00
|
|
|
struct image_params image_params, *imgp;
|
2005-10-12 06:56:00 +00:00
|
|
|
struct vattr attr;
|
2002-03-19 21:25:46 +00:00
|
|
|
int (*img_first)(struct image_params *);
|
2002-06-20 17:27:28 +00:00
|
|
|
struct pargs *oldargs = NULL, *newargs = NULL;
|
2016-05-27 15:03:38 +00:00
|
|
|
struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
|
2002-06-07 05:41:27 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
struct vnode *tracevp = NULL;
|
2003-03-13 18:24:22 +00:00
|
|
|
struct ucred *tracecred = NULL;
|
2002-06-07 05:41:27 +00:00
|
|
|
#endif
|
2015-07-14 01:13:37 +00:00
|
|
|
struct vnode *oldtextvp = NULL, *newtextvp;
|
2002-07-27 18:06:49 +00:00
|
|
|
int credential_changing;
|
2002-11-05 14:57:49 +00:00
|
|
|
#ifdef MAC
|
Introduce two related changes to the TrustedBSD MAC Framework:
(1) Abstract interpreter vnode labeling in execve(2) and mac_execve(2)
so that the general exec code isn't aware of the details of
allocating, copying, and freeing labels, rather, simply passes in
a void pointer to start and stop functions that will be used by
the framework. This change will be MFC'd.
(2) Introduce a new flags field to the MAC_POLICY_SET(9) interface
allowing policies to declare which types of objects require label
allocation, initialization, and destruction, and define a set of
flags covering various supported object types (MPC_OBJECT_PROC,
MPC_OBJECT_VNODE, MPC_OBJECT_INPCB, ...). This change reduces the
overhead of compiling the MAC Framework into the kernel if policies
aren't loaded, or if policies require labels on only a small number
or even no object types. Each time a policy is loaded or unloaded,
we recalculate a mask of labeled object types across all policies
present in the system. Eliminate MAC_ALWAYS_LABEL_MBUF option as it
is no longer required.
MFC after: 1 week ((1) only)
Reviewed by: csjp
Obtained from: TrustedBSD Project
Sponsored by: Apple, Inc.
2008-08-23 15:26:36 +00:00
|
|
|
struct label *interpvplabel = NULL;
|
Modify the MAC Framework so that instead of embedding a (struct label)
in various kernel objects to represent security data, we embed a
(struct label *) pointer, which now references labels allocated using
a UMA zone (mac_label.c). This allows the size and shape of struct
label to be varied without changing the size and shape of these kernel
objects, which become part of the frozen ABI with 5-STABLE. This opens
the door for boot-time selection of the number of label slots, and hence
changes to the bound on the number of simultaneous labeled policies
at boot-time instead of compile-time. This also makes it easier to
embed label references in new objects as required for locking/caching
with fine-grained network stack locking, such as inpcb structures.
This change also moves us further in the direction of hiding the
structure of kernel objects from MAC policy modules, not to mention
dramatically reducing the number of '&' symbols appearing in both the
MAC Framework and MAC policy modules, and improving readability.
While this results in minimal performance change with MAC enabled, it
will observably shrink the size of a number of critical kernel data
structures for the !MAC case, and should have a small (but measurable)
performance benefit (i.e., struct vnode, struct socket) do to memory
conservation and reduced cost of zeroing memory.
NOTE: Users of MAC must recompile their kernel and all MAC modules as a
result of this change. Because this is an API change, third party
MAC modules will also need to be updated to make less use of the '&'
symbol.
Suggestions from: bmilekic
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2003-11-12 03:14:31 +00:00
|
|
|
int will_transition;
|
2002-11-05 14:57:49 +00:00
|
|
|
#endif
|
2005-06-30 19:01:26 +00:00
|
|
|
#ifdef HWPMC_HOOKS
|
|
|
|
struct pmckern_procexec pe;
|
|
|
|
#endif
|
2019-11-17 14:52:45 +00:00
|
|
|
int error, i, orig_osrel;
|
|
|
|
uint32_t orig_fctl0;
|
2008-03-31 12:05:52 +00:00
|
|
|
static const char fexecv_proc_title[] = "(fexecv)";
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1995-11-06 12:52:37 +00:00
|
|
|
imgp = &image_params;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2001-10-27 11:11:25 +00:00
|
|
|
/*
|
|
|
|
* Lock the process and set the P_INEXEC flag to indicate that
|
|
|
|
* it should be left alone until we're done here. This is
|
|
|
|
* necessary to avoid race conditions - e.g. in ptrace() -
|
|
|
|
* that might allow a local user to illicitly obtain elevated
|
|
|
|
* privileges.
|
|
|
|
*/
|
|
|
|
PROC_LOCK(p);
|
|
|
|
KASSERT((p->p_flag & P_INEXEC) == 0,
|
2001-12-10 05:40:12 +00:00
|
|
|
("%s(): process already has P_INEXEC flag", __func__));
|
2001-10-27 11:11:25 +00:00
|
|
|
p->p_flag |= P_INEXEC;
|
|
|
|
PROC_UNLOCK(p);
|
2001-09-12 08:38:13 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
1995-11-06 12:52:37 +00:00
|
|
|
* Initialize part of the common data
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2014-09-29 23:59:19 +00:00
|
|
|
bzero(imgp, sizeof(*imgp));
|
1995-11-06 12:52:37 +00:00
|
|
|
imgp->proc = p;
|
|
|
|
imgp->attr = &attr;
|
2005-01-29 23:12:00 +00:00
|
|
|
imgp->args = args;
|
2016-05-26 23:18:54 +00:00
|
|
|
oldcred = p->p_ucred;
|
2019-11-17 14:52:45 +00:00
|
|
|
orig_osrel = p->p_osrel;
|
|
|
|
orig_fctl0 = p->p_fctl0;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2002-11-05 17:51:56 +00:00
|
|
|
#ifdef MAC
|
Modify the MAC Framework so that instead of embedding a (struct label)
in various kernel objects to represent security data, we embed a
(struct label *) pointer, which now references labels allocated using
a UMA zone (mac_label.c). This allows the size and shape of struct
label to be varied without changing the size and shape of these kernel
objects, which become part of the frozen ABI with 5-STABLE. This opens
the door for boot-time selection of the number of label slots, and hence
changes to the bound on the number of simultaneous labeled policies
at boot-time instead of compile-time. This also makes it easier to
embed label references in new objects as required for locking/caching
with fine-grained network stack locking, such as inpcb structures.
This change also moves us further in the direction of hiding the
structure of kernel objects from MAC policy modules, not to mention
dramatically reducing the number of '&' symbols appearing in both the
MAC Framework and MAC policy modules, and improving readability.
While this results in minimal performance change with MAC enabled, it
will observably shrink the size of a number of critical kernel data
structures for the !MAC case, and should have a small (but measurable)
performance benefit (i.e., struct vnode, struct socket) do to memory
conservation and reduced cost of zeroing memory.
NOTE: Users of MAC must recompile their kernel and all MAC modules as a
result of this change. Because this is an API change, third party
MAC modules will also need to be updated to make less use of the '&'
symbol.
Suggestions from: bmilekic
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2003-11-12 03:14:31 +00:00
|
|
|
error = mac_execve_enter(imgp, mac_p);
|
2005-05-03 16:24:59 +00:00
|
|
|
if (error)
|
2002-11-05 17:51:56 +00:00
|
|
|
goto exec_fail;
|
|
|
|
#endif
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
|
|
|
* Translate the file name. namei() returns a vnode pointer
|
2016-04-29 22:15:33 +00:00
|
|
|
* in ni_vp among other things.
|
2006-09-01 11:45:40 +00:00
|
|
|
*
|
|
|
|
* XXXAUDIT: It would be desirable to also audit the name of the
|
|
|
|
* interpreter if this is an interpreted binary.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname != NULL) {
|
Switch to use shared vnode locks for text files during image activation.
kern_execve() locks text vnode exclusive to be able to set and clear
VV_TEXT flag. VV_TEXT is mutually exclusive with the v_writecount > 0
condition.
The change removes VV_TEXT, replacing it with the condition
v_writecount <= -1, and puts v_writecount under the vnode interlock.
Each text reference decrements v_writecount. To clear the text
reference when the segment is unmapped, it is recorded in the
vm_map_entry backed by the text file as MAP_ENTRY_VN_TEXT flag, and
v_writecount is incremented on the map entry removal
The operations like VOP_ADD_WRITECOUNT() and VOP_SET_TEXT() check that
v_writecount does not contradict the desired change. vn_writecheck()
is now racy and its use was eliminated everywhere except access.
Atomic check for writeability and increment of v_writecount is
performed by the VOP. vn_truncate() now increments v_writecount
around VOP_SETATTR() call, lack of which is arguably a bug on its own.
nullfs bypasses v_writecount to the lower vnode always, so nullfs
vnode has its own v_writecount correct, and lower vnode gets all
references, since object->handle is always lower vnode.
On the text vnode' vm object dealloc, the v_writecount value is reset
to zero, and deadfs vop_unset_text short-circuit the operation.
Reclamation of lowervp always reclaims all nullfs vnodes referencing
lowervp first, so no stray references are left.
Reviewed by: markj, trasz
Tested by: mjg, pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 month
Differential revision: https://reviews.freebsd.org/D19923
2019-05-05 11:20:43 +00:00
|
|
|
NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
|
|
|
|
SAVENAME | AUDITVNODE1, UIO_SYSSPACE, args->fname, td);
|
2008-03-31 12:05:52 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2015-12-16 23:39:27 +00:00
|
|
|
SDT_PROBE1(proc, , , exec, args->fname);
|
2008-11-05 19:40:36 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
interpret:
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname != NULL) {
|
2011-06-30 10:56:02 +00:00
|
|
|
#ifdef CAPABILITY_MODE
|
|
|
|
/*
|
|
|
|
* While capability mode can't reach this point via direct
|
|
|
|
* path arguments to execve(), we also don't allow
|
|
|
|
* interpreters to be used in capability mode (for now).
|
|
|
|
* Catch indirect lookups and return a permissions error.
|
|
|
|
*/
|
|
|
|
if (IN_CAPABILITY_MODE(td)) {
|
|
|
|
error = ECAPMODE;
|
|
|
|
goto exec_fail;
|
|
|
|
}
|
|
|
|
#endif
|
2009-02-26 16:32:48 +00:00
|
|
|
error = namei(&nd);
|
2008-03-31 12:05:52 +00:00
|
|
|
if (error)
|
|
|
|
goto exec_fail;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2015-07-14 01:13:37 +00:00
|
|
|
newtextvp = nd.ni_vp;
|
|
|
|
imgp->vp = newtextvp;
|
2008-03-31 12:05:52 +00:00
|
|
|
} else {
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_FD(args->fd);
|
2011-08-11 12:30:23 +00:00
|
|
|
/*
|
2012-09-27 16:43:23 +00:00
|
|
|
* Descriptors opened only with O_EXEC or O_RDONLY are allowed.
|
2011-08-11 12:30:23 +00:00
|
|
|
*/
|
2018-05-09 18:47:24 +00:00
|
|
|
error = fgetvp_exec(td, args->fd, &cap_fexecve_rights, &newtextvp);
|
2008-03-31 12:05:52 +00:00
|
|
|
if (error)
|
|
|
|
goto exec_fail;
|
Switch to use shared vnode locks for text files during image activation.
kern_execve() locks text vnode exclusive to be able to set and clear
VV_TEXT flag. VV_TEXT is mutually exclusive with the v_writecount > 0
condition.
The change removes VV_TEXT, replacing it with the condition
v_writecount <= -1, and puts v_writecount under the vnode interlock.
Each text reference decrements v_writecount. To clear the text
reference when the segment is unmapped, it is recorded in the
vm_map_entry backed by the text file as MAP_ENTRY_VN_TEXT flag, and
v_writecount is incremented on the map entry removal
The operations like VOP_ADD_WRITECOUNT() and VOP_SET_TEXT() check that
v_writecount does not contradict the desired change. vn_writecheck()
is now racy and its use was eliminated everywhere except access.
Atomic check for writeability and increment of v_writecount is
performed by the VOP. vn_truncate() now increments v_writecount
around VOP_SETATTR() call, lack of which is arguably a bug on its own.
nullfs bypasses v_writecount to the lower vnode always, so nullfs
vnode has its own v_writecount correct, and lower vnode gets all
references, since object->handle is always lower vnode.
On the text vnode' vm object dealloc, the v_writecount value is reset
to zero, and deadfs vop_unset_text short-circuit the operation.
Reclamation of lowervp always reclaims all nullfs vnodes referencing
lowervp first, so no stray references are left.
Reviewed by: markj, trasz
Tested by: mjg, pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 month
Differential revision: https://reviews.freebsd.org/D19923
2019-05-05 11:20:43 +00:00
|
|
|
vn_lock(newtextvp, LK_SHARED | LK_RETRY);
|
2015-07-14 01:13:37 +00:00
|
|
|
AUDIT_ARG_VNODE1(newtextvp);
|
|
|
|
imgp->vp = newtextvp;
|
2008-03-31 12:05:52 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1995-03-19 23:08:12 +00:00
|
|
|
/*
|
Switch to use shared vnode locks for text files during image activation.
kern_execve() locks text vnode exclusive to be able to set and clear
VV_TEXT flag. VV_TEXT is mutually exclusive with the v_writecount > 0
condition.
The change removes VV_TEXT, replacing it with the condition
v_writecount <= -1, and puts v_writecount under the vnode interlock.
Each text reference decrements v_writecount. To clear the text
reference when the segment is unmapped, it is recorded in the
vm_map_entry backed by the text file as MAP_ENTRY_VN_TEXT flag, and
v_writecount is incremented on the map entry removal
The operations like VOP_ADD_WRITECOUNT() and VOP_SET_TEXT() check that
v_writecount does not contradict the desired change. vn_writecheck()
is now racy and its use was eliminated everywhere except access.
Atomic check for writeability and increment of v_writecount is
performed by the VOP. vn_truncate() now increments v_writecount
around VOP_SETATTR() call, lack of which is arguably a bug on its own.
nullfs bypasses v_writecount to the lower vnode always, so nullfs
vnode has its own v_writecount correct, and lower vnode gets all
references, since object->handle is always lower vnode.
On the text vnode' vm object dealloc, the v_writecount value is reset
to zero, and deadfs vop_unset_text short-circuit the operation.
Reclamation of lowervp always reclaims all nullfs vnodes referencing
lowervp first, so no stray references are left.
Reviewed by: markj, trasz
Tested by: mjg, pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 month
Differential revision: https://reviews.freebsd.org/D19923
2019-05-05 11:20:43 +00:00
|
|
|
* Check file permissions. Also 'opens' file and sets its vnode to
|
|
|
|
* text mode.
|
1995-03-19 23:08:12 +00:00
|
|
|
*/
|
1995-11-06 12:52:37 +00:00
|
|
|
error = exec_check_permissions(imgp);
|
2002-08-13 06:55:28 +00:00
|
|
|
if (error)
|
1994-05-25 09:21:21 +00:00
|
|
|
goto exec_fail_dealloc;
|
2002-08-13 06:55:28 +00:00
|
|
|
|
2005-01-25 00:40:01 +00:00
|
|
|
imgp->object = imgp->vp->v_object;
|
|
|
|
if (imgp->object != NULL)
|
2002-08-13 06:55:28 +00:00
|
|
|
vm_object_reference(imgp->object);
|
|
|
|
|
1998-01-11 21:35:38 +00:00
|
|
|
error = exec_map_first_page(imgp);
|
1997-04-04 04:17:11 +00:00
|
|
|
if (error)
|
1994-05-25 09:21:21 +00:00
|
|
|
goto exec_fail_dealloc;
|
|
|
|
|
2007-12-04 12:28:07 +00:00
|
|
|
imgp->proc->p_osrel = 0;
|
2018-11-23 23:07:57 +00:00
|
|
|
imgp->proc->p_fctl0 = 0;
|
2016-05-26 23:18:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Implement image setuid/setgid.
|
|
|
|
*
|
|
|
|
* Determine new credentials before attempting image activators
|
|
|
|
* so that it can be used by process_exec handlers to determine
|
|
|
|
* credential/setid changes.
|
|
|
|
*
|
|
|
|
* Don't honor setuid/setgid if the filesystem prohibits it or if
|
|
|
|
* the process is being traced.
|
|
|
|
*
|
|
|
|
* We disable setuid/setgid/etc in capability mode on the basis
|
|
|
|
* that most setugid applications are not written with that
|
|
|
|
* environment in mind, and will therefore almost certainly operate
|
|
|
|
* incorrectly. In principle there's no reason that setugid
|
|
|
|
* applications might not be useful in capability mode, so we may want
|
|
|
|
* to reconsider this conservative design choice in the future.
|
|
|
|
*
|
|
|
|
* XXXMAC: For the time being, use NOSUID to also prohibit
|
|
|
|
* transitions on the file system.
|
|
|
|
*/
|
|
|
|
credential_changing = 0;
|
|
|
|
credential_changing |= (attr.va_mode & S_ISUID) &&
|
|
|
|
oldcred->cr_uid != attr.va_uid;
|
|
|
|
credential_changing |= (attr.va_mode & S_ISGID) &&
|
|
|
|
oldcred->cr_gid != attr.va_gid;
|
|
|
|
#ifdef MAC
|
|
|
|
will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
|
|
|
|
interpvplabel, imgp);
|
|
|
|
credential_changing |= will_transition;
|
|
|
|
#endif
|
|
|
|
|
2018-04-20 15:19:27 +00:00
|
|
|
/* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
|
2018-04-18 21:31:13 +00:00
|
|
|
if (credential_changing)
|
|
|
|
imgp->proc->p_pdeathsig = 0;
|
|
|
|
|
2016-05-26 23:18:54 +00:00
|
|
|
if (credential_changing &&
|
|
|
|
#ifdef CAPABILITY_MODE
|
|
|
|
((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
|
|
|
|
#endif
|
|
|
|
(imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
|
|
|
|
(p->p_flag & P_TRACED) == 0) {
|
|
|
|
imgp->credential_setid = true;
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(imgp->vp);
|
2016-05-26 23:18:54 +00:00
|
|
|
imgp->newcred = crdup(oldcred);
|
|
|
|
if (attr.va_mode & S_ISUID) {
|
|
|
|
euip = uifind(attr.va_uid);
|
|
|
|
change_euid(imgp->newcred, euip);
|
|
|
|
}
|
2019-09-07 16:10:57 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2016-05-26 23:18:54 +00:00
|
|
|
if (attr.va_mode & S_ISGID)
|
|
|
|
change_egid(imgp->newcred, attr.va_gid);
|
|
|
|
/*
|
|
|
|
* Implement correct POSIX saved-id behavior.
|
|
|
|
*
|
|
|
|
* XXXMAC: Note that the current logic will save the
|
|
|
|
* uid and gid if a MAC domain transition occurs, even
|
|
|
|
* though maybe it shouldn't.
|
|
|
|
*/
|
|
|
|
change_svuid(imgp->newcred, imgp->newcred->cr_uid);
|
|
|
|
change_svgid(imgp->newcred, imgp->newcred->cr_gid);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Implement correct POSIX saved-id behavior.
|
|
|
|
*
|
|
|
|
* XXX: It's not clear that the existing behavior is
|
|
|
|
* POSIX-compliant. A number of sources indicate that the
|
|
|
|
* saved uid/gid should only be updated if the new ruid is
|
|
|
|
* not equal to the old ruid, or the new euid is not equal
|
|
|
|
* to the old euid and the new euid is not equal to the old
|
|
|
|
* ruid. The FreeBSD code always updates the saved uid/gid.
|
|
|
|
* Also, this code uses the new (replaced) euid and egid as
|
|
|
|
* the source, which may or may not be the right ones to use.
|
|
|
|
*/
|
|
|
|
if (oldcred->cr_svuid != oldcred->cr_uid ||
|
|
|
|
oldcred->cr_svgid != oldcred->cr_gid) {
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(imgp->vp);
|
2016-05-26 23:18:54 +00:00
|
|
|
imgp->newcred = crdup(oldcred);
|
2019-09-07 16:10:57 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2016-05-26 23:18:54 +00:00
|
|
|
change_svuid(imgp->newcred, imgp->newcred->cr_uid);
|
|
|
|
change_svgid(imgp->newcred, imgp->newcred->cr_gid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* The new credentials are installed into the process later. */
|
|
|
|
|
2016-05-26 23:19:39 +00:00
|
|
|
/*
|
|
|
|
* Do the best to calculate the full path to the image file.
|
|
|
|
*/
|
|
|
|
if (args->fname != NULL && args->fname[0] == '/')
|
|
|
|
imgp->execpath = args->fname;
|
|
|
|
else {
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(imgp->vp);
|
2020-08-24 08:57:02 +00:00
|
|
|
if (vn_fullpath(imgp->vp, &imgp->execpath, &imgp->freepath) != 0)
|
2016-05-26 23:19:39 +00:00
|
|
|
imgp->execpath = args->fname;
|
2019-09-07 16:10:57 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2016-05-26 23:19:39 +00:00
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2000-04-26 20:58:40 +00:00
|
|
|
* If the current process has a special image activator it
|
2005-01-29 23:12:00 +00:00
|
|
|
* wants to try first, call it. For example, emulating shell
|
2000-04-26 20:58:40 +00:00
|
|
|
* scripts differently.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2000-04-26 20:58:40 +00:00
|
|
|
error = -1;
|
|
|
|
if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
|
|
|
|
error = img_first(imgp);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Loop through the list of image activators, calling each one.
|
|
|
|
* An activator returns -1 if there is no match, 0 on success,
|
|
|
|
* and an error otherwise.
|
|
|
|
*/
|
|
|
|
for (i = 0; error == -1 && execsw[i]; ++i) {
|
|
|
|
if (execsw[i]->ex_imgact == NULL ||
|
|
|
|
execsw[i]->ex_imgact == img_first) {
|
1994-05-25 09:21:21 +00:00
|
|
|
continue;
|
|
|
|
}
|
2000-04-26 20:58:40 +00:00
|
|
|
error = (*execsw[i]->ex_imgact)(imgp);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
2000-04-26 20:58:40 +00:00
|
|
|
|
|
|
|
if (error) {
|
Switch to use shared vnode locks for text files during image activation.
kern_execve() locks text vnode exclusive to be able to set and clear
VV_TEXT flag. VV_TEXT is mutually exclusive with the v_writecount > 0
condition.
The change removes VV_TEXT, replacing it with the condition
v_writecount <= -1, and puts v_writecount under the vnode interlock.
Each text reference decrements v_writecount. To clear the text
reference when the segment is unmapped, it is recorded in the
vm_map_entry backed by the text file as MAP_ENTRY_VN_TEXT flag, and
v_writecount is incremented on the map entry removal
The operations like VOP_ADD_WRITECOUNT() and VOP_SET_TEXT() check that
v_writecount does not contradict the desired change. vn_writecheck()
is now racy and its use was eliminated everywhere except access.
Atomic check for writeability and increment of v_writecount is
performed by the VOP. vn_truncate() now increments v_writecount
around VOP_SETATTR() call, lack of which is arguably a bug on its own.
nullfs bypasses v_writecount to the lower vnode always, so nullfs
vnode has its own v_writecount correct, and lower vnode gets all
references, since object->handle is always lower vnode.
On the text vnode' vm object dealloc, the v_writecount value is reset
to zero, and deadfs vop_unset_text short-circuit the operation.
Reclamation of lowervp always reclaims all nullfs vnodes referencing
lowervp first, so no stray references are left.
Reviewed by: markj, trasz
Tested by: mjg, pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 month
Differential revision: https://reviews.freebsd.org/D19923
2019-05-05 11:20:43 +00:00
|
|
|
if (error == -1)
|
2000-04-26 20:58:40 +00:00
|
|
|
error = ENOEXEC;
|
1994-05-25 09:21:21 +00:00
|
|
|
goto exec_fail_dealloc;
|
|
|
|
}
|
|
|
|
|
2000-04-26 20:58:40 +00:00
|
|
|
/*
|
|
|
|
* Special interpreter operation, cleanup and loop up to try to
|
|
|
|
* activate the interpreter.
|
|
|
|
*/
|
|
|
|
if (imgp->interpreted) {
|
|
|
|
exec_unmap_first_page(imgp);
|
2002-08-13 06:55:28 +00:00
|
|
|
/*
|
Switch to use shared vnode locks for text files during image activation.
kern_execve() locks text vnode exclusive to be able to set and clear
VV_TEXT flag. VV_TEXT is mutually exclusive with the v_writecount > 0
condition.
The change removes VV_TEXT, replacing it with the condition
v_writecount <= -1, and puts v_writecount under the vnode interlock.
Each text reference decrements v_writecount. To clear the text
reference when the segment is unmapped, it is recorded in the
vm_map_entry backed by the text file as MAP_ENTRY_VN_TEXT flag, and
v_writecount is incremented on the map entry removal
The operations like VOP_ADD_WRITECOUNT() and VOP_SET_TEXT() check that
v_writecount does not contradict the desired change. vn_writecheck()
is now racy and its use was eliminated everywhere except access.
Atomic check for writeability and increment of v_writecount is
performed by the VOP. vn_truncate() now increments v_writecount
around VOP_SETATTR() call, lack of which is arguably a bug on its own.
nullfs bypasses v_writecount to the lower vnode always, so nullfs
vnode has its own v_writecount correct, and lower vnode gets all
references, since object->handle is always lower vnode.
On the text vnode' vm object dealloc, the v_writecount value is reset
to zero, and deadfs vop_unset_text short-circuit the operation.
Reclamation of lowervp always reclaims all nullfs vnodes referencing
lowervp first, so no stray references are left.
Reviewed by: markj, trasz
Tested by: mjg, pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 month
Differential revision: https://reviews.freebsd.org/D19923
2019-05-05 11:20:43 +00:00
|
|
|
* The text reference needs to be removed for scripts.
|
|
|
|
* There is a short period before we determine that
|
|
|
|
* something is a script where text reference is active.
|
|
|
|
* The vnode lock is held over this entire period
|
|
|
|
* so nothing should illegitimately be blocked.
|
2002-08-13 06:55:28 +00:00
|
|
|
*/
|
2019-09-07 16:05:17 +00:00
|
|
|
MPASS(imgp->textset);
|
|
|
|
VOP_UNSET_TEXT_CHECKED(newtextvp);
|
|
|
|
imgp->textset = false;
|
2000-04-26 20:58:40 +00:00
|
|
|
/* free name buffer and old vnode */
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname != NULL)
|
2009-02-26 16:32:48 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2002-11-05 17:51:56 +00:00
|
|
|
#ifdef MAC
|
2015-07-14 01:13:37 +00:00
|
|
|
mac_execve_interpreter_enter(newtextvp, &interpvplabel);
|
2002-11-05 17:51:56 +00:00
|
|
|
#endif
|
2008-07-17 16:44:07 +00:00
|
|
|
if (imgp->opened) {
|
2015-07-14 01:13:37 +00:00
|
|
|
VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
|
2008-07-17 16:44:07 +00:00
|
|
|
imgp->opened = 0;
|
|
|
|
}
|
2015-07-14 01:13:37 +00:00
|
|
|
vput(newtextvp);
|
2002-07-06 07:00:01 +00:00
|
|
|
vm_object_deallocate(imgp->object);
|
|
|
|
imgp->object = NULL;
|
2016-05-26 23:18:54 +00:00
|
|
|
imgp->credential_setid = false;
|
|
|
|
if (imgp->newcred != NULL) {
|
|
|
|
crfree(imgp->newcred);
|
|
|
|
imgp->newcred = NULL;
|
|
|
|
}
|
2016-05-26 23:19:39 +00:00
|
|
|
imgp->execpath = NULL;
|
|
|
|
free(imgp->freepath, M_TEMP);
|
|
|
|
imgp->freepath = NULL;
|
2000-04-26 20:58:40 +00:00
|
|
|
/* set new name to that of the interpreter */
|
2020-03-04 19:52:34 +00:00
|
|
|
NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
|
|
|
|
SAVENAME, UIO_SYSSPACE, imgp->interpreter_name, td);
|
2008-03-31 12:05:52 +00:00
|
|
|
args->fname = imgp->interpreter_name;
|
2000-04-26 20:58:40 +00:00
|
|
|
goto interpret;
|
|
|
|
}
|
|
|
|
|
2008-08-12 21:27:48 +00:00
|
|
|
/*
|
|
|
|
* NB: We unlock the vnode here because it is believed that none
|
|
|
|
* of the sv_copyout_strings/sv_fixup operations require the vnode.
|
|
|
|
*/
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(imgp->vp);
|
2009-03-17 12:53:28 +00:00
|
|
|
|
2013-10-15 06:38:40 +00:00
|
|
|
if (disallow_high_osrel &&
|
|
|
|
P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
|
|
|
|
error = ENOEXEC;
|
|
|
|
uprintf("Osrel %d for image %s too high\n", p->p_osrel,
|
|
|
|
imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
|
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
|
|
|
goto exec_fail_dealloc;
|
|
|
|
}
|
|
|
|
|
2015-08-03 13:41:47 +00:00
|
|
|
/* ABI enforces the use of Capsicum. Switch into capabilities mode. */
|
|
|
|
if (SV_PROC_FLAG(p, SV_CAPSICUM))
|
|
|
|
sys_cap_enter(td, NULL);
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2019-04-12 14:18:16 +00:00
|
|
|
* Copy out strings (args and env) and initialize stack base.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2019-11-18 20:07:43 +00:00
|
|
|
error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
|
|
|
|
if (error != 0) {
|
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
|
|
|
goto exec_fail_dealloc;
|
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
|
|
|
/*
|
2019-04-12 14:18:16 +00:00
|
|
|
* Stack setup.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2019-04-12 14:18:16 +00:00
|
|
|
error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
|
2019-02-08 04:06:48 +00:00
|
|
|
if (error != 0) {
|
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2018-05-24 16:25:18 +00:00
|
|
|
goto exec_fail_dealloc;
|
2019-02-08 04:06:48 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
Implement CloudABI's exec() call.
Summary:
In a runtime that is purely based on capability-based security, there is
a strong emphasis on how programs start their execution. We need to make
sure that we execute an new program with an exact set of file
descriptors, ensuring that credentials are not leaked into the process
accidentally.
Providing the right file descriptors is just half the problem. There
also needs to be a framework in place that gives meaning to these file
descriptors. How does a CloudABI mail server know which of the file
descriptors corresponds to the socket that receives incoming emails?
Furthermore, how will this mail server acquire its configuration
parameters, as it cannot open a configuration file from a global path on
disk?
CloudABI solves this problem by replacing traditional string command
line arguments by tree-like data structure consisting of scalars,
sequences and mappings (similar to YAML/JSON). In this structure, file
descriptors are treated as a first-class citizen. When calling exec(),
file descriptors are passed on to the new executable if and only if they
are referenced from this tree structure. See the cloudabi-run(1) man
page for more details and examples (sysutils/cloudabi-utils).
Fortunately, the kernel does not need to care about this tree structure
at all. The C library is responsible for serializing and deserializing,
but also for extracting the list of referenced file descriptors. The
system call only receives a copy of the serialized data and a layout of
what the new file descriptor table should look like:
int proc_exec(int execfd, const void *data, size_t datalen, const int *fds,
size_t fdslen);
This change introduces a set of fd*_remapped() functions:
- fdcopy_remapped() pulls a copy of a file descriptor table, remapping
all of the file descriptors according to the provided mapping table.
- fdinstall_remapped() replaces the file descriptor table of the process
by the copy created by fdcopy_remapped().
- fdescfree_remapped() frees the table in case we aborted before
fdinstall_remapped().
We then add a function exec_copyin_data_fds() that builds on top these
functions. It copies in the data and constructs a new remapped file
descriptor. This is used by cloudabi_sys_proc_exec().
Test Plan:
cloudabi-run(1) is capable of spawning processes successfully, providing
it data and file descriptors. procstat -f seems to confirm all is good.
Regular FreeBSD processes also work properly.
Reviewers: kib, mjg
Reviewed By: mjg
Subscribers: imp
Differential Revision: https://reviews.freebsd.org/D3079
2015-07-16 07:05:42 +00:00
|
|
|
if (args->fdp != NULL) {
|
|
|
|
/* Install a brand new file descriptor table. */
|
|
|
|
fdinstall_remapped(td, args->fdp);
|
|
|
|
args->fdp = NULL;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Keep on using the existing file descriptor table. For
|
|
|
|
* security and other reasons, the file descriptor table
|
|
|
|
* cannot be shared after an exec.
|
|
|
|
*/
|
|
|
|
fdunshare(td);
|
2020-11-17 21:14:13 +00:00
|
|
|
pdunshare(td);
|
Implement CloudABI's exec() call.
Summary:
In a runtime that is purely based on capability-based security, there is
a strong emphasis on how programs start their execution. We need to make
sure that we execute an new program with an exact set of file
descriptors, ensuring that credentials are not leaked into the process
accidentally.
Providing the right file descriptors is just half the problem. There
also needs to be a framework in place that gives meaning to these file
descriptors. How does a CloudABI mail server know which of the file
descriptors corresponds to the socket that receives incoming emails?
Furthermore, how will this mail server acquire its configuration
parameters, as it cannot open a configuration file from a global path on
disk?
CloudABI solves this problem by replacing traditional string command
line arguments by tree-like data structure consisting of scalars,
sequences and mappings (similar to YAML/JSON). In this structure, file
descriptors are treated as a first-class citizen. When calling exec(),
file descriptors are passed on to the new executable if and only if they
are referenced from this tree structure. See the cloudabi-run(1) man
page for more details and examples (sysutils/cloudabi-utils).
Fortunately, the kernel does not need to care about this tree structure
at all. The C library is responsible for serializing and deserializing,
but also for extracting the list of referenced file descriptors. The
system call only receives a copy of the serialized data and a layout of
what the new file descriptor table should look like:
int proc_exec(int execfd, const void *data, size_t datalen, const int *fds,
size_t fdslen);
This change introduces a set of fd*_remapped() functions:
- fdcopy_remapped() pulls a copy of a file descriptor table, remapping
all of the file descriptors according to the provided mapping table.
- fdinstall_remapped() replaces the file descriptor table of the process
by the copy created by fdcopy_remapped().
- fdescfree_remapped() frees the table in case we aborted before
fdinstall_remapped().
We then add a function exec_copyin_data_fds() that builds on top these
functions. It copies in the data and constructs a new remapped file
descriptor. This is used by cloudabi_sys_proc_exec().
Test Plan:
cloudabi-run(1) is capable of spawning processes successfully, providing
it data and file descriptors. procstat -f seems to confirm all is good.
Regular FreeBSD processes also work properly.
Reviewers: kib, mjg
Reviewed By: mjg
Subscribers: imp
Differential Revision: https://reviews.freebsd.org/D3079
2015-07-16 07:05:42 +00:00
|
|
|
/* close files on exec */
|
|
|
|
fdcloseexec(td);
|
|
|
|
}
|
1997-08-04 05:39:24 +00:00
|
|
|
|
2002-05-02 15:00:14 +00:00
|
|
|
/*
|
|
|
|
* Malloc things before we need locks.
|
|
|
|
*/
|
2018-11-29 21:00:56 +00:00
|
|
|
i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
|
2005-10-01 08:33:56 +00:00
|
|
|
/* Cache arguments if they fit inside our allowance */
|
|
|
|
if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
|
2002-05-02 15:00:14 +00:00
|
|
|
newargs = pargs_alloc(i);
|
2005-10-01 08:33:56 +00:00
|
|
|
bcopy(imgp->args->begin_argv, newargs->ar_args, i);
|
|
|
|
}
|
2002-05-02 15:00:14 +00:00
|
|
|
|
2001-07-09 19:01:42 +00:00
|
|
|
/*
|
|
|
|
* For security and other reasons, signal handlers cannot
|
2001-10-09 17:25:30 +00:00
|
|
|
* be shared after an exec. The new process gets a copy of the old
|
2001-07-11 02:04:43 +00:00
|
|
|
* handlers. In execsigs(), the new process will have its signals
|
2001-07-09 19:01:42 +00:00
|
|
|
* reset.
|
|
|
|
*/
|
- Merge struct procsig with struct sigacts.
- Move struct sigacts out of the u-area and malloc() it using the
M_SUBPROC malloc bucket.
- Add a small sigacts_*() API for managing sigacts structures: sigacts_alloc(),
sigacts_free(), sigacts_copy(), sigacts_share(), and sigacts_shared().
- Remove the p_sigignore, p_sigacts, and p_sigcatch macros.
- Add a mutex to struct sigacts that protects all the members of the struct.
- Add sigacts locking.
- Remove Giant from nosys(), kill(), killpg(), and kern_sigaction() now
that sigacts is locked.
- Several in-kernel functions such as psignal(), tdsignal(), trapsignal(),
and thread_stopped() are now MP safe.
Reviewed by: arch@
Approved by: re (rwatson)
2003-05-13 20:36:02 +00:00
|
|
|
if (sigacts_shared(p->p_sigacts)) {
|
|
|
|
oldsigacts = p->p_sigacts;
|
|
|
|
newsigacts = sigacts_alloc();
|
|
|
|
sigacts_copy(newsigacts, oldsigacts);
|
2014-07-02 05:45:40 +00:00
|
|
|
}
|
2001-07-09 19:01:42 +00:00
|
|
|
|
2016-05-27 15:03:38 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
|
|
|
|
2014-07-01 06:29:15 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
if (oldsigacts)
|
|
|
|
p->p_sigacts = newsigacts;
|
1999-08-11 20:35:38 +00:00
|
|
|
/* Stop profiling */
|
|
|
|
stopprofclock(p);
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/* reset caught signals */
|
|
|
|
execsigs(p);
|
|
|
|
|
|
|
|
/* name this process - nameiexec(p, ndp) */
|
2009-10-23 15:14:54 +00:00
|
|
|
bzero(p->p_comm, sizeof(p->p_comm));
|
|
|
|
if (args->fname)
|
|
|
|
bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
|
|
|
|
min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
|
2015-07-14 01:13:37 +00:00
|
|
|
else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
|
2009-10-23 15:14:54 +00:00
|
|
|
bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
|
2007-11-14 06:04:57 +00:00
|
|
|
bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
|
2012-03-08 19:41:05 +00:00
|
|
|
#ifdef KTR
|
|
|
|
sched_clear_tdname(td);
|
|
|
|
#endif
|
1995-05-30 08:16:23 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
1996-04-29 15:07:59 +00:00
|
|
|
* mark as execed, wakeup the process that vforked (if any) and tell
|
1998-04-17 22:37:19 +00:00
|
|
|
* it that it now has its own resources back
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
|
|
|
p->p_flag |= P_EXEC;
|
2015-01-18 15:13:11 +00:00
|
|
|
if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
|
|
|
|
p->p_flag2 &= ~P2_NOTRACE;
|
2019-09-03 18:56:25 +00:00
|
|
|
if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
|
|
|
|
p->p_flag2 &= ~P2_STKGAP_DISABLE;
|
2014-07-14 22:40:46 +00:00
|
|
|
if (p->p_flag & P_PPWAIT) {
|
2013-02-07 15:34:22 +00:00
|
|
|
p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
|
2008-12-05 20:50:24 +00:00
|
|
|
cv_broadcast(&p->p_pwait);
|
2016-07-18 10:53:47 +00:00
|
|
|
/* STOPs are no longer ignored, arrange for AST */
|
|
|
|
signotify(td);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
1995-05-30 08:16:23 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2016-05-26 23:18:54 +00:00
|
|
|
* Implement image setuid/setgid installation.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2016-05-26 23:18:54 +00:00
|
|
|
if (imgp->credential_setid) {
|
1995-11-06 12:52:37 +00:00
|
|
|
/*
|
|
|
|
* Turn off syscall tracing for set-id programs, except for
|
2001-06-16 23:34:23 +00:00
|
|
|
* root. Record any set-id flags first to make sure that
|
|
|
|
* we do not regain any tracing during a possible block.
|
1995-11-06 12:52:37 +00:00
|
|
|
*/
|
2001-06-16 23:34:23 +00:00
|
|
|
setsugid(p);
|
2006-11-06 13:42:10 +00:00
|
|
|
|
2002-06-07 05:41:27 +00:00
|
|
|
#ifdef KTRACE
|
2012-10-06 19:23:44 +00:00
|
|
|
if (p->p_tracecred != NULL &&
|
2018-12-11 19:32:16 +00:00
|
|
|
priv_check_cred(p->p_tracecred, PRIV_DEBUG_DIFFCRED))
|
2010-10-21 19:17:40 +00:00
|
|
|
ktrprocexec(p, &tracecred, &tracevp);
|
2002-06-07 05:41:27 +00:00
|
|
|
#endif
|
2002-09-13 09:31:56 +00:00
|
|
|
/*
|
2002-09-14 18:55:11 +00:00
|
|
|
* Close any file descriptors 0..2 that reference procfs,
|
|
|
|
* then make sure file descriptors 0..2 are in use.
|
2002-09-13 09:31:56 +00:00
|
|
|
*
|
2014-10-31 09:56:00 +00:00
|
|
|
* Both fdsetugidsafety() and fdcheckstd() may call functions
|
|
|
|
* taking sleepable locks, so temporarily drop our locks.
|
2002-09-13 09:31:56 +00:00
|
|
|
*/
|
|
|
|
PROC_UNLOCK(p);
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(imgp->vp);
|
2014-10-22 00:23:43 +00:00
|
|
|
fdsetugidsafety(td);
|
2002-04-19 00:45:29 +00:00
|
|
|
error = fdcheckstd(td);
|
2014-07-07 14:03:30 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2016-05-27 15:03:38 +00:00
|
|
|
if (error != 0)
|
|
|
|
goto exec_fail_dealloc;
|
2002-10-11 21:04:01 +00:00
|
|
|
PROC_LOCK(p);
|
2002-11-05 14:57:49 +00:00
|
|
|
#ifdef MAC
|
2002-11-05 17:51:56 +00:00
|
|
|
if (will_transition) {
|
2016-05-26 23:18:54 +00:00
|
|
|
mac_vnode_execve_transition(oldcred, imgp->newcred,
|
|
|
|
imgp->vp, interpvplabel, imgp);
|
2002-11-05 17:51:56 +00:00
|
|
|
}
|
2002-11-05 14:57:49 +00:00
|
|
|
#endif
|
1995-11-06 12:52:37 +00:00
|
|
|
} else {
|
o Merge contents of struct pcred into struct ucred. Specifically, add the
real uid, saved uid, real gid, and saved gid to ucred, as well as the
pcred->pc_uidinfo, which was associated with the real uid, only rename
it to cr_ruidinfo so as not to conflict with cr_uidinfo, which
corresponds to the effective uid.
o Remove p_cred from struct proc; add p_ucred to struct proc, replacing
original macro that pointed.
p->p_ucred to p->p_cred->pc_ucred.
o Universally update code so that it makes use of ucred instead of pcred,
p->p_ucred instead of p->p_pcred, cr_ruidinfo instead of p_uidinfo,
cr_{r,sv}{u,g}id instead of p_*, etc.
o Remove pcred0 and its initialization from init_main.c; initialize
cr_ruidinfo there.
o Restruction many credential modification chunks to always crdup while
we figure out locking and optimizations; generally speaking, this
means moving to a structure like this:
newcred = crdup(oldcred);
...
p->p_ucred = newcred;
crfree(oldcred);
It's not race-free, but better than nothing. There are also races
in sys_process.c, all inter-process authorization, fork, exec, and
exit.
o Remove sigio->sio_ruid since sigio->sio_ucred now contains the ruid;
remove comments indicating that the old arrangement was a problem.
o Restructure exec1() a little to use newcred/oldcred arrangement, and
use improved uid management primitives.
o Clean up exit1() so as to do less work in credential cleanup due to
pcred removal.
o Clean up fork1() so as to do less work in credential cleanup and
allocation.
o Clean up ktrcanset() to take into account changes, and move to using
suser_xxx() instead of performing a direct uid==0 comparision.
o Improve commenting in various kern_prot.c credential modification
calls to better document current behavior. In a couple of places,
current behavior is a little questionable and we need to check
POSIX.1 to make sure it's "right". More commenting work still
remains to be done.
o Update credential management calls, such as crfree(), to take into
account new ruidinfo reference.
o Modify or add the following uid and gid helper routines:
change_euid()
change_egid()
change_ruid()
change_rgid()
change_svuid()
change_svgid()
In each case, the call now acts on a credential not a process, and as
such no longer requires more complicated process locking/etc. They
now assume the caller will do any necessary allocation of an
exclusive credential reference. Each is commented to document its
reference requirements.
o CANSIGIO() is simplified to require only credentials, not processes
and pcreds.
o Remove lots of (p_pcred==NULL) checks.
o Add an XXX to authorization code in nfs_lock.c, since it's
questionable, and needs to be considered carefully.
o Simplify posix4 authorization code to require only credentials, not
processes and pcreds. Note that this authorization, as well as
CANSIGIO(), needs to be updated to use the p_cansignal() and
p_cansched() centralized authorization routines, as they currently
do not take into account some desirable restrictions that are handled
by the centralized routines, as well as being inconsistent with other
similar authorization instances.
o Update libkvm to take these changes into account.
Obtained from: TrustedBSD Project
Reviewed by: green, bde, jhb, freebsd-arch, freebsd-audit
2001-05-25 16:59:11 +00:00
|
|
|
if (oldcred->cr_uid == oldcred->cr_ruid &&
|
|
|
|
oldcred->cr_gid == oldcred->cr_rgid)
|
1997-02-19 03:51:34 +00:00
|
|
|
p->p_flag &= ~P_SUGID;
|
o Merge contents of struct pcred into struct ucred. Specifically, add the
real uid, saved uid, real gid, and saved gid to ucred, as well as the
pcred->pc_uidinfo, which was associated with the real uid, only rename
it to cr_ruidinfo so as not to conflict with cr_uidinfo, which
corresponds to the effective uid.
o Remove p_cred from struct proc; add p_ucred to struct proc, replacing
original macro that pointed.
p->p_ucred to p->p_cred->pc_ucred.
o Universally update code so that it makes use of ucred instead of pcred,
p->p_ucred instead of p->p_pcred, cr_ruidinfo instead of p_uidinfo,
cr_{r,sv}{u,g}id instead of p_*, etc.
o Remove pcred0 and its initialization from init_main.c; initialize
cr_ruidinfo there.
o Restruction many credential modification chunks to always crdup while
we figure out locking and optimizations; generally speaking, this
means moving to a structure like this:
newcred = crdup(oldcred);
...
p->p_ucred = newcred;
crfree(oldcred);
It's not race-free, but better than nothing. There are also races
in sys_process.c, all inter-process authorization, fork, exec, and
exit.
o Remove sigio->sio_ruid since sigio->sio_ucred now contains the ruid;
remove comments indicating that the old arrangement was a problem.
o Restructure exec1() a little to use newcred/oldcred arrangement, and
use improved uid management primitives.
o Clean up exit1() so as to do less work in credential cleanup due to
pcred removal.
o Clean up fork1() so as to do less work in credential cleanup and
allocation.
o Clean up ktrcanset() to take into account changes, and move to using
suser_xxx() instead of performing a direct uid==0 comparision.
o Improve commenting in various kern_prot.c credential modification
calls to better document current behavior. In a couple of places,
current behavior is a little questionable and we need to check
POSIX.1 to make sure it's "right". More commenting work still
remains to be done.
o Update credential management calls, such as crfree(), to take into
account new ruidinfo reference.
o Modify or add the following uid and gid helper routines:
change_euid()
change_egid()
change_ruid()
change_rgid()
change_svuid()
change_svgid()
In each case, the call now acts on a credential not a process, and as
such no longer requires more complicated process locking/etc. They
now assume the caller will do any necessary allocation of an
exclusive credential reference. Each is commented to document its
reference requirements.
o CANSIGIO() is simplified to require only credentials, not processes
and pcreds.
o Remove lots of (p_pcred==NULL) checks.
o Add an XXX to authorization code in nfs_lock.c, since it's
questionable, and needs to be considered carefully.
o Simplify posix4 authorization code to require only credentials, not
processes and pcreds. Note that this authorization, as well as
CANSIGIO(), needs to be updated to use the p_cansignal() and
p_cansched() centralized authorization routines, as they currently
do not take into account some desirable restrictions that are handled
by the centralized routines, as well as being inconsistent with other
similar authorization instances.
o Update libkvm to take these changes into account.
Obtained from: TrustedBSD Project
Reviewed by: green, bde, jhb, freebsd-arch, freebsd-audit
2001-05-25 16:59:11 +00:00
|
|
|
}
|
2016-05-26 23:18:54 +00:00
|
|
|
/*
|
|
|
|
* Set the new credentials.
|
|
|
|
*/
|
2016-06-08 04:37:03 +00:00
|
|
|
if (imgp->newcred != NULL) {
|
2016-05-26 23:18:54 +00:00
|
|
|
proc_set_cred(p, imgp->newcred);
|
2016-06-08 04:37:03 +00:00
|
|
|
crfree(oldcred);
|
|
|
|
oldcred = NULL;
|
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1994-09-24 16:58:43 +00:00
|
|
|
/*
|
2015-07-14 00:43:08 +00:00
|
|
|
* Store the vp for use in procfs. This vnode was referenced by namei
|
|
|
|
* or fgetvp_exec.
|
1994-09-24 16:58:43 +00:00
|
|
|
*/
|
2015-07-14 01:13:37 +00:00
|
|
|
oldtextvp = p->p_textvp;
|
|
|
|
p->p_textvp = newtextvp;
|
1994-09-24 16:58:43 +00:00
|
|
|
|
2008-05-24 06:22:16 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
|
|
|
/*
|
|
|
|
* Tell the DTrace fasttrap provider about the exec if it
|
|
|
|
* has declared an interest.
|
|
|
|
*/
|
|
|
|
if (dtrace_fasttrap_exec)
|
|
|
|
dtrace_fasttrap_exec(p);
|
|
|
|
#endif
|
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
/*
|
2001-10-27 11:11:25 +00:00
|
|
|
* Notify others that we exec'd, and clear the P_INEXEC flag
|
|
|
|
* as we're now a bona fide freshly-execed process.
|
2000-04-16 18:53:38 +00:00
|
|
|
*/
|
When filt_proc() removes event from the knlist due to the process
exiting (NOTE_EXIT->knlist_remove_inevent()), two things happen:
- knote kn_knlist pointer is reset
- INFLUX knote is removed from the process knlist.
And, there are two consequences:
- KN_LIST_UNLOCK() on such knote is nop
- there is nothing which would block exit1() from processing past the
knlist_destroy() (and knlist_destroy() resets knlist lock pointers).
Both consequences result either in leaked process lock, or
dereferencing NULL function pointers for locking.
Handle this by stopping embedding the process knlist into struct proc.
Instead, the knlist is allocated together with struct proc, but marked
as autodestroy on the zombie reap, by knlist_detach() function. The
knlist is freed when last kevent is removed from the list, in
particular, at the zombie reap time if the list is empty. As result,
the knlist_remove_inevent() is no longer needed and removed.
Other changes:
In filt_procattach(), clear NOTE_EXEC and NOTE_FORK desired events
from kn_sfflags for knote registered by kernel to only get NOTE_CHILD
notifications. The flags leak resulted in excessive
NOTE_EXEC/NOTE_FORK reports.
Fix immediate note activation in filt_procattach(). Condition should
be either the immediate CHILD_NOTE activation, or immediate NOTE_EXIT
report for the exiting process.
In knote_fork(), do not perform racy check for KN_INFLUX before kq
lock is taken. Besides being racy, it did not accounted for notes
just added by scan (KN_SCAN).
Some minor and incomplete style fixes.
Analyzed and tested by: Eric Badger <eric@badgerio.us>
Reviewed by: jhb
Sponsored by: The FreeBSD Foundation
MFC after: 2 weeks
Approved by: re (gjb)
Differential revision: https://reviews.freebsd.org/D6859
2016-06-27 21:52:17 +00:00
|
|
|
KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
|
2001-10-27 11:11:25 +00:00
|
|
|
p->p_flag &= ~P_INEXEC;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/* clear "fork but no exec" flag, as we _are_ execing */
|
|
|
|
p->p_acflag &= ~AFORK;
|
|
|
|
|
2005-10-01 08:33:56 +00:00
|
|
|
/*
|
2005-10-04 04:02:33 +00:00
|
|
|
* Free any previous argument cache and replace it with
|
2005-10-01 08:33:56 +00:00
|
|
|
* the new argument cache, if any.
|
|
|
|
*/
|
2002-05-02 15:00:14 +00:00
|
|
|
oldargs = p->p_args;
|
2005-10-01 08:33:56 +00:00
|
|
|
p->p_args = newargs;
|
|
|
|
newargs = NULL;
|
2005-04-19 04:01:25 +00:00
|
|
|
|
2017-10-19 00:38:14 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
|
2005-04-19 04:01:25 +00:00
|
|
|
#ifdef HWPMC_HOOKS
|
|
|
|
/*
|
2005-06-09 19:45:09 +00:00
|
|
|
* Check if system-wide sampling is in effect or if the
|
|
|
|
* current process is using PMCs. If so, do exec() time
|
2005-04-19 04:01:25 +00:00
|
|
|
* processing. This processing needs to happen AFTER the
|
|
|
|
* P_INEXEC flag is cleared.
|
|
|
|
*/
|
2005-06-09 19:45:09 +00:00
|
|
|
if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(imgp->vp);
|
2005-06-30 19:01:26 +00:00
|
|
|
pe.pm_credentialschanged = credential_changing;
|
|
|
|
pe.pm_entryaddr = imgp->entry_addr;
|
|
|
|
|
|
|
|
PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
|
2012-01-19 23:03:31 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2017-10-19 00:38:14 +00:00
|
|
|
}
|
2005-04-19 04:01:25 +00:00
|
|
|
#endif
|
2002-05-02 15:00:14 +00:00
|
|
|
|
2002-10-11 21:04:01 +00:00
|
|
|
/* Set values passed into the program in registers. */
|
2019-12-03 23:17:54 +00:00
|
|
|
(*p->p_sysent->sv_setregs)(td, imgp, stack_base);
|
2002-08-13 06:55:28 +00:00
|
|
|
|
2020-02-01 06:46:55 +00:00
|
|
|
VOP_MMAPPED(imgp->vp);
|
2005-05-31 19:39:52 +00:00
|
|
|
|
2015-12-16 23:39:27 +00:00
|
|
|
SDT_PROBE1(proc, , , exec__success, args->fname);
|
2008-05-24 06:22:16 +00:00
|
|
|
|
1998-01-11 21:35:38 +00:00
|
|
|
exec_fail_dealloc:
|
2019-11-17 14:52:45 +00:00
|
|
|
if (error != 0) {
|
|
|
|
p->p_osrel = orig_osrel;
|
|
|
|
p->p_fctl0 = orig_fctl0;
|
|
|
|
}
|
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->firstpage != NULL)
|
1998-01-11 21:35:38 +00:00
|
|
|
exec_unmap_first_page(imgp);
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->vp != NULL) {
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname)
|
2009-02-26 16:32:48 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2008-07-17 16:44:07 +00:00
|
|
|
if (imgp->opened)
|
|
|
|
VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
|
Switch to use shared vnode locks for text files during image activation.
kern_execve() locks text vnode exclusive to be able to set and clear
VV_TEXT flag. VV_TEXT is mutually exclusive with the v_writecount > 0
condition.
The change removes VV_TEXT, replacing it with the condition
v_writecount <= -1, and puts v_writecount under the vnode interlock.
Each text reference decrements v_writecount. To clear the text
reference when the segment is unmapped, it is recorded in the
vm_map_entry backed by the text file as MAP_ENTRY_VN_TEXT flag, and
v_writecount is incremented on the map entry removal
The operations like VOP_ADD_WRITECOUNT() and VOP_SET_TEXT() check that
v_writecount does not contradict the desired change. vn_writecheck()
is now racy and its use was eliminated everywhere except access.
Atomic check for writeability and increment of v_writecount is
performed by the VOP. vn_truncate() now increments v_writecount
around VOP_SETATTR() call, lack of which is arguably a bug on its own.
nullfs bypasses v_writecount to the lower vnode always, so nullfs
vnode has its own v_writecount correct, and lower vnode gets all
references, since object->handle is always lower vnode.
On the text vnode' vm object dealloc, the v_writecount value is reset
to zero, and deadfs vop_unset_text short-circuit the operation.
Reclamation of lowervp always reclaims all nullfs vnodes referencing
lowervp first, so no stray references are left.
Reviewed by: markj, trasz
Tested by: mjg, pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 month
Differential revision: https://reviews.freebsd.org/D19923
2019-05-05 11:20:43 +00:00
|
|
|
if (imgp->textset)
|
|
|
|
VOP_UNSET_TEXT_CHECKED(imgp->vp);
|
2015-07-14 00:43:08 +00:00
|
|
|
if (error != 0)
|
|
|
|
vput(imgp->vp);
|
|
|
|
else
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(imgp->vp);
|
1997-04-04 07:30:06 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->object != NULL)
|
2002-07-06 07:00:01 +00:00
|
|
|
vm_object_deallocate(imgp->object);
|
|
|
|
|
2009-03-17 12:53:28 +00:00
|
|
|
free(imgp->freepath, M_TEMP);
|
|
|
|
|
2002-11-26 17:30:55 +00:00
|
|
|
if (error == 0) {
|
2017-10-19 00:46:15 +00:00
|
|
|
if (p->p_ptevents & PTRACE_EXEC) {
|
|
|
|
PROC_LOCK(p);
|
|
|
|
if (p->p_ptevents & PTRACE_EXEC)
|
|
|
|
td->td_dbgflags |= TDB_EXEC;
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
}
|
2016-05-27 15:03:38 +00:00
|
|
|
} else {
|
1994-05-25 09:21:21 +00:00
|
|
|
exec_fail:
|
2016-05-27 15:03:38 +00:00
|
|
|
/* we're done here, clear P_INEXEC */
|
|
|
|
PROC_LOCK(p);
|
|
|
|
p->p_flag &= ~P_INEXEC;
|
|
|
|
PROC_UNLOCK(p);
|
2005-01-29 23:12:00 +00:00
|
|
|
|
2016-05-27 15:03:38 +00:00
|
|
|
SDT_PROBE1(proc, , , exec__failure, error);
|
|
|
|
}
|
2008-05-24 06:22:16 +00:00
|
|
|
|
2016-06-08 04:37:03 +00:00
|
|
|
if (imgp->newcred != NULL && oldcred != NULL)
|
|
|
|
crfree(imgp->newcred);
|
|
|
|
|
2002-11-05 17:51:56 +00:00
|
|
|
#ifdef MAC
|
|
|
|
mac_execve_exit(imgp);
|
Introduce two related changes to the TrustedBSD MAC Framework:
(1) Abstract interpreter vnode labeling in execve(2) and mac_execve(2)
so that the general exec code isn't aware of the details of
allocating, copying, and freeing labels, rather, simply passes in
a void pointer to start and stop functions that will be used by
the framework. This change will be MFC'd.
(2) Introduce a new flags field to the MAC_POLICY_SET(9) interface
allowing policies to declare which types of objects require label
allocation, initialization, and destruction, and define a set of
flags covering various supported object types (MPC_OBJECT_PROC,
MPC_OBJECT_VNODE, MPC_OBJECT_INPCB, ...). This change reduces the
overhead of compiling the MAC Framework into the kernel if policies
aren't loaded, or if policies require labels on only a small number
or even no object types. Each time a policy is loaded or unloaded,
we recalculate a mask of labeled object types across all policies
present in the system. Eliminate MAC_ALWAYS_LABEL_MBUF option as it
is no longer required.
MFC after: 1 week ((1) only)
Reviewed by: csjp
Obtained from: TrustedBSD Project
Sponsored by: Apple, Inc.
2008-08-23 15:26:36 +00:00
|
|
|
mac_execve_interpreter_exit(interpvplabel);
|
2002-11-05 17:51:56 +00:00
|
|
|
#endif
|
2006-02-06 22:06:54 +00:00
|
|
|
exec_free_args(args);
|
|
|
|
|
2016-05-27 15:03:38 +00:00
|
|
|
/*
|
|
|
|
* Handle deferred decrement of ref counts.
|
|
|
|
*/
|
|
|
|
if (oldtextvp != NULL)
|
|
|
|
vrele(oldtextvp);
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (tracevp != NULL)
|
|
|
|
vrele(tracevp);
|
|
|
|
if (tracecred != NULL)
|
|
|
|
crfree(tracecred);
|
|
|
|
#endif
|
|
|
|
pargs_drop(oldargs);
|
|
|
|
pargs_drop(newargs);
|
|
|
|
if (oldsigacts != NULL)
|
|
|
|
sigacts_free(oldsigacts);
|
|
|
|
if (euip != NULL)
|
|
|
|
uifree(euip);
|
|
|
|
|
2006-02-06 22:06:54 +00:00
|
|
|
if (error && imgp->vmspace_destroyed) {
|
|
|
|
/* sorry, no more process anymore. exit gracefully */
|
2020-09-23 18:03:07 +00:00
|
|
|
exec_cleanup(td, oldvmspace);
|
2015-07-18 09:02:50 +00:00
|
|
|
exit1(td, 0, SIGABRT);
|
2006-02-06 22:06:54 +00:00
|
|
|
/* NOT REACHED */
|
|
|
|
}
|
2011-02-25 22:05:33 +00:00
|
|
|
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (error == 0)
|
|
|
|
ktrprocctor(p);
|
|
|
|
#endif
|
|
|
|
|
2017-11-24 07:35:08 +00:00
|
|
|
/*
|
|
|
|
* We don't want cpu_set_syscall_retval() to overwrite any of
|
|
|
|
* the register values put in place by exec_setregs().
|
|
|
|
* Implementations of cpu_set_syscall_retval() will leave
|
|
|
|
* registers unmodified when returning EJUSTRETURN.
|
|
|
|
*/
|
|
|
|
return (error == 0 ? EJUSTRETURN : error);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
|
|
|
|
2020-09-23 18:03:07 +00:00
|
|
|
void
|
|
|
|
exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
|
|
|
|
{
|
|
|
|
if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
|
2020-09-24 12:14:25 +00:00
|
|
|
KASSERT(td->td_proc->p_vmspace != oldvmspace,
|
2020-09-23 18:03:07 +00:00
|
|
|
("oldvmspace still used"));
|
|
|
|
vmspace_free(oldvmspace);
|
|
|
|
td->td_pflags &= ~TDP_EXECVMSPC;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-01-11 21:35:38 +00:00
|
|
|
int
|
2018-06-01 13:26:45 +00:00
|
|
|
exec_map_first_page(struct image_params *imgp)
|
1998-01-11 21:35:38 +00:00
|
|
|
{
|
|
|
|
vm_object_t object;
|
2019-12-15 02:00:32 +00:00
|
|
|
vm_page_t m;
|
|
|
|
int error;
|
1998-01-11 21:35:38 +00:00
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->firstpage != NULL)
|
1998-01-11 21:35:38 +00:00
|
|
|
exec_unmap_first_page(imgp);
|
|
|
|
|
2005-01-25 00:40:01 +00:00
|
|
|
object = imgp->vp->v_object;
|
2005-05-01 00:58:19 +00:00
|
|
|
if (object == NULL)
|
|
|
|
return (EACCES);
|
2007-12-29 19:53:04 +00:00
|
|
|
#if VM_NRESERVLEVEL > 0
|
2020-02-28 20:34:30 +00:00
|
|
|
if ((object->flags & OBJ_COLORED) == 0) {
|
|
|
|
VM_OBJECT_WLOCK(object);
|
|
|
|
vm_object_color(object, 0);
|
|
|
|
VM_OBJECT_WUNLOCK(object);
|
|
|
|
}
|
2007-12-29 19:53:04 +00:00
|
|
|
#endif
|
2020-02-28 20:34:30 +00:00
|
|
|
error = vm_page_grab_valid_unlocked(&m, object, 0,
|
2019-12-15 02:00:32 +00:00
|
|
|
VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
|
|
|
|
VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
|
1998-01-11 21:35:38 +00:00
|
|
|
|
2019-12-15 02:00:32 +00:00
|
|
|
if (error != VM_PAGER_OK)
|
|
|
|
return (EIO);
|
|
|
|
imgp->firstpage = sf_buf_alloc(m, 0);
|
2004-04-23 03:01:40 +00:00
|
|
|
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
|
1998-01-11 21:35:38 +00:00
|
|
|
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1998-01-11 21:35:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_unmap_first_page(struct image_params *imgp)
|
1998-01-11 21:35:38 +00:00
|
|
|
{
|
2004-04-23 03:01:40 +00:00
|
|
|
vm_page_t m;
|
2001-05-19 01:28:09 +00:00
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->firstpage != NULL) {
|
2004-04-23 03:01:40 +00:00
|
|
|
m = sf_buf_page(imgp->firstpage);
|
|
|
|
sf_buf_free(imgp->firstpage);
|
|
|
|
imgp->firstpage = NULL;
|
2019-07-08 19:46:20 +00:00
|
|
|
vm_page_unwire(m, PQ_ACTIVE);
|
1998-01-11 21:35:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2017-07-03 20:44:01 +00:00
|
|
|
* Destroy old address space, and allocate a new stack.
|
|
|
|
* The new stack is only sgrowsiz large because it is grown
|
|
|
|
* automatically on a page fault.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
|
1994-05-25 09:21:21 +00:00
|
|
|
{
|
|
|
|
int error;
|
2002-03-31 00:05:30 +00:00
|
|
|
struct proc *p = imgp->proc;
|
|
|
|
struct vmspace *vmspace = p->p_vmspace;
|
Add a way to manage thread signal mask using shared word, instead of syscall.
A new syscall sigfastblock(2) is added which registers a uint32_t
variable as containing the count of blocks for signal delivery. Its
content is read by kernel on each syscall entry and on AST processing,
non-zero count of blocks is interpreted same as the signal mask
blocking all signals.
The biggest downside of the feature that I see is that memory
corruption that affects the registered fast sigblock location, would
cause quite strange application misbehavior. For instance, the process
would be immune to ^C (but killable by SIGKILL).
With consumers (rtld and libthr added), benchmarks do not show a
slow-down of the syscalls in micro-measurements, and macro benchmarks
like buildworld do not demonstrate a difference. Part of the reason is
that buildworld time is dominated by compiler, and clang already links
to libthr. On the other hand, small utilities typically used by shell
scripts have the total number of syscalls cut by half.
The syscall is not exported from the stable libc version namespace on
purpose. It is intended to be used only by our C runtime
implementation internals.
Tested by: pho
Disscussed with: cem, emaste, jilles
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D12773
2020-02-09 11:53:12 +00:00
|
|
|
struct thread *td = curthread;
|
2011-01-08 16:13:44 +00:00
|
|
|
vm_object_t obj;
|
2015-04-15 08:13:53 +00:00
|
|
|
struct rlimit rlim_stack;
|
2009-10-02 17:48:51 +00:00
|
|
|
vm_offset_t sv_minuser, stack_addr;
|
2002-09-21 22:07:17 +00:00
|
|
|
vm_map_t map;
|
2007-07-12 18:01:31 +00:00
|
|
|
u_long ssiz;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1995-11-06 12:52:37 +00:00
|
|
|
imgp->vmspace_destroyed = 1;
|
2006-08-15 12:10:57 +00:00
|
|
|
imgp->sysent = sv;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2020-02-20 15:34:02 +00:00
|
|
|
sigfastblock_clear(td);
|
2020-11-21 10:32:40 +00:00
|
|
|
umtx_exec(p);
|
2020-11-21 21:43:36 +00:00
|
|
|
itimers_exec(p);
|
2020-11-23 17:29:25 +00:00
|
|
|
if (sv->sv_onexec != NULL)
|
|
|
|
sv->sv_onexec(p, imgp);
|
Add a way to manage thread signal mask using shared word, instead of syscall.
A new syscall sigfastblock(2) is added which registers a uint32_t
variable as containing the count of blocks for signal delivery. Its
content is read by kernel on each syscall entry and on AST processing,
non-zero count of blocks is interpreted same as the signal mask
blocking all signals.
The biggest downside of the feature that I see is that memory
corruption that affects the registered fast sigblock location, would
cause quite strange application misbehavior. For instance, the process
would be immune to ^C (but killable by SIGKILL).
With consumers (rtld and libthr added), benchmarks do not show a
slow-down of the syscalls in micro-measurements, and macro benchmarks
like buildworld do not demonstrate a difference. Part of the reason is
that buildworld time is dominated by compiler, and clang already links
to libthr. On the other hand, small utilities typically used by shell
scripts have the total number of syscalls cut by half.
The syscall is not exported from the stable libc version namespace on
purpose. It is intended to be used only by our C runtime
implementation internals.
Tested by: pho
Disscussed with: cem, emaste, jilles
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D12773
2020-02-09 11:53:12 +00:00
|
|
|
|
2017-11-09 22:51:48 +00:00
|
|
|
EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);
|
2002-01-13 19:36:35 +00:00
|
|
|
|
1997-04-11 23:37:23 +00:00
|
|
|
/*
|
|
|
|
* Blow away entire process VM, if address space not shared,
|
|
|
|
* otherwise, create a new VM space so that other threads are
|
|
|
|
* not disrupted
|
|
|
|
*/
|
2002-09-21 22:07:17 +00:00
|
|
|
map = &vmspace->vm_map;
|
2009-10-02 17:48:51 +00:00
|
|
|
if (map_at_zero)
|
|
|
|
sv_minuser = sv->sv_minuser;
|
|
|
|
else
|
|
|
|
sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
|
2020-11-04 16:30:56 +00:00
|
|
|
if (refcount_load(&vmspace->vm_refcnt) == 1 &&
|
|
|
|
vm_map_min(map) == sv_minuser &&
|
2019-03-16 11:31:01 +00:00
|
|
|
vm_map_max(map) == sv->sv_maxuser &&
|
|
|
|
cpu_exec_vmspace_reuse(p, map)) {
|
2003-01-13 23:04:32 +00:00
|
|
|
shmexit(vmspace);
|
2006-04-03 21:16:10 +00:00
|
|
|
pmap_remove_pages(vmspace_pmap(vmspace));
|
2002-09-21 22:07:17 +00:00
|
|
|
vm_map_remove(map, vm_map_min(map), vm_map_max(map));
|
Implement Address Space Layout Randomization (ASLR)
With this change, randomization can be enabled for all non-fixed
mappings. It means that the base address for the mapping is selected
with a guaranteed amount of entropy (bits). If the mapping was
requested to be superpage aligned, the randomization honours the
superpage attributes.
Although the value of ASLR is diminshing over time as exploit authors
work out simple ASLR bypass techniques, it elimintates the trivial
exploitation of certain vulnerabilities, at least in theory. This
implementation is relatively small and happens at the correct
architectural level. Also, it is not expected to introduce
regressions in existing cases when turned off (default for now), or
cause any significant maintaince burden.
The randomization is done on a best-effort basis - that is, the
allocator falls back to a first fit strategy if fragmentation prevents
entropy injection. It is trivial to implement a strong mode where
failure to guarantee the requested amount of entropy results in
mapping request failure, but I do not consider that to be usable.
I have not fine-tuned the amount of entropy injected right now. It is
only a quantitive change that will not change the implementation. The
current amount is controlled by aslr_pages_rnd.
To not spoil coalescing optimizations, to reduce the page table
fragmentation inherent to ASLR, and to keep the transient superpage
promotion for the malloced memory, locality clustering is implemented
for anonymous private mappings, which are automatically grouped until
fragmentation kicks in. The initial location for the anon group range
is, of course, randomized. This is controlled by vm.cluster_anon,
enabled by default.
The default mode keeps the sbrk area unpopulated by other mappings,
but this can be turned off, which gives much more breathing bits on
architectures with small address space, such as i386. This is tied
with the question of following an application's hint about the mmap(2)
base address. Testing shows that ignoring the hint does not affect the
function of common applications, but I would expect more demanding
code could break. By default sbrk is preserved and mmap hints are
satisfied, which can be changed by using the
kern.elf{32,64}.aslr.honor_sbrk sysctl.
ASLR is enabled on per-ABI basis, and currently it is only allowed on
FreeBSD native i386 and amd64 (including compat 32bit) ABIs. Support
for additional architectures will be added after further testing.
Both per-process and per-image controls are implemented:
- procctl(2) adds PROC_ASLR_CTL/PROC_ASLR_STATUS;
- NT_FREEBSD_FCTL_ASLR_DISABLE feature control note bit makes it possible
to force ASLR off for the given binary. (A tool to edit the feature
control note is in development.)
Global controls are:
- kern.elf{32,64}.aslr.enable - for non-fixed mappings done by mmap(2);
- kern.elf{32,64}.aslr.pie_enable - for PIE image activation mappings;
- kern.elf{32,64}.aslr.honor_sbrk - allow to use sbrk area for mmap(2);
- vm.cluster_anon - enables anon mapping clustering.
PR: 208580 (exp runs)
Exp-runs done by: antoine
Reviewed by: markj (previous version)
Discussed with: emaste
Tested by: pho
MFC after: 1 month
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D5603
2019-02-10 17:19:45 +00:00
|
|
|
/*
|
|
|
|
* An exec terminates mlockall(MCL_FUTURE), ASLR state
|
|
|
|
* must be re-evaluated.
|
|
|
|
*/
|
2017-06-30 15:49:36 +00:00
|
|
|
vm_map_lock(map);
|
Implement Address Space Layout Randomization (ASLR)
With this change, randomization can be enabled for all non-fixed
mappings. It means that the base address for the mapping is selected
with a guaranteed amount of entropy (bits). If the mapping was
requested to be superpage aligned, the randomization honours the
superpage attributes.
Although the value of ASLR is diminishing over time as exploit authors
work out simple ASLR bypass techniques, it eliminates the trivial
exploitation of certain vulnerabilities, at least in theory. This
implementation is relatively small and happens at the correct
architectural level. Also, it is not expected to introduce
regressions in existing cases when turned off (default for now), or
cause any significant maintenance burden.
The randomization is done on a best-effort basis - that is, the
allocator falls back to a first fit strategy if fragmentation prevents
entropy injection. It is trivial to implement a strong mode where
failure to guarantee the requested amount of entropy results in
mapping request failure, but I do not consider that to be usable.
I have not fine-tuned the amount of entropy injected right now. It is
only a quantitative change that will not change the implementation. The
current amount is controlled by aslr_pages_rnd.
To not spoil coalescing optimizations, to reduce the page table
fragmentation inherent to ASLR, and to keep the transient superpage
promotion for the malloced memory, locality clustering is implemented
for anonymous private mappings, which are automatically grouped until
fragmentation kicks in. The initial location for the anon group range
is, of course, randomized. This is controlled by vm.cluster_anon,
enabled by default.
The default mode keeps the sbrk area unpopulated by other mappings,
but this can be turned off, which gives much more breathing bits on
architectures with small address space, such as i386. This is tied
with the question of following an application's hint about the mmap(2)
base address. Testing shows that ignoring the hint does not affect the
function of common applications, but I would expect more demanding
code could break. By default sbrk is preserved and mmap hints are
satisfied, which can be changed by using the
kern.elf{32,64}.aslr.honor_sbrk sysctl.
ASLR is enabled on per-ABI basis, and currently it is only allowed on
FreeBSD native i386 and amd64 (including compat 32bit) ABIs. Support
for additional architectures will be added after further testing.
Both per-process and per-image controls are implemented:
- procctl(2) adds PROC_ASLR_CTL/PROC_ASLR_STATUS;
- NT_FREEBSD_FCTL_ASLR_DISABLE feature control note bit makes it possible
to force ASLR off for the given binary. (A tool to edit the feature
control note is in development.)
Global controls are:
- kern.elf{32,64}.aslr.enable - for non-fixed mappings done by mmap(2);
- kern.elf{32,64}.aslr.pie_enable - for PIE image activation mappings;
- kern.elf{32,64}.aslr.honor_sbrk - allow to use sbrk area for mmap(2);
- vm.cluster_anon - enables anon mapping clustering.
PR: 208580 (exp runs)
Exp-runs done by: antoine
Reviewed by: markj (previous version)
Discussed with: emaste
Tested by: pho
MFC after: 1 month
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D5603
2019-02-10 17:19:45 +00:00
|
|
|
vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
|
|
|
|
MAP_ASLR_IGNSTART);
|
2017-06-30 15:49:36 +00:00
|
|
|
vm_map_unlock(map);
|
1997-04-11 23:37:23 +00:00
|
|
|
} else {
|
2009-10-02 17:48:51 +00:00
|
|
|
error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
|
2007-11-05 11:36:16 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2002-03-31 00:05:30 +00:00
|
|
|
vmspace = p->p_vmspace;
|
2002-09-21 22:07:17 +00:00
|
|
|
map = &vmspace->vm_map;
|
1997-04-11 23:37:23 +00:00
|
|
|
}
|
Implement Address Space Layout Randomization (ASLR)
With this change, randomization can be enabled for all non-fixed
mappings. It means that the base address for the mapping is selected
with a guaranteed amount of entropy (bits). If the mapping was
requested to be superpage aligned, the randomization honours the
superpage attributes.
Although the value of ASLR is diminishing over time as exploit authors
work out simple ASLR bypass techniques, it eliminates the trivial
exploitation of certain vulnerabilities, at least in theory. This
implementation is relatively small and happens at the correct
architectural level. Also, it is not expected to introduce
regressions in existing cases when turned off (default for now), or
cause any significant maintenance burden.
The randomization is done on a best-effort basis - that is, the
allocator falls back to a first fit strategy if fragmentation prevents
entropy injection. It is trivial to implement a strong mode where
failure to guarantee the requested amount of entropy results in
mapping request failure, but I do not consider that to be usable.
I have not fine-tuned the amount of entropy injected right now. It is
only a quantitative change that will not change the implementation. The
current amount is controlled by aslr_pages_rnd.
To not spoil coalescing optimizations, to reduce the page table
fragmentation inherent to ASLR, and to keep the transient superpage
promotion for the malloced memory, locality clustering is implemented
for anonymous private mappings, which are automatically grouped until
fragmentation kicks in. The initial location for the anon group range
is, of course, randomized. This is controlled by vm.cluster_anon,
enabled by default.
The default mode keeps the sbrk area unpopulated by other mappings,
but this can be turned off, which gives much more breathing bits on
architectures with small address space, such as i386. This is tied
with the question of following an application's hint about the mmap(2)
base address. Testing shows that ignoring the hint does not affect the
function of common applications, but I would expect more demanding
code could break. By default sbrk is preserved and mmap hints are
satisfied, which can be changed by using the
kern.elf{32,64}.aslr.honor_sbrk sysctl.
ASLR is enabled on per-ABI basis, and currently it is only allowed on
FreeBSD native i386 and amd64 (including compat 32bit) ABIs. Support
for additional architectures will be added after further testing.
Both per-process and per-image controls are implemented:
- procctl(2) adds PROC_ASLR_CTL/PROC_ASLR_STATUS;
- NT_FREEBSD_FCTL_ASLR_DISABLE feature control note bit makes it possible
to force ASLR off for the given binary. (A tool to edit the feature
control note is in development.)
Global controls are:
- kern.elf{32,64}.aslr.enable - for non-fixed mappings done by mmap(2);
- kern.elf{32,64}.aslr.pie_enable - for PIE image activation mappings;
- kern.elf{32,64}.aslr.honor_sbrk - allow to use sbrk area for mmap(2);
- vm.cluster_anon - enables anon mapping clustering.
PR: 208580 (exp runs)
Exp-runs done by: antoine
Reviewed by: markj (previous version)
Discussed with: emaste
Tested by: pho
MFC after: 1 month
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D5603
2019-02-10 17:19:45 +00:00
|
|
|
map->flags |= imgp->map_flags;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2011-01-08 16:13:44 +00:00
|
|
|
/* Map a shared page */
|
|
|
|
obj = sv->sv_shared_page_obj;
|
|
|
|
if (obj != NULL) {
|
|
|
|
vm_object_reference(obj);
|
|
|
|
error = vm_map_fixed(map, obj, 0,
|
|
|
|
sv->sv_shared_page_base, sv->sv_shared_page_len,
|
2013-06-03 04:32:53 +00:00
|
|
|
VM_PROT_READ | VM_PROT_EXECUTE,
|
|
|
|
VM_PROT_READ | VM_PROT_EXECUTE,
|
|
|
|
MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
|
2017-07-03 20:44:01 +00:00
|
|
|
if (error != KERN_SUCCESS) {
|
2011-01-08 16:13:44 +00:00
|
|
|
vm_object_deallocate(obj);
|
2017-07-03 20:44:01 +00:00
|
|
|
return (vm_mmap_to_errno(error));
|
2011-01-08 16:13:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/* Allocate a new stack */
|
2015-04-15 08:13:53 +00:00
|
|
|
if (imgp->stack_sz != 0) {
|
2015-04-23 11:27:21 +00:00
|
|
|
ssiz = trunc_page(imgp->stack_sz);
|
2015-04-15 08:13:53 +00:00
|
|
|
PROC_LOCK(p);
|
2015-06-10 10:48:12 +00:00
|
|
|
lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
|
2015-04-15 08:13:53 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
if (ssiz > rlim_stack.rlim_max)
|
|
|
|
ssiz = rlim_stack.rlim_max;
|
|
|
|
if (ssiz > rlim_stack.rlim_cur) {
|
|
|
|
rlim_stack.rlim_cur = ssiz;
|
|
|
|
kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
|
|
|
|
}
|
|
|
|
} else if (sv->sv_maxssiz != NULL) {
|
2007-07-12 18:01:31 +00:00
|
|
|
ssiz = *sv->sv_maxssiz;
|
2015-04-15 08:13:53 +00:00
|
|
|
} else {
|
2007-07-12 18:01:31 +00:00
|
|
|
ssiz = maxssiz;
|
2015-04-15 08:13:53 +00:00
|
|
|
}
|
2019-08-05 19:16:33 +00:00
|
|
|
imgp->eff_stack_sz = lim_cur(curthread, RLIMIT_STACK);
|
|
|
|
if (ssiz < imgp->eff_stack_sz)
|
|
|
|
imgp->eff_stack_sz = ssiz;
|
2007-07-12 18:01:31 +00:00
|
|
|
stack_addr = sv->sv_usrstack - ssiz;
|
|
|
|
error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
|
2011-01-08 16:13:44 +00:00
|
|
|
obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
|
2017-07-03 20:44:01 +00:00
|
|
|
sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
|
|
|
|
if (error != KERN_SUCCESS)
|
|
|
|
return (vm_mmap_to_errno(error));
|
1999-01-06 23:05:42 +00:00
|
|
|
|
2014-06-09 00:15:16 +00:00
|
|
|
/*
|
|
|
|
* vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
|
|
|
|
* are still used to enforce the stack rlimit on the process stack.
|
1999-01-06 23:05:42 +00:00
|
|
|
*/
|
2001-10-10 23:06:54 +00:00
|
|
|
vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
|
2015-06-30 15:22:47 +00:00
|
|
|
vmspace->vm_maxsaddr = (char *)stack_addr;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2007-03-05 13:10:58 +00:00
|
|
|
* Copy out argument and environment strings from the old process address
|
|
|
|
* space into the temporary string buffer.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
|
|
|
int
|
2018-11-02 20:50:22 +00:00
|
|
|
exec_copyin_args(struct image_args *args, const char *fname,
|
2005-01-29 23:12:00 +00:00
|
|
|
enum uio_seg segflg, char **argv, char **envv)
|
1994-05-25 09:21:21 +00:00
|
|
|
{
|
2018-11-29 21:00:56 +00:00
|
|
|
u_long arg, env;
|
2005-01-29 23:12:00 +00:00
|
|
|
int error;
|
|
|
|
|
|
|
|
bzero(args, sizeof(*args));
|
|
|
|
if (argv == NULL)
|
|
|
|
return (EFAULT);
|
2010-07-27 17:31:03 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2010-07-27 17:31:03 +00:00
|
|
|
* Allocate demand-paged memory for the file name, argument, and
|
|
|
|
* environment strings.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2010-07-27 17:31:03 +00:00
|
|
|
error = exec_alloc_args(args);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
Change the order in which the file name, arguments, environment, and
shell command are stored in exec*()'s demand-paged string buffer. For
a "buildworld" on an 8GB amd64 multiprocessor, the new order reduces
the number of global TLB shootdowns by 31%. It also eliminates about
330k page faults on the kernel address space.
Change exec_shell_imgact() to use "args->begin_argv" consistently as
the start of the argument and environment strings. Previously, it
would sometimes use "args->buf", which is the start of the overall
buffer, but no longer the start of the argument and environment
strings. While I'm here, eliminate unnecessary passing of "&length"
to copystr(), where we don't actually care about the length of the
copied string.
Clean up the initialization of the exec map. In particular, use the
correct size for an entry, and express that size in the same way that
is used when an entry is allocated. The old size was one page too
large. (This discrepancy originated in 2004 when I rewrote
exec_map_first_page() to use sf_buf_alloc() instead of the exec map
for mapping the first page of the executable.)
Reviewed by: kib
2010-07-25 17:43:38 +00:00
|
|
|
|
2005-01-29 23:12:00 +00:00
|
|
|
/*
|
|
|
|
* Copy the file name.
|
|
|
|
*/
|
2018-11-29 21:00:56 +00:00
|
|
|
error = exec_args_add_fname(args, fname, segflg);
|
|
|
|
if (error != 0)
|
|
|
|
goto err_exit;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2005-01-29 23:12:00 +00:00
|
|
|
/*
|
|
|
|
* extract arguments first
|
|
|
|
*/
|
2014-10-28 15:28:20 +00:00
|
|
|
for (;;) {
|
2018-11-29 21:00:56 +00:00
|
|
|
error = fueword(argv++, &arg);
|
2014-10-28 15:28:20 +00:00
|
|
|
if (error == -1) {
|
2006-03-08 20:21:54 +00:00
|
|
|
error = EFAULT;
|
|
|
|
goto err_exit;
|
|
|
|
}
|
2018-11-29 21:00:56 +00:00
|
|
|
if (arg == 0)
|
2014-10-28 15:28:20 +00:00
|
|
|
break;
|
2018-11-29 21:00:56 +00:00
|
|
|
error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
|
|
|
|
UIO_USERSPACE);
|
|
|
|
if (error != 0)
|
2006-03-08 20:21:54 +00:00
|
|
|
goto err_exit;
|
2005-01-29 23:12:00 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* extract environment strings
|
|
|
|
*/
|
1994-08-24 10:53:53 +00:00
|
|
|
if (envv) {
|
2014-10-28 15:28:20 +00:00
|
|
|
for (;;) {
|
2018-11-29 21:00:56 +00:00
|
|
|
error = fueword(envv++, &env);
|
2014-10-28 15:28:20 +00:00
|
|
|
if (error == -1) {
|
2006-03-08 20:21:54 +00:00
|
|
|
error = EFAULT;
|
|
|
|
goto err_exit;
|
|
|
|
}
|
2018-11-29 21:00:56 +00:00
|
|
|
if (env == 0)
|
2014-10-28 15:28:20 +00:00
|
|
|
break;
|
2018-11-29 21:00:56 +00:00
|
|
|
error = exec_args_add_env(args,
|
|
|
|
(char *)(uintptr_t)env, UIO_USERSPACE);
|
|
|
|
if (error != 0)
|
2006-03-08 20:21:54 +00:00
|
|
|
goto err_exit;
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
1994-08-24 10:53:53 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
|
|
|
return (0);
|
2006-03-08 20:21:54 +00:00
|
|
|
|
|
|
|
err_exit:
|
|
|
|
exec_free_args(args);
|
|
|
|
return (error);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
|
|
|
|
Implement CloudABI's exec() call.
Summary:
In a runtime that is purely based on capability-based security, there is
a strong emphasis on how programs start their execution. We need to make
sure that we execute an new program with an exact set of file
descriptors, ensuring that credentials are not leaked into the process
accidentally.
Providing the right file descriptors is just half the problem. There
also needs to be a framework in place that gives meaning to these file
descriptors. How does a CloudABI mail server know which of the file
descriptors corresponds to the socket that receives incoming emails?
Furthermore, how will this mail server acquire its configuration
parameters, as it cannot open a configuration file from a global path on
disk?
CloudABI solves this problem by replacing traditional string command
line arguments by tree-like data structure consisting of scalars,
sequences and mappings (similar to YAML/JSON). In this structure, file
descriptors are treated as a first-class citizen. When calling exec(),
file descriptors are passed on to the new executable if and only if they
are referenced from this tree structure. See the cloudabi-run(1) man
page for more details and examples (sysutils/cloudabi-utils).
Fortunately, the kernel does not need to care about this tree structure
at all. The C library is responsible for serializing and deserializing,
but also for extracting the list of referenced file descriptors. The
system call only receives a copy of the serialized data and a layout of
what the new file descriptor table should look like:
int proc_exec(int execfd, const void *data, size_t datalen, const int *fds,
size_t fdslen);
This change introduces a set of fd*_remapped() functions:
- fdcopy_remapped() pulls a copy of a file descriptor table, remapping
all of the file descriptors according to the provided mapping table.
- fdinstall_remapped() replaces the file descriptor table of the process
by the copy created by fdcopy_remapped().
- fdescfree_remapped() frees the table in case we aborted before
fdinstall_remapped().
We then add a function exec_copyin_data_fds() that builds on top these
functions. It copies in the data and constructs a new remapped file
descriptor. This is used by cloudabi_sys_proc_exec().
Test Plan:
cloudabi-run(1) is capable of spawning processes successfully, providing
it data and file descriptors. procstat -f seems to confirm all is good.
Regular FreeBSD processes also work properly.
Reviewers: kib, mjg
Reviewed By: mjg
Subscribers: imp
Differential Revision: https://reviews.freebsd.org/D3079
2015-07-16 07:05:42 +00:00
|
|
|
int
|
|
|
|
exec_copyin_data_fds(struct thread *td, struct image_args *args,
|
|
|
|
const void *data, size_t datalen, const int *fds, size_t fdslen)
|
|
|
|
{
|
|
|
|
struct filedesc *ofdp;
|
|
|
|
const char *p;
|
|
|
|
int *kfds;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
memset(args, '\0', sizeof(*args));
|
|
|
|
ofdp = td->td_proc->p_fd;
|
2020-07-15 10:24:04 +00:00
|
|
|
if (datalen >= ARG_MAX || fdslen >= ofdp->fd_nfiles)
|
Implement CloudABI's exec() call.
Summary:
In a runtime that is purely based on capability-based security, there is
a strong emphasis on how programs start their execution. We need to make
sure that we execute an new program with an exact set of file
descriptors, ensuring that credentials are not leaked into the process
accidentally.
Providing the right file descriptors is just half the problem. There
also needs to be a framework in place that gives meaning to these file
descriptors. How does a CloudABI mail server know which of the file
descriptors corresponds to the socket that receives incoming emails?
Furthermore, how will this mail server acquire its configuration
parameters, as it cannot open a configuration file from a global path on
disk?
CloudABI solves this problem by replacing traditional string command
line arguments by tree-like data structure consisting of scalars,
sequences and mappings (similar to YAML/JSON). In this structure, file
descriptors are treated as a first-class citizen. When calling exec(),
file descriptors are passed on to the new executable if and only if they
are referenced from this tree structure. See the cloudabi-run(1) man
page for more details and examples (sysutils/cloudabi-utils).
Fortunately, the kernel does not need to care about this tree structure
at all. The C library is responsible for serializing and deserializing,
but also for extracting the list of referenced file descriptors. The
system call only receives a copy of the serialized data and a layout of
what the new file descriptor table should look like:
int proc_exec(int execfd, const void *data, size_t datalen, const int *fds,
size_t fdslen);
This change introduces a set of fd*_remapped() functions:
- fdcopy_remapped() pulls a copy of a file descriptor table, remapping
all of the file descriptors according to the provided mapping table.
- fdinstall_remapped() replaces the file descriptor table of the process
by the copy created by fdcopy_remapped().
- fdescfree_remapped() frees the table in case we aborted before
fdinstall_remapped().
We then add a function exec_copyin_data_fds() that builds on top these
functions. It copies in the data and constructs a new remapped file
descriptor. This is used by cloudabi_sys_proc_exec().
Test Plan:
cloudabi-run(1) is capable of spawning processes successfully, providing
it data and file descriptors. procstat -f seems to confirm all is good.
Regular FreeBSD processes also work properly.
Reviewers: kib, mjg
Reviewed By: mjg
Subscribers: imp
Differential Revision: https://reviews.freebsd.org/D3079
2015-07-16 07:05:42 +00:00
|
|
|
return (E2BIG);
|
|
|
|
error = exec_alloc_args(args);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
args->begin_argv = args->buf;
|
|
|
|
args->stringspace = ARG_MAX;
|
|
|
|
|
|
|
|
if (datalen > 0) {
|
|
|
|
/*
|
|
|
|
* Argument buffer has been provided. Copy it into the
|
|
|
|
* kernel as a single string and add a terminating null
|
|
|
|
* byte.
|
|
|
|
*/
|
|
|
|
error = copyin(data, args->begin_argv, datalen);
|
|
|
|
if (error != 0)
|
|
|
|
goto err_exit;
|
|
|
|
args->begin_argv[datalen] = '\0';
|
|
|
|
args->endp = args->begin_argv + datalen + 1;
|
|
|
|
args->stringspace -= datalen + 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Traditional argument counting. Count the number of
|
|
|
|
* null bytes.
|
|
|
|
*/
|
|
|
|
for (p = args->begin_argv; p < args->endp; ++p)
|
|
|
|
if (*p == '\0')
|
|
|
|
++args->argc;
|
|
|
|
} else {
|
|
|
|
/* No argument buffer provided. */
|
|
|
|
args->endp = args->begin_argv;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create new file descriptor table. */
|
|
|
|
kfds = malloc(fdslen * sizeof(int), M_TEMP, M_WAITOK);
|
|
|
|
error = copyin(fds, kfds, fdslen * sizeof(int));
|
|
|
|
if (error != 0) {
|
|
|
|
free(kfds, M_TEMP);
|
|
|
|
goto err_exit;
|
|
|
|
}
|
|
|
|
error = fdcopy_remapped(ofdp, kfds, fdslen, &args->fdp);
|
|
|
|
free(kfds, M_TEMP);
|
|
|
|
if (error != 0)
|
|
|
|
goto err_exit;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
err_exit:
|
|
|
|
exec_free_args(args);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2017-01-05 01:44:12 +00:00
|
|
|
struct exec_args_kva {
|
|
|
|
vm_offset_t addr;
|
2017-02-15 01:50:58 +00:00
|
|
|
u_int gen;
|
2017-01-05 01:44:12 +00:00
|
|
|
SLIST_ENTRY(exec_args_kva) next;
|
|
|
|
};
|
|
|
|
|
2018-07-05 17:13:37 +00:00
|
|
|
DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);
|
2017-01-05 01:44:12 +00:00
|
|
|
|
|
|
|
static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
|
|
|
|
static struct mtx exec_args_kva_mtx;
|
2017-02-15 01:50:58 +00:00
|
|
|
static u_int exec_args_gen;
|
2017-01-05 01:44:12 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
exec_prealloc_args_kva(void *arg __unused)
|
|
|
|
{
|
|
|
|
struct exec_args_kva *argkva;
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
SLIST_INIT(&exec_args_kva_freelist);
|
|
|
|
mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
|
|
|
|
for (i = 0; i < exec_map_entries; i++) {
|
|
|
|
argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
|
|
|
|
argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
|
2017-02-15 01:50:58 +00:00
|
|
|
argkva->gen = exec_args_gen;
|
2017-01-05 01:44:12 +00:00
|
|
|
SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);
|
|
|
|
|
|
|
|
static vm_offset_t
|
|
|
|
exec_alloc_args_kva(void **cookie)
|
|
|
|
{
|
|
|
|
struct exec_args_kva *argkva;
|
|
|
|
|
|
|
|
argkva = (void *)atomic_readandclear_ptr(
|
|
|
|
(uintptr_t *)DPCPU_PTR(exec_args_kva));
|
|
|
|
if (argkva == NULL) {
|
|
|
|
mtx_lock(&exec_args_kva_mtx);
|
|
|
|
while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
|
|
|
|
(void)mtx_sleep(&exec_args_kva_freelist,
|
|
|
|
&exec_args_kva_mtx, 0, "execkva", 0);
|
|
|
|
SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
|
|
|
|
mtx_unlock(&exec_args_kva_mtx);
|
|
|
|
}
|
|
|
|
*(struct exec_args_kva **)cookie = argkva;
|
|
|
|
return (argkva->addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-02-15 01:50:58 +00:00
|
|
|
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
|
2017-01-05 01:44:12 +00:00
|
|
|
{
|
|
|
|
vm_offset_t base;
|
|
|
|
|
|
|
|
base = argkva->addr;
|
2017-02-15 01:50:58 +00:00
|
|
|
if (argkva->gen != gen) {
|
Use a single, consistent approach to returning success versus failure in
vm_map_madvise(). Previously, vm_map_madvise() used a traditional Unix-
style "return (0);" to indicate success in the common case, but Mach-
style return values in the edge cases. Since KERN_SUCCESS equals zero,
the only problem with this inconsistency was stylistic. vm_map_madvise()
has exactly two callers in the entire source tree, and only one of them
cares about the return value. That caller, kern_madvise(), can be
simplified if vm_map_madvise() consistently uses Unix-style return
values.
Since vm_map_madvise() uses the variable modify_map as a Boolean, make it
one.
Eliminate a redundant error check from kern_madvise(). Add a comment
explaining where the check is performed.
Explicitly note that exec_release_args_kva() doesn't care about
vm_map_madvise()'s return value. Since MADV_FREE is passed as the
behavior, the return value will always be zero.
Reviewed by: kib, markj
MFC after: 7 days
2018-06-04 16:28:06 +00:00
|
|
|
(void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
|
2017-02-15 01:50:58 +00:00
|
|
|
MADV_FREE);
|
|
|
|
argkva->gen = gen;
|
|
|
|
}
|
2017-01-05 01:44:12 +00:00
|
|
|
if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
|
|
|
|
(uintptr_t)NULL, (uintptr_t)argkva)) {
|
|
|
|
mtx_lock(&exec_args_kva_mtx);
|
|
|
|
SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
|
|
|
|
wakeup_one(&exec_args_kva_freelist);
|
|
|
|
mtx_unlock(&exec_args_kva_mtx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-15 01:50:58 +00:00
|
|
|
static void
|
|
|
|
exec_free_args_kva(void *cookie)
|
|
|
|
{
|
|
|
|
|
|
|
|
exec_release_args_kva(cookie, exec_args_gen);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
exec_args_kva_lowmem(void *arg __unused)
|
|
|
|
{
|
|
|
|
SLIST_HEAD(, exec_args_kva) head;
|
|
|
|
struct exec_args_kva *argkva;
|
|
|
|
u_int gen;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Force an madvise of each KVA range. Any currently allocated ranges
|
|
|
|
* will have MADV_FREE applied once they are freed.
|
|
|
|
*/
|
|
|
|
SLIST_INIT(&head);
|
|
|
|
mtx_lock(&exec_args_kva_mtx);
|
|
|
|
SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
|
|
|
|
mtx_unlock(&exec_args_kva_mtx);
|
|
|
|
while ((argkva = SLIST_FIRST(&head)) != NULL) {
|
|
|
|
SLIST_REMOVE_HEAD(&head, next);
|
|
|
|
exec_release_args_kva(argkva, gen);
|
|
|
|
}
|
|
|
|
|
|
|
|
CPU_FOREACH(i) {
|
|
|
|
argkva = (void *)atomic_readandclear_ptr(
|
|
|
|
(uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
|
|
|
|
if (argkva != NULL)
|
|
|
|
exec_release_args_kva(argkva, gen);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
|
|
|
|
EVENTHANDLER_PRI_ANY);
|
|
|
|
|
2010-07-27 17:31:03 +00:00
|
|
|
/*
|
|
|
|
* Allocate temporary demand-paged, zero-filled memory for the file name,
|
2017-01-05 01:44:12 +00:00
|
|
|
* argument, and environment strings.
|
2010-07-27 17:31:03 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
exec_alloc_args(struct image_args *args)
|
|
|
|
{
|
|
|
|
|
2017-01-05 01:44:12 +00:00
|
|
|
args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
|
|
|
|
return (0);
|
2010-07-27 17:31:03 +00:00
|
|
|
}
|
|
|
|
|
2010-07-23 18:58:27 +00:00
|
|
|
void
|
2005-01-29 23:12:00 +00:00
|
|
|
exec_free_args(struct image_args *args)
|
|
|
|
{
|
|
|
|
|
2010-07-23 18:58:27 +00:00
|
|
|
if (args->buf != NULL) {
|
2017-01-05 01:44:12 +00:00
|
|
|
exec_free_args_kva(args->bufkva);
|
2005-01-29 23:12:00 +00:00
|
|
|
args->buf = NULL;
|
|
|
|
}
|
2010-07-28 04:47:40 +00:00
|
|
|
if (args->fname_buf != NULL) {
|
|
|
|
free(args->fname_buf, M_TEMP);
|
|
|
|
args->fname_buf = NULL;
|
|
|
|
}
|
Implement CloudABI's exec() call.
Summary:
In a runtime that is purely based on capability-based security, there is
a strong emphasis on how programs start their execution. We need to make
sure that we execute a new program with an exact set of file
descriptors, ensuring that credentials are not leaked into the process
accidentally.
Providing the right file descriptors is just half the problem. There
also needs to be a framework in place that gives meaning to these file
descriptors. How does a CloudABI mail server know which of the file
descriptors corresponds to the socket that receives incoming emails?
Furthermore, how will this mail server acquire its configuration
parameters, as it cannot open a configuration file from a global path on
disk?
CloudABI solves this problem by replacing traditional string command
line arguments by tree-like data structure consisting of scalars,
sequences and mappings (similar to YAML/JSON). In this structure, file
descriptors are treated as a first-class citizen. When calling exec(),
file descriptors are passed on to the new executable if and only if they
are referenced from this tree structure. See the cloudabi-run(1) man
page for more details and examples (sysutils/cloudabi-utils).
Fortunately, the kernel does not need to care about this tree structure
at all. The C library is responsible for serializing and deserializing,
but also for extracting the list of referenced file descriptors. The
system call only receives a copy of the serialized data and a layout of
what the new file descriptor table should look like:
int proc_exec(int execfd, const void *data, size_t datalen, const int *fds,
size_t fdslen);
This change introduces a set of fd*_remapped() functions:
- fdcopy_remapped() pulls a copy of a file descriptor table, remapping
all of the file descriptors according to the provided mapping table.
- fdinstall_remapped() replaces the file descriptor table of the process
by the copy created by fdcopy_remapped().
- fdescfree_remapped() frees the table in case we aborted before
fdinstall_remapped().
We then add a function exec_copyin_data_fds() that builds on top these
functions. It copies in the data and constructs a new remapped file
descriptor. This is used by cloudabi_sys_proc_exec().
Test Plan:
cloudabi-run(1) is capable of spawning processes successfully, providing
it data and file descriptors. procstat -f seems to confirm all is good.
Regular FreeBSD processes also work properly.
Reviewers: kib, mjg
Reviewed By: mjg
Subscribers: imp
Differential Revision: https://reviews.freebsd.org/D3079
2015-07-16 07:05:42 +00:00
|
|
|
if (args->fdp != NULL)
|
|
|
|
fdescfree_remapped(args->fdp);
|
2005-01-29 23:12:00 +00:00
|
|
|
}
|
|
|
|
|
2018-11-29 21:00:56 +00:00
|
|
|
/*
|
|
|
|
 * A set of functions to fill struct image args.
|
|
|
|
*
|
|
|
|
* NOTE: exec_args_add_fname() must be called (possibly with a NULL
|
|
|
|
* fname) before the other functions. All exec_args_add_arg() calls must
|
|
|
|
* be made before any exec_args_add_env() calls. exec_args_adjust_args()
|
|
|
|
* may be called any time after exec_args_add_fname().
|
|
|
|
*
|
|
|
|
* exec_args_add_fname() - install path to be executed
|
|
|
|
* exec_args_add_arg() - append an argument string
|
|
|
|
* exec_args_add_env() - append an env string
|
|
|
|
* exec_args_adjust_args() - adjust location of the argument list to
|
|
|
|
* allow new arguments to be prepended
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
exec_args_add_fname(struct image_args *args, const char *fname,
|
|
|
|
enum uio_seg segflg)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
size_t length;
|
|
|
|
|
|
|
|
KASSERT(args->fname == NULL, ("fname already appended"));
|
|
|
|
KASSERT(args->endp == NULL, ("already appending to args"));
|
|
|
|
|
|
|
|
if (fname != NULL) {
|
|
|
|
args->fname = args->buf;
|
|
|
|
error = segflg == UIO_SYSSPACE ?
|
|
|
|
copystr(fname, args->fname, PATH_MAX, &length) :
|
|
|
|
copyinstr(fname, args->fname, PATH_MAX, &length);
|
|
|
|
if (error != 0)
|
|
|
|
return (error == ENAMETOOLONG ? E2BIG : error);
|
|
|
|
} else
|
|
|
|
length = 0;
|
|
|
|
|
|
|
|
/* Set up for _arg_*()/_env_*() */
|
|
|
|
args->endp = args->buf + length;
|
|
|
|
/* begin_argv must be set and kept updated */
|
|
|
|
args->begin_argv = args->endp;
|
|
|
|
KASSERT(exec_map_entry_size - length >= ARG_MAX,
|
|
|
|
("too little space remaining for arguments %zu < %zu",
|
|
|
|
exec_map_entry_size - length, (size_t)ARG_MAX));
|
|
|
|
args->stringspace = ARG_MAX;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
exec_args_add_str(struct image_args *args, const char *str,
|
|
|
|
enum uio_seg segflg, int *countp)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
size_t length;
|
|
|
|
|
|
|
|
KASSERT(args->endp != NULL, ("endp not initialized"));
|
|
|
|
KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));
|
|
|
|
|
|
|
|
error = (segflg == UIO_SYSSPACE) ?
|
|
|
|
copystr(str, args->endp, args->stringspace, &length) :
|
|
|
|
copyinstr(str, args->endp, args->stringspace, &length);
|
|
|
|
if (error != 0)
|
|
|
|
return (error == ENAMETOOLONG ? E2BIG : error);
|
|
|
|
args->stringspace -= length;
|
|
|
|
args->endp += length;
|
|
|
|
(*countp)++;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
exec_args_add_arg(struct image_args *args, const char *argp,
|
|
|
|
enum uio_seg segflg)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(args->envc == 0, ("appending args after env"));
|
|
|
|
|
|
|
|
return (exec_args_add_str(args, argp, segflg, &args->argc));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
exec_args_add_env(struct image_args *args, const char *envp,
|
|
|
|
enum uio_seg segflg)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (args->envc == 0)
|
|
|
|
args->begin_envv = args->endp;
|
|
|
|
|
|
|
|
return (exec_args_add_str(args, envp, segflg, &args->envc));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Adjust the location of the argument list so that 'extend' bytes of
 * fresh space replace the first 'consume' bytes of the current argument
 * area (e.g. to allow new arguments to be prepended for interpreted
 * images).  The copied-in strings and the bookkeeping pointers are
 * shifted by the net difference.
 *
 * Returns 0 on success or E2BIG if the string area cannot absorb the
 * growth.
 */
int
exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
{
	ssize_t offset;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	/* Net change in used space; negative when shrinking. */
	offset = extend - consume;
	if (args->stringspace < offset)
		return (E2BIG);
	/*
	 * NOTE(review): the length expression copies
	 * (endp - begin_argv + consume) bytes, which reads 2 * consume
	 * bytes past endp (the live span is endp - begin_argv - consume).
	 * Harmless inside the oversized exec buffer, but confirm the
	 * "+ consume" is intentional.
	 */
	memmove(args->begin_argv + extend, args->begin_argv + consume,
	    args->endp - args->begin_argv + consume);
	/* begin_envv is only valid once an env string has been added. */
	if (args->envc > 0)
		args->begin_envv += offset;
	args->endp += offset;
	args->stringspace -= offset;
	return (0);
}
|
|
|
|
|
|
|
|
char *
|
|
|
|
exec_args_get_begin_envv(struct image_args *args)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(args->endp != NULL, ("endp not initialized"));
|
|
|
|
|
|
|
|
if (args->envc > 0)
|
|
|
|
return (args->begin_envv);
|
|
|
|
return (args->endp);
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2007-03-05 13:10:58 +00:00
|
|
|
* Copy strings out to the new process address space, constructing new arg
|
|
|
|
* and env vector tables. Return a pointer to the base so that it can be used
|
|
|
|
* as the initial stack pointer.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2019-11-18 20:07:43 +00:00
|
|
|
int
exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
{
	int argc, envc;
	char **vectp;
	char *stringp;
	uintptr_t destp, ustringp;
	struct ps_strings *arginfo;
	struct proc *p;
	size_t execpath_len;
	int error, szsigcode, szps;
	char canary[sizeof(long) * 8];

	szps = sizeof(pagesizes[0]) * MAXPAGESIZES;
	/*
	 * Calculate string base and vector table pointers.
	 * Also deal with signal trampoline code for this exec type.
	 */
	/* The execpath is only exported when ELF auxargs carry it. */
	if (imgp->execpath != NULL && imgp->auxargs != NULL)
		execpath_len = strlen(imgp->execpath) + 1;
	else
		execpath_len = 0;
	p = imgp->proc;
	szsigcode = 0;
	arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
	imgp->ps_strings = arginfo;
	/*
	 * Only copy out per-process sigcode when the ABI does not map a
	 * shared trampoline at sv_sigcode_base.
	 */
	if (p->p_sysent->sv_sigcode_base == 0) {
		if (p->p_sysent->sv_szsigcode != NULL)
			szsigcode = *(p->p_sysent->sv_szsigcode);
	}
	/* Carve items downward starting just below the ps_strings area. */
	destp = (uintptr_t)arginfo;

	/*
	 * install sigcode
	 */
	if (szsigcode != 0) {
		destp -= szsigcode;
		destp = rounddown2(destp, sizeof(void *));
		error = copyout(p->p_sysent->sv_sigcode, (void *)destp,
		    szsigcode);
		if (error != 0)
			return (error);
	}

	/*
	 * Copy the image path for the rtld.
	 */
	if (execpath_len != 0) {
		destp -= execpath_len;
		destp = rounddown2(destp, sizeof(void *));
		imgp->execpathp = (void *)destp;
		error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
		if (error != 0)
			return (error);
	}

	/*
	 * Prepare the canary for SSP.
	 */
	arc4rand(canary, sizeof(canary), 0);
	destp -= sizeof(canary);
	imgp->canary = (void *)destp;
	error = copyout(canary, imgp->canary, sizeof(canary));
	if (error != 0)
		return (error);
	imgp->canarylen = sizeof(canary);

	/*
	 * Prepare the pagesizes array.
	 */
	destp -= szps;
	destp = rounddown2(destp, sizeof(void *));
	imgp->pagesizes = (void *)destp;
	error = copyout(pagesizes, imgp->pagesizes, szps);
	if (error != 0)
		return (error);
	imgp->pagesizeslen = szps;

	/*
	 * Allocate room for the argument and environment strings.
	 */
	/* Only the space actually consumed by the strings is reserved. */
	destp -= ARG_MAX - imgp->args->stringspace;
	destp = rounddown2(destp, sizeof(void *));
	ustringp = destp;

	/* Optional ASLR-style gap inserted by the ABI, if configured. */
	if (imgp->sysent->sv_stackgap != NULL)
		imgp->sysent->sv_stackgap(imgp, &destp);

	if (imgp->auxargs) {
		/*
		 * Allocate room on the stack for the ELF auxargs
		 * array.  It has up to AT_COUNT entries.
		 */
		destp -= AT_COUNT * sizeof(Elf_Auxinfo);
		destp = rounddown2(destp, sizeof(void *));
	}

	vectp = (char **)destp;

	/*
	 * Allocate room for the argv[] and env vectors including the
	 * terminating NULL pointers.
	 */
	vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

	/*
	 * vectp also becomes our initial stack base
	 */
	*stack_base = (uintptr_t)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	error = copyout(stringp, (void *)ustringp,
	    ARG_MAX - imgp->args->stringspace);
	if (error != 0)
		return (error);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	imgp->argv = vectp;
	if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nargvstr, argc) != 0)
		return (EFAULT);

	/*
	 * Fill in argument portion of vector table.
	 */
	for (; argc > 0; --argc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		/* Advance ustringp past the string and its NUL. */
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	if (suword(vectp++, 0) != 0)
		return (EFAULT);

	imgp->envv = vectp;
	if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nenvstr, envc) != 0)
		return (EFAULT);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		/* Advance ustringp past the string and its NUL. */
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* end of vector table is a null pointer */
	if (suword(vectp, 0) != 0)
		return (EFAULT);

	/* The ELF auxargs block, when present, follows the env vector. */
	if (imgp->auxargs) {
		vectp++;
		error = imgp->sysent->sv_copyout_auxargs(imgp,
		    (uintptr_t)vectp);
		if (error != 0)
			return (error);
	}

	return (0);
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
1994-05-25 09:21:21 +00:00
|
|
|
* Check permissions of file to execute.
|
2000-11-30 21:06:05 +00:00
|
|
|
* Called with imgp->vp locked.
|
1994-05-25 09:21:21 +00:00
|
|
|
* Return 0 for success or error code on failure.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1998-03-02 05:47:58 +00:00
|
|
|
int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->vp;
	struct vattr *attr = imgp->attr;
	struct thread *td;
	int error;

	td = curthread;

	/* Get file attributes */
	error = VOP_GETATTR(vp, attr, td->td_ucred);
	if (error)
		return (error);

#ifdef MAC
	/* Let the MAC framework veto the exec before any other checks. */
	error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
	if (error)
		return (error);
#endif

	/*
	 * 1) Check if file execution is disabled for the filesystem that
	 *    this file resides on.
	 * 2) Ensure that at least one execute bit is on. Otherwise, a
	 *    privileged user will always succeed, and we don't want this
	 *    to happen unless the file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 * Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 *
	 * Add a text reference now so no one can write to the
	 * executable while we're activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		return (error);
	/* Caller relies on textset to drop the reference on failure. */
	imgp->textset = true;

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error == 0)
		imgp->opened = 1;
	return (error);
}
|
1998-10-16 03:55:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Exec handler registration
|
|
|
|
*/
|
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_register(const struct execsw *execsw_arg)
|
1998-10-16 03:55:01 +00:00
|
|
|
{
|
|
|
|
const struct execsw **es, **xs, **newexecsw;
|
2018-01-22 02:08:10 +00:00
|
|
|
u_int count = 2; /* New slot and trailing NULL */
|
1998-10-16 03:55:01 +00:00
|
|
|
|
|
|
|
if (execsw)
|
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
count++;
|
2003-02-19 05:47:46 +00:00
|
|
|
newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
|
1998-10-16 03:55:01 +00:00
|
|
|
xs = newexecsw;
|
|
|
|
if (execsw)
|
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
*xs++ = *es;
|
|
|
|
*xs++ = execsw_arg;
|
|
|
|
*xs = NULL;
|
|
|
|
if (execsw)
|
|
|
|
free(execsw, M_TEMP);
|
|
|
|
execsw = newexecsw;
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1998-10-16 03:55:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_unregister(const struct execsw *execsw_arg)
|
1998-10-16 03:55:01 +00:00
|
|
|
{
|
|
|
|
const struct execsw **es, **xs, **newexecsw;
|
|
|
|
int count = 1;
|
|
|
|
|
|
|
|
if (execsw == NULL)
|
|
|
|
panic("unregister with no handlers left?\n");
|
|
|
|
|
|
|
|
for (es = execsw; *es; es++) {
|
|
|
|
if (*es == execsw_arg)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (*es == NULL)
|
2002-08-24 22:01:40 +00:00
|
|
|
return (ENOENT);
|
1998-10-16 03:55:01 +00:00
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
if (*es != execsw_arg)
|
|
|
|
count++;
|
2003-02-19 05:47:46 +00:00
|
|
|
newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
|
1998-10-16 03:55:01 +00:00
|
|
|
xs = newexecsw;
|
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
if (*es != execsw_arg)
|
|
|
|
*xs++ = *es;
|
|
|
|
*xs = NULL;
|
|
|
|
if (execsw)
|
|
|
|
free(execsw, M_TEMP);
|
|
|
|
execsw = newexecsw;
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1998-10-16 03:55:01 +00:00
|
|
|
}
|