2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
2017-11-27 15:20:12 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
|
|
|
|
*
|
1994-05-25 09:21:21 +00:00
|
|
|
* Copyright (c) 1993, David Greenman
|
|
|
|
* All rights reserved.
|
1994-05-24 10:09:53 +00:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
1994-05-25 09:21:21 +00:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
1994-05-24 10:09:53 +00:00
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
1994-05-25 09:21:21 +00:00
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
1994-05-24 10:09:53 +00:00
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2011-06-30 10:56:02 +00:00
|
|
|
#include "opt_capsicum.h"
|
2005-06-24 00:16:57 +00:00
|
|
|
#include "opt_hwpmc_hooks.h"
|
2002-07-01 19:49:04 +00:00
|
|
|
#include "opt_ktrace.h"
|
2007-12-29 19:53:04 +00:00
|
|
|
#include "opt_vm.h"
|
2002-07-01 19:49:04 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/acct.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/capsicum.h>
|
|
|
|
#include <sys/eventhandler.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <sys/exec.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/fcntl.h>
|
|
|
|
#include <sys/filedesc.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <sys/imgact.h>
|
1996-03-10 08:42:54 +00:00
|
|
|
#include <sys/imgact_elf.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/lock.h>
|
2001-07-09 19:01:42 +00:00
|
|
|
#include <sys/malloc.h>
|
2017-01-05 01:44:12 +00:00
|
|
|
#include <sys/mman.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/mount.h>
|
|
|
|
#include <sys/mutex.h>
|
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/pioctl.h>
|
2006-11-06 13:42:10 +00:00
|
|
|
#include <sys/priv.h>
|
1996-05-01 02:43:13 +00:00
|
|
|
#include <sys/proc.h>
|
2016-07-15 15:32:09 +00:00
|
|
|
#include <sys/ptrace.h>
|
2004-11-27 06:51:39 +00:00
|
|
|
#include <sys/resourcevar.h>
|
2013-03-09 02:32:23 +00:00
|
|
|
#include <sys/rwlock.h>
|
2012-03-08 19:41:05 +00:00
|
|
|
#include <sys/sched.h>
|
2008-05-24 06:22:16 +00:00
|
|
|
#include <sys/sdt.h>
|
2004-04-23 03:01:40 +00:00
|
|
|
#include <sys/sf_buf.h>
|
1994-10-02 17:35:40 +00:00
|
|
|
#include <sys/shm.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/signalvar.h>
|
2017-01-05 01:44:12 +00:00
|
|
|
#include <sys/smp.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/syscallsubr.h>
|
1996-02-24 14:32:53 +00:00
|
|
|
#include <sys/sysctl.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/sysent.h>
|
|
|
|
#include <sys/sysproto.h>
|
1996-05-01 02:43:13 +00:00
|
|
|
#include <sys/vnode.h>
|
2017-01-05 01:28:08 +00:00
|
|
|
#include <sys/wait.h>
|
2002-07-01 23:18:08 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
#include <sys/ktrace.h>
|
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
|
|
|
|
#include <vm/vm.h>
|
1995-12-07 12:48:31 +00:00
|
|
|
#include <vm/vm_param.h>
|
|
|
|
#include <vm/pmap.h>
|
1998-01-11 21:35:38 +00:00
|
|
|
#include <vm/vm_page.h>
|
1995-12-07 12:48:31 +00:00
|
|
|
#include <vm/vm_map.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <vm/vm_kern.h>
|
1995-12-07 12:48:31 +00:00
|
|
|
#include <vm/vm_extern.h>
|
1997-04-18 02:43:05 +00:00
|
|
|
#include <vm/vm_object.h>
|
1998-01-11 21:35:38 +00:00
|
|
|
#include <vm/vm_pager.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2005-04-19 04:01:25 +00:00
|
|
|
#ifdef HWPMC_HOOKS
|
|
|
|
#include <sys/pmckern.h>
|
|
|
|
#endif
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <machine/reg.h>
|
|
|
|
|
2006-09-01 11:45:40 +00:00
|
|
|
#include <security/audit/audit.h>
|
2006-10-22 11:52:19 +00:00
|
|
|
#include <security/mac/mac_framework.h>
|
2006-09-01 11:45:40 +00:00
|
|
|
|
2008-05-24 06:22:16 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
|
|
|
#include <sys/dtrace_bsd.h>
|
|
|
|
dtrace_execexit_func_t dtrace_fasttrap_exec;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
SDT_PROVIDER_DECLARE(proc);
|
2015-12-16 23:39:27 +00:00
|
|
|
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
|
|
|
|
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
|
|
|
|
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");
|
2008-05-24 06:22:16 +00:00
|
|
|
|
1999-11-16 20:31:58 +00:00
|
|
|
MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
|
|
|
|
|
2015-09-07 16:44:28 +00:00
|
|
|
int coredump_pack_fileinfo = 1;
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
|
|
|
|
&coredump_pack_fileinfo, 0,
|
|
|
|
"Enable file path packing in 'procstat -f' coredump notes");
|
|
|
|
|
Fix core corruption caused by race in note_procstat_vmmap
This fix is spiritually similar to r287442 and was discovered thanks to
the KASSERT added in that revision.
NT_PROCSTAT_VMMAP output length, when packing kinfo structs, is tied to
the length of filenames corresponding to vnodes in the process' vm map
via vn_fullpath. As vnodes may move during coredump, this is racy.
We do not remove the race, only prevent it from causing coredump
corruption.
- Add a sysctl, kern.coredump_pack_vmmapinfo, to allow users to disable
kinfo packing for PROCSTAT_VMMAP notes. This avoids VMMAP corruption
and truncation, even if names change, at the cost of up to PATH_MAX
bytes per mapped object. The new sysctl is documented in core.5.
- Fix note_procstat_vmmap to self-limit in the second pass. This
addresses corruption, at the cost of sometimes producing a truncated
result.
- Fix PROCSTAT_VMMAP consumers libutil (and libprocstat, via copy-paste)
to grok the new zero padding.
Reported by: pho (https://people.freebsd.org/~pho/stress/log/datamove4-2.txt)
Relnotes: yes
Sponsored by: EMC / Isilon Storage Division
Differential Revision: https://reviews.freebsd.org/D3824
2015-10-06 18:07:00 +00:00
|
|
|
int coredump_pack_vmmapinfo = 1;
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
|
|
|
|
&coredump_pack_vmmapinfo, 0,
|
|
|
|
"Enable file path packing in 'procstat -v' coredump notes");
|
|
|
|
|
2002-09-21 22:07:17 +00:00
|
|
|
static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
|
|
|
|
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
|
2003-01-04 07:54:23 +00:00
|
|
|
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
|
2005-01-29 23:12:00 +00:00
|
|
|
static int do_execve(struct thread *td, struct image_args *args,
|
|
|
|
struct mac *mac_p);
|
2002-09-21 22:07:17 +00:00
|
|
|
|
2000-07-05 07:46:41 +00:00
|
|
|
/* XXX This should be vm_size_t. */
|
2016-10-19 19:42:01 +00:00
|
|
|
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
|
2018-08-17 14:35:10 +00:00
|
|
|
CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU", "");
|
1998-12-27 18:03:29 +00:00
|
|
|
|
2000-07-05 07:46:41 +00:00
|
|
|
/* XXX This should be vm_size_t. */
|
2011-07-17 23:05:24 +00:00
|
|
|
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
|
2016-10-19 19:42:01 +00:00
|
|
|
CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU", "");
|
1996-02-24 14:32:53 +00:00
|
|
|
|
2016-10-19 19:42:01 +00:00
|
|
|
SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
|
2003-01-04 07:54:23 +00:00
|
|
|
NULL, 0, sysctl_kern_stackprot, "I", "");
|
|
|
|
|
1999-11-16 20:31:58 +00:00
|
|
|
u_long ps_arg_cache_limit = PAGE_SIZE / 16;
|
2001-11-08 00:24:48 +00:00
|
|
|
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
|
2000-07-05 07:46:41 +00:00
|
|
|
&ps_arg_cache_limit, 0, "");
|
1999-11-16 20:31:58 +00:00
|
|
|
|
2013-10-15 06:38:40 +00:00
|
|
|
static int disallow_high_osrel;
|
|
|
|
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
|
|
|
|
&disallow_high_osrel, 0,
|
|
|
|
"Disallow execution of binaries built for higher version of the world");
|
|
|
|
|
2009-10-02 17:48:51 +00:00
|
|
|
static int map_at_zero = 0;
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
|
2009-10-02 17:48:51 +00:00
|
|
|
"Permit processes to map an object at virtual address 0.");
|
|
|
|
|
2017-11-09 22:51:48 +00:00
|
|
|
EVENTHANDLER_LIST_DECLARE(process_exec);
|
|
|
|
|
2002-09-21 22:07:17 +00:00
|
|
|
static int
|
|
|
|
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
2004-02-18 00:54:17 +00:00
|
|
|
int error;
|
2002-09-21 22:07:17 +00:00
|
|
|
|
|
|
|
p = curproc;
|
2004-10-11 22:04:16 +00:00
|
|
|
#ifdef SCTL_MASK32
|
|
|
|
if (req->flags & SCTL_MASK32) {
|
2004-02-18 00:54:17 +00:00
|
|
|
unsigned int val;
|
|
|
|
val = (unsigned int)p->p_sysent->sv_psstrings;
|
|
|
|
error = SYSCTL_OUT(req, &val, sizeof(val));
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
|
|
|
|
sizeof(p->p_sysent->sv_psstrings));
|
|
|
|
return error;
|
2002-09-21 22:07:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
2004-02-18 00:54:17 +00:00
|
|
|
int error;
|
2002-09-21 22:07:17 +00:00
|
|
|
|
|
|
|
p = curproc;
|
2004-10-11 22:04:16 +00:00
|
|
|
#ifdef SCTL_MASK32
|
|
|
|
if (req->flags & SCTL_MASK32) {
|
2004-02-18 00:54:17 +00:00
|
|
|
unsigned int val;
|
|
|
|
val = (unsigned int)p->p_sysent->sv_usrstack;
|
|
|
|
error = SYSCTL_OUT(req, &val, sizeof(val));
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
|
|
|
|
sizeof(p->p_sysent->sv_usrstack));
|
|
|
|
return error;
|
2002-09-21 22:07:17 +00:00
|
|
|
}
|
|
|
|
|
2003-01-04 07:54:23 +00:00
|
|
|
static int
|
|
|
|
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
p = curproc;
|
|
|
|
return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
|
|
|
|
sizeof(p->p_sysent->sv_stackprot)));
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
1998-10-16 03:55:01 +00:00
|
|
|
* Each of the items is a pointer to a `const struct execsw', hence the
|
|
|
|
* double pointer here.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
1998-10-16 03:55:01 +00:00
|
|
|
static const struct execsw **execsw;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2003-12-28 04:18:13 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;	/* path of the executable (userspace pointer) */
	char **argv;	/* argument vector (userspace pointer) */
	char **envv;	/* environment vector (userspace pointer) */
};
#endif
|
|
|
|
|
|
|
|
int
|
2015-05-10 09:00:40 +00:00
|
|
|
sys_execve(struct thread *td, struct execve_args *uap)
|
2003-12-28 04:18:13 +00:00
|
|
|
{
|
2005-01-29 23:12:00 +00:00
|
|
|
struct image_args args;
|
2015-05-10 09:00:40 +00:00
|
|
|
struct vmspace *oldvmspace;
|
|
|
|
int error;
|
2005-01-29 23:12:00 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
error = pre_execve(td, &oldvmspace);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
2005-01-29 23:12:00 +00:00
|
|
|
error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
|
|
|
|
uap->argv, uap->envv);
|
|
|
|
if (error == 0)
|
|
|
|
error = kern_execve(td, &args, NULL);
|
2015-05-10 09:00:40 +00:00
|
|
|
post_execve(td, error, oldvmspace);
|
2005-01-29 23:12:00 +00:00
|
|
|
return (error);
|
2003-12-28 04:18:13 +00:00
|
|
|
}
|
|
|
|
|
2008-03-31 12:05:52 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
	int fd;		/* open descriptor referencing the executable */
	char **argv;	/* argument vector (userspace pointer) */
	char **envv;	/* environment vector (userspace pointer) */
};	/* was missing the terminating semicolon: a syntax error when
	 * _SYS_SYSPROTO_H_ is not defined */
#endif
|
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_fexecve(struct thread *td, struct fexecve_args *uap)
|
2008-03-31 12:05:52 +00:00
|
|
|
{
|
|
|
|
struct image_args args;
|
2015-05-10 09:00:40 +00:00
|
|
|
struct vmspace *oldvmspace;
|
|
|
|
int error;
|
2008-03-31 12:05:52 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
error = pre_execve(td, &oldvmspace);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
2008-03-31 12:05:52 +00:00
|
|
|
error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
|
|
|
|
uap->argv, uap->envv);
|
|
|
|
if (error == 0) {
|
|
|
|
args.fd = uap->fd;
|
|
|
|
error = kern_execve(td, &args, NULL);
|
|
|
|
}
|
2015-05-10 09:00:40 +00:00
|
|
|
post_execve(td, error, oldvmspace);
|
2008-03-31 12:05:52 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2003-12-28 04:18:13 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
	char *fname;		/* path of the executable (userspace pointer) */
	char **argv;		/* argument vector (userspace pointer) */
	char **envv;		/* environment vector (userspace pointer) */
	struct mac *mac_p;	/* MAC label to apply (userspace pointer) */
};
#endif
|
|
|
|
|
|
|
|
int
|
2015-05-10 09:00:40 +00:00
|
|
|
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
|
2003-12-28 04:18:13 +00:00
|
|
|
{
|
|
|
|
#ifdef MAC
|
2005-01-29 23:12:00 +00:00
|
|
|
struct image_args args;
|
2015-05-10 09:00:40 +00:00
|
|
|
struct vmspace *oldvmspace;
|
|
|
|
int error;
|
2005-01-29 23:12:00 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
error = pre_execve(td, &oldvmspace);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
2005-01-29 23:12:00 +00:00
|
|
|
error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
|
|
|
|
uap->argv, uap->envv);
|
|
|
|
if (error == 0)
|
|
|
|
error = kern_execve(td, &args, uap->mac_p);
|
2015-05-10 09:00:40 +00:00
|
|
|
post_execve(td, error, oldvmspace);
|
2005-01-29 23:12:00 +00:00
|
|
|
return (error);
|
2003-12-28 04:18:13 +00:00
|
|
|
#else
|
|
|
|
return (ENOSYS);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2004-10-07 13:50:10 +00:00
|
|
|
/*
 * Prepare the current thread's process for an execve() attempt.
 *
 * If the process ever had multiple threads, place it in SINGLE_BOUNDARY
 * single-threading mode so all other threads park at the user boundary;
 * they can later either be resumed (failed exec) or forced to exit
 * (successful exec) by post_execve().  On a single-threading failure the
 * syscall is restarted via ERESTART.
 *
 * On success, *oldvmspace records the current vmspace so post_execve()
 * can release it if exec replaced it (see TDP_EXECVMSPC).
 */
int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
	struct proc *p;
	int error;

	KASSERT(td == curthread, ("non-current thread %p", td));
	error = 0;
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/* Park the other threads at the user boundary. */
		if (thread_single(p, SINGLE_BOUNDARY) != 0)
			error = ERESTART;
		PROC_UNLOCK(p);
	}
	/* A pending TDP_EXECVMSPC here would mean a nested execve. */
	KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
	    ("nested execve"));
	*oldvmspace = p->p_vmspace;
	return (error);
}
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
/*
 * Finish an execve() attempt started by pre_execve().
 *
 * 'error' is the result of kern_execve(): EJUSTRETURN indicates the
 * exec succeeded.  On success the process is upgraded to SINGLE_EXIT so
 * the remaining threads exit; on failure the SINGLE_BOUNDARY
 * single-threading from pre_execve() is ended and the other threads
 * resume.  If exec replaced the vmspace (TDP_EXECVMSPC set), the old
 * vmspace passed in by the caller is freed here, after the other
 * threads are no longer running on it.
 */
void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
	struct proc *p;

	KASSERT(td == curthread, ("non-current thread %p", td));
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/*
		 * If success, we upgrade to SINGLE_EXIT state to
		 * force other threads to suicide.
		 */
		if (error == EJUSTRETURN)
			thread_single(p, SINGLE_EXIT);
		else
			thread_single_end(p, SINGLE_BOUNDARY);
		PROC_UNLOCK(p);
	}
	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
		/* Exec installed a new vmspace; drop the old one now. */
		KASSERT(p->p_vmspace != oldvmspace,
		    ("oldvmspace still used"));
		vmspace_free(oldvmspace);
		td->td_pflags &= ~TDP_EXECVMSPC;
	}
}
|
In original kern_execve() code, at the start of the function, it forces
all other threads to suicide, problem is execve() could be failed, and
a failed execve() would change threaded process to unthreaded, this side
effect is unexpected.
The new code introduces a new single threading mode SINGLE_BOUNDARY, in
the mode, all threads should suspend themself at user boundary except
the singler. we can not use SINGLE_NO_EXIT because we want to start from
a clean state if execve() is successful, suspending other threads at unknown
point and later resuming them from there and forcing them to exit at user
boundary may cause the process to start from a dirty state. If execve() is
successful, current thread upgrades to SINGLE_EXIT mode and forces other
threads to suicide at user boundary, otherwise, other threads will be resumed
and their interrupted syscall will be restarted.
Reviewed by: julian
2004-10-06 00:40:41 +00:00
|
|
|
|
2015-05-10 09:00:40 +00:00
|
|
|
/*
|
|
|
|
* XXX: kern_execve has the astonishing property of not always returning to
|
|
|
|
* the caller. If sufficiently bad things happen during the call to
|
|
|
|
* do_execve(), it can end up calling exit1(); as a result, callers must
|
|
|
|
* avoid doing anything which they might need to undo (e.g., allocating
|
|
|
|
* memory).
|
|
|
|
*/
|
|
|
|
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p)
{

	/*
	 * Record argv and envv for audit before handing off to
	 * do_execve(), which may not return (see comment above).
	 */
	AUDIT_ARG_ARGV(args->begin_argv, args->argc,
	    args->begin_envv - args->begin_argv);
	AUDIT_ARG_ENVV(args->begin_envv, args->envc,
	    args->endp - args->begin_envv);
	return (do_execve(td, args, mac_p));
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
Remove reference to struct execve_args from struct imgact, which
describes an image activation instance. Instead, make use of the
existing fname structure entry, and introduce two new entries,
userspace_argv, and userspace_envv. With the addition of
mac_execve(), this divorces the image structure from the specifics
of the execve() system call, removes a redundant pointer, etc.
No semantic change from current behavior, but it means that the
structure doesn't depend on syscalls.master-generated includes.
There seems to be some redundant initialization of imgact entries,
which I have maintained, but which could probably use some cleaning
up at some point.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2002-11-05 01:59:56 +00:00
|
|
|
* In-kernel implementation of execve(). All arguments are assumed to be
|
|
|
|
* userspace pointers from the passed thread.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
Remove reference to struct execve_args from struct imgact, which
describes an image activation instance. Instead, make use of the
existing fname structure entry, and introduce two new entries,
userspace_argv, and userspace_envv. With the addition of
mac_execve(), this divorces the image structure from the specifics
of the execve() system call, removes a redundant pointer, etc.
No semantic change from current behavior, but it means that the
structure doesn't depend on syscalls.master-generated includes.
There seems to be some redundant initialization of imgact entries,
which I have maintained, but which could probably use some cleaning
up at some point.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2002-11-05 01:59:56 +00:00
|
|
|
static int
|
2017-05-17 00:34:34 +00:00
|
|
|
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p)
|
1994-05-25 09:21:21 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2009-02-26 16:32:48 +00:00
|
|
|
struct nameidata nd;
|
2016-05-26 23:18:54 +00:00
|
|
|
struct ucred *oldcred;
|
2014-07-01 09:21:32 +00:00
|
|
|
struct uidinfo *euip = NULL;
|
1999-12-27 10:42:55 +00:00
|
|
|
register_t *stack_base;
|
2009-10-23 15:14:54 +00:00
|
|
|
int error, i;
|
1995-11-06 12:52:37 +00:00
|
|
|
struct image_params image_params, *imgp;
|
2005-10-12 06:56:00 +00:00
|
|
|
struct vattr attr;
|
2002-03-19 21:25:46 +00:00
|
|
|
int (*img_first)(struct image_params *);
|
2002-06-20 17:27:28 +00:00
|
|
|
struct pargs *oldargs = NULL, *newargs = NULL;
|
2016-05-27 15:03:38 +00:00
|
|
|
struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
|
2002-06-07 05:41:27 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
struct vnode *tracevp = NULL;
|
2003-03-13 18:24:22 +00:00
|
|
|
struct ucred *tracecred = NULL;
|
2002-06-07 05:41:27 +00:00
|
|
|
#endif
|
2015-07-14 01:13:37 +00:00
|
|
|
struct vnode *oldtextvp = NULL, *newtextvp;
|
2002-07-27 18:06:49 +00:00
|
|
|
int credential_changing;
|
2002-08-13 06:55:28 +00:00
|
|
|
int textset;
|
2002-11-05 14:57:49 +00:00
|
|
|
#ifdef MAC
|
Introduce two related changes to the TrustedBSD MAC Framework:
(1) Abstract interpreter vnode labeling in execve(2) and mac_execve(2)
so that the general exec code isn't aware of the details of
allocating, copying, and freeing labels, rather, simply passes in
a void pointer to start and stop functions that will be used by
the framework. This change will be MFC'd.
(2) Introduce a new flags field to the MAC_POLICY_SET(9) interface
allowing policies to declare which types of objects require label
allocation, initialization, and destruction, and define a set of
flags covering various supported object types (MPC_OBJECT_PROC,
MPC_OBJECT_VNODE, MPC_OBJECT_INPCB, ...). This change reduces the
overhead of compiling the MAC Framework into the kernel if policies
aren't loaded, or if policies require labels on only a small number
or even no object types. Each time a policy is loaded or unloaded,
we recalculate a mask of labeled object types across all policies
present in the system. Eliminate MAC_ALWAYS_LABEL_MBUF option as it
is no longer required.
MFC after: 1 week ((1) only)
Reviewed by: csjp
Obtained from: TrustedBSD Project
Sponsored by: Apple, Inc.
2008-08-23 15:26:36 +00:00
|
|
|
struct label *interpvplabel = NULL;
|
Modify the MAC Framework so that instead of embedding a (struct label)
in various kernel objects to represent security data, we embed a
(struct label *) pointer, which now references labels allocated using
a UMA zone (mac_label.c). This allows the size and shape of struct
label to be varied without changing the size and shape of these kernel
objects, which become part of the frozen ABI with 5-STABLE. This opens
the door for boot-time selection of the number of label slots, and hence
changes to the bound on the number of simultaneous labeled policies
at boot-time instead of compile-time. This also makes it easier to
embed label references in new objects as required for locking/caching
with fine-grained network stack locking, such as inpcb structures.
This change also moves us further in the direction of hiding the
structure of kernel objects from MAC policy modules, not to mention
dramatically reducing the number of '&' symbols appearing in both the
MAC Framework and MAC policy modules, and improving readability.
While this results in minimal performance change with MAC enabled, it
will observably shrink the size of a number of critical kernel data
structures for the !MAC case, and should have a small (but measurable)
performance benefit (i.e., struct vnode, struct socket) do to memory
conservation and reduced cost of zeroing memory.
NOTE: Users of MAC must recompile their kernel and all MAC modules as a
result of this change. Because this is an API change, third party
MAC modules will also need to be updated to make less use of the '&'
symbol.
Suggestions from: bmilekic
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2003-11-12 03:14:31 +00:00
|
|
|
int will_transition;
|
2002-11-05 14:57:49 +00:00
|
|
|
#endif
|
2005-06-30 19:01:26 +00:00
|
|
|
#ifdef HWPMC_HOOKS
|
|
|
|
struct pmckern_procexec pe;
|
|
|
|
#endif
|
2008-03-31 12:05:52 +00:00
|
|
|
static const char fexecv_proc_title[] = "(fexecv)";
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1995-11-06 12:52:37 +00:00
|
|
|
imgp = &image_params;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2001-10-27 11:11:25 +00:00
|
|
|
/*
|
|
|
|
* Lock the process and set the P_INEXEC flag to indicate that
|
|
|
|
* it should be left alone until we're done here. This is
|
|
|
|
* necessary to avoid race conditions - e.g. in ptrace() -
|
|
|
|
* that might allow a local user to illicitly obtain elevated
|
|
|
|
* privileges.
|
|
|
|
*/
|
|
|
|
PROC_LOCK(p);
|
|
|
|
KASSERT((p->p_flag & P_INEXEC) == 0,
|
2001-12-10 05:40:12 +00:00
|
|
|
("%s(): process already has P_INEXEC flag", __func__));
|
2001-10-27 11:11:25 +00:00
|
|
|
p->p_flag |= P_INEXEC;
|
|
|
|
PROC_UNLOCK(p);
|
2001-09-12 08:38:13 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
1995-11-06 12:52:37 +00:00
|
|
|
* Initialize part of the common data
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2014-09-29 23:59:19 +00:00
|
|
|
bzero(imgp, sizeof(*imgp));
|
1995-11-06 12:52:37 +00:00
|
|
|
imgp->proc = p;
|
|
|
|
imgp->attr = &attr;
|
2005-01-29 23:12:00 +00:00
|
|
|
imgp->args = args;
|
2016-05-26 23:18:54 +00:00
|
|
|
oldcred = p->p_ucred;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2002-11-05 17:51:56 +00:00
|
|
|
#ifdef MAC
|
Modify the MAC Framework so that instead of embedding a (struct label)
in various kernel objects to represent security data, we embed a
(struct label *) pointer, which now references labels allocated using
a UMA zone (mac_label.c). This allows the size and shape of struct
label to be varied without changing the size and shape of these kernel
objects, which become part of the frozen ABI with 5-STABLE. This opens
the door for boot-time selection of the number of label slots, and hence
changes to the bound on the number of simultaneous labeled policies
at boot-time instead of compile-time. This also makes it easier to
embed label references in new objects as required for locking/caching
with fine-grained network stack locking, such as inpcb structures.
This change also moves us further in the direction of hiding the
structure of kernel objects from MAC policy modules, not to mention
dramatically reducing the number of '&' symbols appearing in both the
MAC Framework and MAC policy modules, and improving readability.
While this results in minimal performance change with MAC enabled, it
will observably shrink the size of a number of critical kernel data
structures for the !MAC case, and should have a small (but measurable)
performance benefit (i.e., struct vnode, struct socket) do to memory
conservation and reduced cost of zeroing memory.
NOTE: Users of MAC must recompile their kernel and all MAC modules as a
result of this change. Because this is an API change, third party
MAC modules will also need to be updated to make less use of the '&'
symbol.
Suggestions from: bmilekic
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2003-11-12 03:14:31 +00:00
|
|
|
error = mac_execve_enter(imgp, mac_p);
|
2005-05-03 16:24:59 +00:00
|
|
|
if (error)
|
2002-11-05 17:51:56 +00:00
|
|
|
goto exec_fail;
|
|
|
|
#endif
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
|
|
|
* Translate the file name. namei() returns a vnode pointer
|
2016-04-29 22:15:33 +00:00
|
|
|
* in ni_vp among other things.
|
2006-09-01 11:45:40 +00:00
|
|
|
*
|
|
|
|
* XXXAUDIT: It would be desirable to also audit the name of the
|
|
|
|
* interpreter if this is an interpreted binary.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname != NULL) {
|
2009-02-26 16:32:48 +00:00
|
|
|
NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME
|
2012-10-22 17:50:54 +00:00
|
|
|
| AUDITVNODE1, UIO_SYSSPACE, args->fname, td);
|
2008-03-31 12:05:52 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2015-12-16 23:39:27 +00:00
|
|
|
SDT_PROBE1(proc, , , exec, args->fname);
|
2008-11-05 19:40:36 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
interpret:
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname != NULL) {
|
2011-06-30 10:56:02 +00:00
|
|
|
#ifdef CAPABILITY_MODE
|
|
|
|
/*
|
|
|
|
* While capability mode can't reach this point via direct
|
|
|
|
* path arguments to execve(), we also don't allow
|
|
|
|
* interpreters to be used in capability mode (for now).
|
|
|
|
* Catch indirect lookups and return a permissions error.
|
|
|
|
*/
|
|
|
|
if (IN_CAPABILITY_MODE(td)) {
|
|
|
|
error = ECAPMODE;
|
|
|
|
goto exec_fail;
|
|
|
|
}
|
|
|
|
#endif
|
2009-02-26 16:32:48 +00:00
|
|
|
error = namei(&nd);
|
2008-03-31 12:05:52 +00:00
|
|
|
if (error)
|
|
|
|
goto exec_fail;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2015-07-14 01:13:37 +00:00
|
|
|
newtextvp = nd.ni_vp;
|
|
|
|
imgp->vp = newtextvp;
|
2008-03-31 12:05:52 +00:00
|
|
|
} else {
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_FD(args->fd);
|
2011-08-11 12:30:23 +00:00
|
|
|
/*
|
2012-09-27 16:43:23 +00:00
|
|
|
* Descriptors opened only with O_EXEC or O_RDONLY are allowed.
|
2011-08-11 12:30:23 +00:00
|
|
|
*/
|
2018-05-09 18:47:24 +00:00
|
|
|
error = fgetvp_exec(td, args->fd, &cap_fexecve_rights, &newtextvp);
|
2008-03-31 12:05:52 +00:00
|
|
|
if (error)
|
|
|
|
goto exec_fail;
|
2015-07-14 01:13:37 +00:00
|
|
|
vn_lock(newtextvp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
AUDIT_ARG_VNODE1(newtextvp);
|
|
|
|
imgp->vp = newtextvp;
|
2008-03-31 12:05:52 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1995-03-19 23:08:12 +00:00
|
|
|
/*
|
1995-03-19 23:27:57 +00:00
|
|
|
* Check file permissions (also 'opens' file)
|
1995-03-19 23:08:12 +00:00
|
|
|
*/
|
1995-11-06 12:52:37 +00:00
|
|
|
error = exec_check_permissions(imgp);
|
2002-08-13 06:55:28 +00:00
|
|
|
if (error)
|
1994-05-25 09:21:21 +00:00
|
|
|
goto exec_fail_dealloc;
|
2002-08-13 06:55:28 +00:00
|
|
|
|
2005-01-25 00:40:01 +00:00
|
|
|
imgp->object = imgp->vp->v_object;
|
|
|
|
if (imgp->object != NULL)
|
2002-08-13 06:55:28 +00:00
|
|
|
vm_object_reference(imgp->object);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set VV_TEXT now so no one can write to the executable while we're
|
|
|
|
* activating it.
|
|
|
|
*
|
|
|
|
* Remember if this was set before and unset it in case this is not
|
|
|
|
* actually an executable image.
|
|
|
|
*/
|
2012-09-28 11:25:02 +00:00
|
|
|
textset = VOP_IS_TEXT(imgp->vp);
|
|
|
|
VOP_SET_TEXT(imgp->vp);
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1998-01-11 21:35:38 +00:00
|
|
|
error = exec_map_first_page(imgp);
|
1997-04-04 04:17:11 +00:00
|
|
|
if (error)
|
1994-05-25 09:21:21 +00:00
|
|
|
goto exec_fail_dealloc;
|
|
|
|
|
2007-12-04 12:28:07 +00:00
|
|
|
imgp->proc->p_osrel = 0;
|
2018-11-23 23:07:57 +00:00
|
|
|
imgp->proc->p_fctl0 = 0;
|
2016-05-26 23:18:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Implement image setuid/setgid.
|
|
|
|
*
|
|
|
|
* Determine new credentials before attempting image activators
|
|
|
|
* so that it can be used by process_exec handlers to determine
|
|
|
|
* credential/setid changes.
|
|
|
|
*
|
|
|
|
* Don't honor setuid/setgid if the filesystem prohibits it or if
|
|
|
|
* the process is being traced.
|
|
|
|
*
|
|
|
|
* We disable setuid/setgid/etc in capability mode on the basis
|
|
|
|
* that most setugid applications are not written with that
|
|
|
|
* environment in mind, and will therefore almost certainly operate
|
|
|
|
* incorrectly. In principle there's no reason that setugid
|
|
|
|
* applications might not be useful in capability mode, so we may want
|
|
|
|
* to reconsider this conservative design choice in the future.
|
|
|
|
*
|
|
|
|
* XXXMAC: For the time being, use NOSUID to also prohibit
|
|
|
|
* transitions on the file system.
|
|
|
|
*/
|
|
|
|
credential_changing = 0;
|
|
|
|
credential_changing |= (attr.va_mode & S_ISUID) &&
|
|
|
|
oldcred->cr_uid != attr.va_uid;
|
|
|
|
credential_changing |= (attr.va_mode & S_ISGID) &&
|
|
|
|
oldcred->cr_gid != attr.va_gid;
|
|
|
|
#ifdef MAC
|
|
|
|
will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
|
|
|
|
interpvplabel, imgp);
|
|
|
|
credential_changing |= will_transition;
|
|
|
|
#endif
|
|
|
|
|
2018-04-20 15:19:27 +00:00
|
|
|
/* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
|
2018-04-18 21:31:13 +00:00
|
|
|
if (credential_changing)
|
|
|
|
imgp->proc->p_pdeathsig = 0;
|
|
|
|
|
2016-05-26 23:18:54 +00:00
|
|
|
if (credential_changing &&
|
|
|
|
#ifdef CAPABILITY_MODE
|
|
|
|
((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
|
|
|
|
#endif
|
|
|
|
(imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
|
|
|
|
(p->p_flag & P_TRACED) == 0) {
|
|
|
|
imgp->credential_setid = true;
|
|
|
|
VOP_UNLOCK(imgp->vp, 0);
|
|
|
|
imgp->newcred = crdup(oldcred);
|
|
|
|
if (attr.va_mode & S_ISUID) {
|
|
|
|
euip = uifind(attr.va_uid);
|
|
|
|
change_euid(imgp->newcred, euip);
|
|
|
|
}
|
|
|
|
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
if (attr.va_mode & S_ISGID)
|
|
|
|
change_egid(imgp->newcred, attr.va_gid);
|
|
|
|
/*
|
|
|
|
* Implement correct POSIX saved-id behavior.
|
|
|
|
*
|
|
|
|
* XXXMAC: Note that the current logic will save the
|
|
|
|
* uid and gid if a MAC domain transition occurs, even
|
|
|
|
* though maybe it shouldn't.
|
|
|
|
*/
|
|
|
|
change_svuid(imgp->newcred, imgp->newcred->cr_uid);
|
|
|
|
change_svgid(imgp->newcred, imgp->newcred->cr_gid);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Implement correct POSIX saved-id behavior.
|
|
|
|
*
|
|
|
|
* XXX: It's not clear that the existing behavior is
|
|
|
|
* POSIX-compliant. A number of sources indicate that the
|
|
|
|
* saved uid/gid should only be updated if the new ruid is
|
|
|
|
* not equal to the old ruid, or the new euid is not equal
|
|
|
|
* to the old euid and the new euid is not equal to the old
|
|
|
|
* ruid. The FreeBSD code always updates the saved uid/gid.
|
|
|
|
* Also, this code uses the new (replaced) euid and egid as
|
|
|
|
* the source, which may or may not be the right ones to use.
|
|
|
|
*/
|
|
|
|
if (oldcred->cr_svuid != oldcred->cr_uid ||
|
|
|
|
oldcred->cr_svgid != oldcred->cr_gid) {
|
|
|
|
VOP_UNLOCK(imgp->vp, 0);
|
|
|
|
imgp->newcred = crdup(oldcred);
|
|
|
|
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
change_svuid(imgp->newcred, imgp->newcred->cr_uid);
|
|
|
|
change_svgid(imgp->newcred, imgp->newcred->cr_gid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* The new credentials are installed into the process later. */
|
|
|
|
|
2016-05-26 23:19:39 +00:00
|
|
|
/*
|
|
|
|
* Do the best to calculate the full path to the image file.
|
|
|
|
*/
|
|
|
|
if (args->fname != NULL && args->fname[0] == '/')
|
|
|
|
imgp->execpath = args->fname;
|
|
|
|
else {
|
|
|
|
VOP_UNLOCK(imgp->vp, 0);
|
|
|
|
if (vn_fullpath(td, imgp->vp, &imgp->execpath,
|
|
|
|
&imgp->freepath) != 0)
|
|
|
|
imgp->execpath = args->fname;
|
|
|
|
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2000-04-26 20:58:40 +00:00
|
|
|
* If the current process has a special image activator it
|
2005-01-29 23:12:00 +00:00
|
|
|
* wants to try first, call it. For example, emulating shell
|
2000-04-26 20:58:40 +00:00
|
|
|
* scripts differently.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2000-04-26 20:58:40 +00:00
|
|
|
error = -1;
|
|
|
|
if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
|
|
|
|
error = img_first(imgp);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Loop through the list of image activators, calling each one.
|
|
|
|
* An activator returns -1 if there is no match, 0 on success,
|
|
|
|
* and an error otherwise.
|
|
|
|
*/
|
|
|
|
for (i = 0; error == -1 && execsw[i]; ++i) {
|
|
|
|
if (execsw[i]->ex_imgact == NULL ||
|
|
|
|
execsw[i]->ex_imgact == img_first) {
|
1994-05-25 09:21:21 +00:00
|
|
|
continue;
|
|
|
|
}
|
2000-04-26 20:58:40 +00:00
|
|
|
error = (*execsw[i]->ex_imgact)(imgp);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
2000-04-26 20:58:40 +00:00
|
|
|
|
|
|
|
if (error) {
|
2002-08-13 06:55:28 +00:00
|
|
|
if (error == -1) {
|
2012-09-28 11:25:02 +00:00
|
|
|
if (textset == 0)
|
|
|
|
VOP_UNSET_TEXT(imgp->vp);
|
2000-04-26 20:58:40 +00:00
|
|
|
error = ENOEXEC;
|
2002-08-13 06:55:28 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
goto exec_fail_dealloc;
|
|
|
|
}
|
|
|
|
|
2000-04-26 20:58:40 +00:00
|
|
|
/*
|
|
|
|
* Special interpreter operation, cleanup and loop up to try to
|
|
|
|
* activate the interpreter.
|
|
|
|
*/
|
|
|
|
if (imgp->interpreted) {
|
|
|
|
exec_unmap_first_page(imgp);
|
2002-08-13 06:55:28 +00:00
|
|
|
/*
|
|
|
|
* VV_TEXT needs to be unset for scripts. There is a short
|
|
|
|
* period before we determine that something is a script where
|
|
|
|
* VV_TEXT will be set. The vnode lock is held over this
|
|
|
|
* entire period so nothing should illegitimately be blocked.
|
|
|
|
*/
|
2012-09-28 11:25:02 +00:00
|
|
|
VOP_UNSET_TEXT(imgp->vp);
|
2000-04-26 20:58:40 +00:00
|
|
|
/* free name buffer and old vnode */
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname != NULL)
|
2009-02-26 16:32:48 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2002-11-05 17:51:56 +00:00
|
|
|
#ifdef MAC
|
2015-07-14 01:13:37 +00:00
|
|
|
mac_execve_interpreter_enter(newtextvp, &interpvplabel);
|
2002-11-05 17:51:56 +00:00
|
|
|
#endif
|
2008-07-17 16:44:07 +00:00
|
|
|
if (imgp->opened) {
|
2015-07-14 01:13:37 +00:00
|
|
|
VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
|
2008-07-17 16:44:07 +00:00
|
|
|
imgp->opened = 0;
|
|
|
|
}
|
2015-07-14 01:13:37 +00:00
|
|
|
vput(newtextvp);
|
2002-07-06 07:00:01 +00:00
|
|
|
vm_object_deallocate(imgp->object);
|
|
|
|
imgp->object = NULL;
|
2016-05-26 23:18:54 +00:00
|
|
|
imgp->credential_setid = false;
|
|
|
|
if (imgp->newcred != NULL) {
|
|
|
|
crfree(imgp->newcred);
|
|
|
|
imgp->newcred = NULL;
|
|
|
|
}
|
2016-05-26 23:19:39 +00:00
|
|
|
imgp->execpath = NULL;
|
|
|
|
free(imgp->freepath, M_TEMP);
|
|
|
|
imgp->freepath = NULL;
|
2000-04-26 20:58:40 +00:00
|
|
|
/* set new name to that of the interpreter */
|
2012-10-22 17:50:54 +00:00
|
|
|
NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
|
2001-09-12 08:38:13 +00:00
|
|
|
UIO_SYSSPACE, imgp->interpreter_name, td);
|
2008-03-31 12:05:52 +00:00
|
|
|
args->fname = imgp->interpreter_name;
|
2000-04-26 20:58:40 +00:00
|
|
|
goto interpret;
|
|
|
|
}
|
|
|
|
|
2008-08-12 21:27:48 +00:00
|
|
|
/*
|
|
|
|
* NB: We unlock the vnode here because it is believed that none
|
|
|
|
* of the sv_copyout_strings/sv_fixup operations require the vnode.
|
|
|
|
*/
|
|
|
|
VOP_UNLOCK(imgp->vp, 0);
|
2009-03-17 12:53:28 +00:00
|
|
|
|
2013-10-15 06:38:40 +00:00
|
|
|
if (disallow_high_osrel &&
|
|
|
|
P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
|
|
|
|
error = ENOEXEC;
|
|
|
|
uprintf("Osrel %d for image %s too high\n", p->p_osrel,
|
|
|
|
imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
|
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
|
|
|
goto exec_fail_dealloc;
|
|
|
|
}
|
|
|
|
|
2015-08-03 13:41:47 +00:00
|
|
|
/* ABI enforces the use of Capsicum. Switch into capabilities mode. */
|
|
|
|
if (SV_PROC_FLAG(p, SV_CAPSICUM))
|
|
|
|
sys_cap_enter(td, NULL);
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
|
|
|
* Copy out strings (args and env) and initialize stack base
|
|
|
|
*/
|
2002-07-20 02:56:12 +00:00
|
|
|
if (p->p_sysent->sv_copyout_strings)
|
|
|
|
stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
|
|
|
|
else
|
|
|
|
stack_base = exec_copyout_strings(imgp);
|
1994-05-25 09:21:21 +00:00
|
|
|
|
|
|
|
/*
|
1995-02-14 19:23:22 +00:00
|
|
|
* If custom stack fixup routine present for this process
|
|
|
|
* let it do the stack setup.
|
|
|
|
* Else stuff argument count as first item on stack
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2003-12-28 04:37:59 +00:00
|
|
|
if (p->p_sysent->sv_fixup != NULL)
|
2018-05-24 16:25:18 +00:00
|
|
|
error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
|
1995-02-14 19:23:22 +00:00
|
|
|
else
|
2018-05-24 16:25:18 +00:00
|
|
|
error = suword(--stack_base, imgp->args->argc) == 0 ?
|
|
|
|
0 : EFAULT;
|
|
|
|
if (error != 0)
|
|
|
|
goto exec_fail_dealloc;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
Implement CloudABI's exec() call.
Summary:
In a runtime that is purely based on capability-based security, there is
a strong emphasis on how programs start their execution. We need to make
sure that we execute an new program with an exact set of file
descriptors, ensuring that credentials are not leaked into the process
accidentally.
Providing the right file descriptors is just half the problem. There
also needs to be a framework in place that gives meaning to these file
descriptors. How does a CloudABI mail server know which of the file
descriptors corresponds to the socket that receives incoming emails?
Furthermore, how will this mail server acquire its configuration
parameters, as it cannot open a configuration file from a global path on
disk?
CloudABI solves this problem by replacing traditional string command
line arguments by tree-like data structure consisting of scalars,
sequences and mappings (similar to YAML/JSON). In this structure, file
descriptors are treated as a first-class citizen. When calling exec(),
file descriptors are passed on to the new executable if and only if they
are referenced from this tree structure. See the cloudabi-run(1) man
page for more details and examples (sysutils/cloudabi-utils).
Fortunately, the kernel does not need to care about this tree structure
at all. The C library is responsible for serializing and deserializing,
but also for extracting the list of referenced file descriptors. The
system call only receives a copy of the serialized data and a layout of
what the new file descriptor table should look like:
int proc_exec(int execfd, const void *data, size_t datalen, const int *fds,
size_t fdslen);
This change introduces a set of fd*_remapped() functions:
- fdcopy_remapped() pulls a copy of a file descriptor table, remapping
all of the file descriptors according to the provided mapping table.
- fdinstall_remapped() replaces the file descriptor table of the process
by the copy created by fdcopy_remapped().
- fdescfree_remapped() frees the table in case we aborted before
fdinstall_remapped().
We then add a function exec_copyin_data_fds() that builds on top these
functions. It copies in the data and constructs a new remapped file
descriptor. This is used by cloudabi_sys_proc_exec().
Test Plan:
cloudabi-run(1) is capable of spawning processes successfully, providing
it data and file descriptors. procstat -f seems to confirm all is good.
Regular FreeBSD processes also work properly.
Reviewers: kib, mjg
Reviewed By: mjg
Subscribers: imp
Differential Revision: https://reviews.freebsd.org/D3079
2015-07-16 07:05:42 +00:00
|
|
|
if (args->fdp != NULL) {
|
|
|
|
/* Install a brand new file descriptor table. */
|
|
|
|
fdinstall_remapped(td, args->fdp);
|
|
|
|
args->fdp = NULL;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Keep on using the existing file descriptor table. For
|
|
|
|
* security and other reasons, the file descriptor table
|
|
|
|
* cannot be shared after an exec.
|
|
|
|
*/
|
|
|
|
fdunshare(td);
|
|
|
|
/* close files on exec */
|
|
|
|
fdcloseexec(td);
|
|
|
|
}
|
1997-08-04 05:39:24 +00:00
|
|
|
|
2002-05-02 15:00:14 +00:00
|
|
|
/*
|
|
|
|
* Malloc things before we need locks.
|
|
|
|
*/
|
2005-01-29 23:12:00 +00:00
|
|
|
i = imgp->args->begin_envv - imgp->args->begin_argv;
|
2005-10-01 08:33:56 +00:00
|
|
|
/* Cache arguments if they fit inside our allowance */
|
|
|
|
if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
|
2002-05-02 15:00:14 +00:00
|
|
|
newargs = pargs_alloc(i);
|
2005-10-01 08:33:56 +00:00
|
|
|
bcopy(imgp->args->begin_argv, newargs->ar_args, i);
|
|
|
|
}
|
2002-05-02 15:00:14 +00:00
|
|
|
|
2001-07-09 19:01:42 +00:00
|
|
|
/*
|
|
|
|
* For security and other reasons, signal handlers cannot
|
2001-10-09 17:25:30 +00:00
|
|
|
* be shared after an exec. The new process gets a copy of the old
|
2001-07-11 02:04:43 +00:00
|
|
|
* handlers. In execsigs(), the new process will have its signals
|
2001-07-09 19:01:42 +00:00
|
|
|
* reset.
|
|
|
|
*/
|
- Merge struct procsig with struct sigacts.
- Move struct sigacts out of the u-area and malloc() it using the
M_SUBPROC malloc bucket.
- Add a small sigacts_*() API for managing sigacts structures: sigacts_alloc(),
sigacts_free(), sigacts_copy(), sigacts_share(), and sigacts_shared().
- Remove the p_sigignore, p_sigacts, and p_sigcatch macros.
- Add a mutex to struct sigacts that protects all the members of the struct.
- Add sigacts locking.
- Remove Giant from nosys(), kill(), killpg(), and kern_sigaction() now
that sigacts is locked.
- Several in-kernel functions such as psignal(), tdsignal(), trapsignal(),
and thread_stopped() are now MP safe.
Reviewed by: arch@
Approved by: re (rwatson)
2003-05-13 20:36:02 +00:00
|
|
|
if (sigacts_shared(p->p_sigacts)) {
|
|
|
|
oldsigacts = p->p_sigacts;
|
|
|
|
newsigacts = sigacts_alloc();
|
|
|
|
sigacts_copy(newsigacts, oldsigacts);
|
2014-07-02 05:45:40 +00:00
|
|
|
}
|
2001-07-09 19:01:42 +00:00
|
|
|
|
2016-05-27 15:03:38 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
|
|
|
|
2014-07-01 06:29:15 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
if (oldsigacts)
|
|
|
|
p->p_sigacts = newsigacts;
|
1999-08-11 20:35:38 +00:00
|
|
|
/* Stop profiling */
|
|
|
|
stopprofclock(p);
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/* reset caught signals */
|
|
|
|
execsigs(p);
|
|
|
|
|
|
|
|
/* name this process - nameiexec(p, ndp) */
|
2009-10-23 15:14:54 +00:00
|
|
|
bzero(p->p_comm, sizeof(p->p_comm));
|
|
|
|
if (args->fname)
|
|
|
|
bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
|
|
|
|
min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
|
2015-07-14 01:13:37 +00:00
|
|
|
else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
|
2009-10-23 15:14:54 +00:00
|
|
|
bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
|
2007-11-14 06:04:57 +00:00
|
|
|
bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
|
2012-03-08 19:41:05 +00:00
|
|
|
#ifdef KTR
|
|
|
|
sched_clear_tdname(td);
|
|
|
|
#endif
|
1995-05-30 08:16:23 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
1996-04-29 15:07:59 +00:00
|
|
|
* mark as execed, wakeup the process that vforked (if any) and tell
|
1998-04-17 22:37:19 +00:00
|
|
|
* it that it now has its own resources back
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
|
|
|
p->p_flag |= P_EXEC;
|
2015-01-18 15:13:11 +00:00
|
|
|
if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
|
|
|
|
p->p_flag2 &= ~P2_NOTRACE;
|
2014-07-14 22:40:46 +00:00
|
|
|
if (p->p_flag & P_PPWAIT) {
|
2013-02-07 15:34:22 +00:00
|
|
|
p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
|
2008-12-05 20:50:24 +00:00
|
|
|
cv_broadcast(&p->p_pwait);
|
2016-07-18 10:53:47 +00:00
|
|
|
/* STOPs are no longer ignored, arrange for AST */
|
|
|
|
signotify(td);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
1995-05-30 08:16:23 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2016-05-26 23:18:54 +00:00
|
|
|
* Implement image setuid/setgid installation.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
2016-05-26 23:18:54 +00:00
|
|
|
if (imgp->credential_setid) {
|
1995-11-06 12:52:37 +00:00
|
|
|
/*
|
|
|
|
* Turn off syscall tracing for set-id programs, except for
|
2001-06-16 23:34:23 +00:00
|
|
|
* root. Record any set-id flags first to make sure that
|
|
|
|
* we do not regain any tracing during a possible block.
|
1995-11-06 12:52:37 +00:00
|
|
|
*/
|
2001-06-16 23:34:23 +00:00
|
|
|
setsugid(p);
|
2006-11-06 13:42:10 +00:00
|
|
|
|
2002-06-07 05:41:27 +00:00
|
|
|
#ifdef KTRACE
|
2012-10-06 19:23:44 +00:00
|
|
|
if (p->p_tracecred != NULL &&
|
|
|
|
priv_check_cred(p->p_tracecred, PRIV_DEBUG_DIFFCRED, 0))
|
2010-10-21 19:17:40 +00:00
|
|
|
ktrprocexec(p, &tracecred, &tracevp);
|
2002-06-07 05:41:27 +00:00
|
|
|
#endif
|
2002-09-13 09:31:56 +00:00
|
|
|
/*
|
2002-09-14 18:55:11 +00:00
|
|
|
* Close any file descriptors 0..2 that reference procfs,
|
|
|
|
* then make sure file descriptors 0..2 are in use.
|
2002-09-13 09:31:56 +00:00
|
|
|
*
|
2014-10-31 09:56:00 +00:00
|
|
|
* Both fdsetugidsafety() and fdcheckstd() may call functions
|
|
|
|
* taking sleepable locks, so temporarily drop our locks.
|
2002-09-13 09:31:56 +00:00
|
|
|
*/
|
|
|
|
PROC_UNLOCK(p);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(imgp->vp, 0);
|
2014-10-22 00:23:43 +00:00
|
|
|
fdsetugidsafety(td);
|
2002-04-19 00:45:29 +00:00
|
|
|
error = fdcheckstd(td);
|
2014-07-07 14:03:30 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2016-05-27 15:03:38 +00:00
|
|
|
if (error != 0)
|
|
|
|
goto exec_fail_dealloc;
|
2002-10-11 21:04:01 +00:00
|
|
|
PROC_LOCK(p);
|
2002-11-05 14:57:49 +00:00
|
|
|
#ifdef MAC
|
2002-11-05 17:51:56 +00:00
|
|
|
if (will_transition) {
|
2016-05-26 23:18:54 +00:00
|
|
|
mac_vnode_execve_transition(oldcred, imgp->newcred,
|
|
|
|
imgp->vp, interpvplabel, imgp);
|
2002-11-05 17:51:56 +00:00
|
|
|
}
|
2002-11-05 14:57:49 +00:00
|
|
|
#endif
|
1995-11-06 12:52:37 +00:00
|
|
|
} else {
|
o Merge contents of struct pcred into struct ucred. Specifically, add the
real uid, saved uid, real gid, and saved gid to ucred, as well as the
pcred->pc_uidinfo, which was associated with the real uid, only rename
it to cr_ruidinfo so as not to conflict with cr_uidinfo, which
corresponds to the effective uid.
o Remove p_cred from struct proc; add p_ucred to struct proc, replacing
original macro that pointed.
p->p_ucred to p->p_cred->pc_ucred.
o Universally update code so that it makes use of ucred instead of pcred,
p->p_ucred instead of p->p_pcred, cr_ruidinfo instead of p_uidinfo,
cr_{r,sv}{u,g}id instead of p_*, etc.
o Remove pcred0 and its initialization from init_main.c; initialize
cr_ruidinfo there.
o Restruction many credential modification chunks to always crdup while
we figure out locking and optimizations; generally speaking, this
means moving to a structure like this:
newcred = crdup(oldcred);
...
p->p_ucred = newcred;
crfree(oldcred);
It's not race-free, but better than nothing. There are also races
in sys_process.c, all inter-process authorization, fork, exec, and
exit.
o Remove sigio->sio_ruid since sigio->sio_ucred now contains the ruid;
remove comments indicating that the old arrangement was a problem.
o Restructure exec1() a little to use newcred/oldcred arrangement, and
use improved uid management primitives.
o Clean up exit1() so as to do less work in credential cleanup due to
pcred removal.
o Clean up fork1() so as to do less work in credential cleanup and
allocation.
o Clean up ktrcanset() to take into account changes, and move to using
suser_xxx() instead of performing a direct uid==0 comparision.
o Improve commenting in various kern_prot.c credential modification
calls to better document current behavior. In a couple of places,
current behavior is a little questionable and we need to check
POSIX.1 to make sure it's "right". More commenting work still
remains to be done.
o Update credential management calls, such as crfree(), to take into
account new ruidinfo reference.
o Modify or add the following uid and gid helper routines:
change_euid()
change_egid()
change_ruid()
change_rgid()
change_svuid()
change_svgid()
In each case, the call now acts on a credential not a process, and as
such no longer requires more complicated process locking/etc. They
now assume the caller will do any necessary allocation of an
exclusive credential reference. Each is commented to document its
reference requirements.
o CANSIGIO() is simplified to require only credentials, not processes
and pcreds.
o Remove lots of (p_pcred==NULL) checks.
o Add an XXX to authorization code in nfs_lock.c, since it's
questionable, and needs to be considered carefully.
o Simplify posix4 authorization code to require only credentials, not
processes and pcreds. Note that this authorization, as well as
CANSIGIO(), needs to be updated to use the p_cansignal() and
p_cansched() centralized authorization routines, as they currently
do not take into account some desirable restrictions that are handled
by the centralized routines, as well as being inconsistent with other
similar authorization instances.
o Update libkvm to take these changes into account.
Obtained from: TrustedBSD Project
Reviewed by: green, bde, jhb, freebsd-arch, freebsd-audit
2001-05-25 16:59:11 +00:00
|
|
|
if (oldcred->cr_uid == oldcred->cr_ruid &&
|
|
|
|
oldcred->cr_gid == oldcred->cr_rgid)
|
1997-02-19 03:51:34 +00:00
|
|
|
p->p_flag &= ~P_SUGID;
|
o Merge contents of struct pcred into struct ucred. Specifically, add the
real uid, saved uid, real gid, and saved gid to ucred, as well as the
pcred->pc_uidinfo, which was associated with the real uid, only rename
it to cr_ruidinfo so as not to conflict with cr_uidinfo, which
corresponds to the effective uid.
o Remove p_cred from struct proc; add p_ucred to struct proc, replacing
original macro that pointed.
p->p_ucred to p->p_cred->pc_ucred.
o Universally update code so that it makes use of ucred instead of pcred,
p->p_ucred instead of p->p_pcred, cr_ruidinfo instead of p_uidinfo,
cr_{r,sv}{u,g}id instead of p_*, etc.
o Remove pcred0 and its initialization from init_main.c; initialize
cr_ruidinfo there.
o Restruction many credential modification chunks to always crdup while
we figure out locking and optimizations; generally speaking, this
means moving to a structure like this:
newcred = crdup(oldcred);
...
p->p_ucred = newcred;
crfree(oldcred);
It's not race-free, but better than nothing. There are also races
in sys_process.c, all inter-process authorization, fork, exec, and
exit.
o Remove sigio->sio_ruid since sigio->sio_ucred now contains the ruid;
remove comments indicating that the old arrangement was a problem.
o Restructure exec1() a little to use newcred/oldcred arrangement, and
use improved uid management primitives.
o Clean up exit1() so as to do less work in credential cleanup due to
pcred removal.
o Clean up fork1() so as to do less work in credential cleanup and
allocation.
o Clean up ktrcanset() to take into account changes, and move to using
suser_xxx() instead of performing a direct uid==0 comparison.
o Improve commenting in various kern_prot.c credential modification
calls to better document current behavior. In a couple of places,
current behavior is a little questionable and we need to check
POSIX.1 to make sure it's "right". More commenting work still
remains to be done.
o Update credential management calls, such as crfree(), to take into
account new ruidinfo reference.
o Modify or add the following uid and gid helper routines:
change_euid()
change_egid()
change_ruid()
change_rgid()
change_svuid()
change_svgid()
In each case, the call now acts on a credential not a process, and as
such no longer requires more complicated process locking/etc. They
now assume the caller will do any necessary allocation of an
exclusive credential reference. Each is commented to document its
reference requirements.
o CANSIGIO() is simplified to require only credentials, not processes
and pcreds.
o Remove lots of (p_pcred==NULL) checks.
o Add an XXX to authorization code in nfs_lock.c, since it's
questionable, and needs to be considered carefully.
o Simplify posix4 authorization code to require only credentials, not
processes and pcreds. Note that this authorization, as well as
CANSIGIO(), needs to be updated to use the p_cansignal() and
p_cansched() centralized authorization routines, as they currently
do not take into account some desirable restrictions that are handled
by the centralized routines, as well as being inconsistent with other
similar authorization instances.
o Update libkvm to take these changes into account.
Obtained from: TrustedBSD Project
Reviewed by: green, bde, jhb, freebsd-arch, freebsd-audit
2001-05-25 16:59:11 +00:00
|
|
|
}
|
2016-05-26 23:18:54 +00:00
|
|
|
/*
|
|
|
|
* Set the new credentials.
|
|
|
|
*/
|
2016-06-08 04:37:03 +00:00
|
|
|
if (imgp->newcred != NULL) {
|
2016-05-26 23:18:54 +00:00
|
|
|
proc_set_cred(p, imgp->newcred);
|
2016-06-08 04:37:03 +00:00
|
|
|
crfree(oldcred);
|
|
|
|
oldcred = NULL;
|
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1994-09-24 16:58:43 +00:00
|
|
|
/*
|
2015-07-14 00:43:08 +00:00
|
|
|
* Store the vp for use in procfs. This vnode was referenced by namei
|
|
|
|
* or fgetvp_exec.
|
1994-09-24 16:58:43 +00:00
|
|
|
*/
|
2015-07-14 01:13:37 +00:00
|
|
|
oldtextvp = p->p_textvp;
|
|
|
|
p->p_textvp = newtextvp;
|
1994-09-24 16:58:43 +00:00
|
|
|
|
2008-05-24 06:22:16 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
|
|
|
/*
|
|
|
|
* Tell the DTrace fasttrap provider about the exec if it
|
|
|
|
* has declared an interest.
|
|
|
|
*/
|
|
|
|
if (dtrace_fasttrap_exec)
|
|
|
|
dtrace_fasttrap_exec(p);
|
|
|
|
#endif
|
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
/*
|
2001-10-27 11:11:25 +00:00
|
|
|
* Notify others that we exec'd, and clear the P_INEXEC flag
|
|
|
|
* as we're now a bona fide freshly-execed process.
|
2000-04-16 18:53:38 +00:00
|
|
|
*/
|
When filt_proc() removes event from the knlist due to the process
exiting (NOTE_EXIT->knlist_remove_inevent()), two things happen:
- knote kn_knlist pointer is reset
- INFLUX knote is removed from the process knlist.
And, there are two consequences:
- KN_LIST_UNLOCK() on such knote is nop
- there is nothing which would block exit1() from processing past the
knlist_destroy() (and knlist_destroy() resets knlist lock pointers).
Both consequences result either in leaked process lock, or
dereferencing NULL function pointers for locking.
Handle this by stopping embedding the process knlist into struct proc.
Instead, the knlist is allocated together with struct proc, but marked
as autodestroy on the zombie reap, by knlist_detach() function. The
knlist is freed when last kevent is removed from the list, in
particular, at the zombie reap time if the list is empty. As result,
the knlist_remove_inevent() is no longer needed and removed.
Other changes:
In filt_procattach(), clear NOTE_EXEC and NOTE_FORK desired events
from kn_sfflags for knote registered by kernel to only get NOTE_CHILD
notifications. The flags leak resulted in excessive
NOTE_EXEC/NOTE_FORK reports.
Fix immediate note activation in filt_procattach(). Condition should
be either the immediate CHILD_NOTE activation, or immediate NOTE_EXIT
report for the exiting process.
In knote_fork(), do not perform racy check for KN_INFLUX before kq
lock is taken.  Besides being racy, it did not account for notes
just added by scan (KN_SCAN).
Some minor and incomplete style fixes.
Analyzed and tested by: Eric Badger <eric@badgerio.us>
Reviewed by: jhb
Sponsored by: The FreeBSD Foundation
MFC after: 2 weeks
Approved by: re (gjb)
Differential revision: https://reviews.freebsd.org/D6859
2016-06-27 21:52:17 +00:00
|
|
|
KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
|
2001-10-27 11:11:25 +00:00
|
|
|
p->p_flag &= ~P_INEXEC;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/* clear "fork but no exec" flag, as we _are_ execing */
|
|
|
|
p->p_acflag &= ~AFORK;
|
|
|
|
|
2005-10-01 08:33:56 +00:00
|
|
|
/*
|
2005-10-04 04:02:33 +00:00
|
|
|
* Free any previous argument cache and replace it with
|
2005-10-01 08:33:56 +00:00
|
|
|
* the new argument cache, if any.
|
|
|
|
*/
|
2002-05-02 15:00:14 +00:00
|
|
|
oldargs = p->p_args;
|
2005-10-01 08:33:56 +00:00
|
|
|
p->p_args = newargs;
|
|
|
|
newargs = NULL;
|
2005-04-19 04:01:25 +00:00
|
|
|
|
2017-10-19 00:38:14 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
|
2005-04-19 04:01:25 +00:00
|
|
|
#ifdef HWPMC_HOOKS
|
|
|
|
/*
|
2005-06-09 19:45:09 +00:00
|
|
|
* Check if system-wide sampling is in effect or if the
|
|
|
|
* current process is using PMCs. If so, do exec() time
|
2005-04-19 04:01:25 +00:00
|
|
|
* processing. This processing needs to happen AFTER the
|
|
|
|
* P_INEXEC flag is cleared.
|
|
|
|
*/
|
2005-06-09 19:45:09 +00:00
|
|
|
if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
|
2009-09-09 10:52:36 +00:00
|
|
|
VOP_UNLOCK(imgp->vp, 0);
|
2005-06-30 19:01:26 +00:00
|
|
|
pe.pm_credentialschanged = credential_changing;
|
|
|
|
pe.pm_entryaddr = imgp->entry_addr;
|
|
|
|
|
|
|
|
PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
|
2012-01-19 23:03:31 +00:00
|
|
|
vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
|
2017-10-19 00:38:14 +00:00
|
|
|
}
|
2005-04-19 04:01:25 +00:00
|
|
|
#endif
|
2002-05-02 15:00:14 +00:00
|
|
|
|
2002-10-11 21:04:01 +00:00
|
|
|
/* Set values passed into the program in registers. */
|
|
|
|
if (p->p_sysent->sv_setregs)
|
2010-03-25 14:24:00 +00:00
|
|
|
(*p->p_sysent->sv_setregs)(td, imgp,
|
|
|
|
(u_long)(uintptr_t)stack_base);
|
2002-10-11 21:04:01 +00:00
|
|
|
else
|
2010-03-25 14:24:00 +00:00
|
|
|
exec_setregs(td, imgp, (u_long)(uintptr_t)stack_base);
|
2002-08-13 06:55:28 +00:00
|
|
|
|
2008-08-28 15:23:18 +00:00
|
|
|
vfs_mark_atime(imgp->vp, td->td_ucred);
|
2005-05-31 19:39:52 +00:00
|
|
|
|
2015-12-16 23:39:27 +00:00
|
|
|
SDT_PROBE1(proc, , , exec__success, args->fname);
|
2008-05-24 06:22:16 +00:00
|
|
|
|
1998-01-11 21:35:38 +00:00
|
|
|
exec_fail_dealloc:
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->firstpage != NULL)
|
1998-01-11 21:35:38 +00:00
|
|
|
exec_unmap_first_page(imgp);
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->vp != NULL) {
|
2008-03-31 12:05:52 +00:00
|
|
|
if (args->fname)
|
2009-02-26 16:32:48 +00:00
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
2008-07-17 16:44:07 +00:00
|
|
|
if (imgp->opened)
|
|
|
|
VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
|
2015-07-14 00:43:08 +00:00
|
|
|
if (error != 0)
|
|
|
|
vput(imgp->vp);
|
|
|
|
else
|
|
|
|
VOP_UNLOCK(imgp->vp, 0);
|
1997-04-04 07:30:06 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->object != NULL)
|
2002-07-06 07:00:01 +00:00
|
|
|
vm_object_deallocate(imgp->object);
|
|
|
|
|
2009-03-17 12:53:28 +00:00
|
|
|
free(imgp->freepath, M_TEMP);
|
|
|
|
|
2002-11-26 17:30:55 +00:00
|
|
|
if (error == 0) {
|
2017-10-19 00:46:15 +00:00
|
|
|
if (p->p_ptevents & PTRACE_EXEC) {
|
|
|
|
PROC_LOCK(p);
|
|
|
|
if (p->p_ptevents & PTRACE_EXEC)
|
|
|
|
td->td_dbgflags |= TDB_EXEC;
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
}
|
Reorganize syscall entry and leave handling.
Extend struct sysvec with three new elements:
sv_fetch_syscall_args - the method to fetch syscall arguments from
usermode into struct syscall_args. The structure is machine-depended
(this might be reconsidered after all architectures are converted).
sv_set_syscall_retval - the method to set a return value for usermode
from the syscall. It is a generalization of
cpu_set_syscall_retval(9) to allow ABIs to override the way to set a
return value.
sv_syscallnames - the table of syscall names.
Use sv_set_syscall_retval in kern_sigsuspend() instead of hardcoding
the call to cpu_set_syscall_retval().
The new functions syscallenter(9) and syscallret(9) are provided that
use sv_*syscall* pointers and contain the common repeated code from
the syscall() implementations for the architecture-specific syscall
trap handlers.
Syscallenter() fetches arguments, calls syscall implementation from
ABI sysent table, and set up return frame. The end of syscall
bookkeeping is done by syscallret().
Take advantage of single place for MI syscall handling code and
implement ptrace_lwpinfo pl_flags PL_FLAG_SCE, PL_FLAG_SCX and
PL_FLAG_EXEC. The SCE and SCX flags notify the debugger that the
thread is stopped at syscall entry or return point respectively. The
EXEC flag augments SCX and notifies debugger that the process address
space was changed by one of exec(2)-family syscalls.
The i386, amd64, sparc64, sun4v, powerpc and ia64 syscall()s are
changed to use syscallenter()/syscallret(). MIPS and arm are not
converted and use the mostly unchanged syscall() implementation.
Reviewed by: jhb, marcel, marius, nwhitehorn, stas
Tested by: marcel (ia64), marius (sparc64), nwhitehorn (powerpc),
stas (mips)
MFC after: 1 month
2010-05-23 18:32:02 +00:00
|
|
|
|
2002-11-26 17:30:55 +00:00
|
|
|
/*
|
|
|
|
* Stop the process here if its stop event mask has
|
|
|
|
* the S_EXEC bit set.
|
|
|
|
*/
|
|
|
|
STOPEVENT(p, S_EXEC, 0);
|
2016-05-27 15:03:38 +00:00
|
|
|
} else {
|
1994-05-25 09:21:21 +00:00
|
|
|
exec_fail:
|
2016-05-27 15:03:38 +00:00
|
|
|
/* we're done here, clear P_INEXEC */
|
|
|
|
PROC_LOCK(p);
|
|
|
|
p->p_flag &= ~P_INEXEC;
|
|
|
|
PROC_UNLOCK(p);
|
2005-01-29 23:12:00 +00:00
|
|
|
|
2016-05-27 15:03:38 +00:00
|
|
|
SDT_PROBE1(proc, , , exec__failure, error);
|
|
|
|
}
|
2008-05-24 06:22:16 +00:00
|
|
|
|
2016-06-08 04:37:03 +00:00
|
|
|
if (imgp->newcred != NULL && oldcred != NULL)
|
|
|
|
crfree(imgp->newcred);
|
|
|
|
|
2002-11-05 17:51:56 +00:00
|
|
|
#ifdef MAC
|
|
|
|
mac_execve_exit(imgp);
|
Introduce two related changes to the TrustedBSD MAC Framework:
(1) Abstract interpreter vnode labeling in execve(2) and mac_execve(2)
so that the general exec code isn't aware of the details of
allocating, copying, and freeing labels, rather, simply passes in
a void pointer to start and stop functions that will be used by
the framework. This change will be MFC'd.
(2) Introduce a new flags field to the MAC_POLICY_SET(9) interface
allowing policies to declare which types of objects require label
allocation, initialization, and destruction, and define a set of
flags covering various supported object types (MPC_OBJECT_PROC,
MPC_OBJECT_VNODE, MPC_OBJECT_INPCB, ...). This change reduces the
overhead of compiling the MAC Framework into the kernel if policies
aren't loaded, or if policies require labels on only a small number
or even no object types. Each time a policy is loaded or unloaded,
we recalculate a mask of labeled object types across all policies
present in the system. Eliminate MAC_ALWAYS_LABEL_MBUF option as it
is no longer required.
MFC after: 1 week ((1) only)
Reviewed by: csjp
Obtained from: TrustedBSD Project
Sponsored by: Apple, Inc.
2008-08-23 15:26:36 +00:00
|
|
|
mac_execve_interpreter_exit(interpvplabel);
|
2002-11-05 17:51:56 +00:00
|
|
|
#endif
|
2006-02-06 22:06:54 +00:00
|
|
|
exec_free_args(args);
|
|
|
|
|
2016-05-27 15:03:38 +00:00
|
|
|
/*
|
|
|
|
* Handle deferred decrement of ref counts.
|
|
|
|
*/
|
|
|
|
if (oldtextvp != NULL)
|
|
|
|
vrele(oldtextvp);
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (tracevp != NULL)
|
|
|
|
vrele(tracevp);
|
|
|
|
if (tracecred != NULL)
|
|
|
|
crfree(tracecred);
|
|
|
|
#endif
|
|
|
|
pargs_drop(oldargs);
|
|
|
|
pargs_drop(newargs);
|
|
|
|
if (oldsigacts != NULL)
|
|
|
|
sigacts_free(oldsigacts);
|
|
|
|
if (euip != NULL)
|
|
|
|
uifree(euip);
|
|
|
|
|
2006-02-06 22:06:54 +00:00
|
|
|
if (error && imgp->vmspace_destroyed) {
|
|
|
|
/* sorry, no more process anymore. exit gracefully */
|
2015-07-18 09:02:50 +00:00
|
|
|
exit1(td, 0, SIGABRT);
|
2006-02-06 22:06:54 +00:00
|
|
|
/* NOT REACHED */
|
|
|
|
}
|
2011-02-25 22:05:33 +00:00
|
|
|
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (error == 0)
|
|
|
|
ktrprocctor(p);
|
|
|
|
#endif
|
|
|
|
|
2017-11-24 07:35:08 +00:00
|
|
|
/*
|
|
|
|
* We don't want cpu_set_syscall_retval() to overwrite any of
|
|
|
|
* the register values put in place by exec_setregs().
|
|
|
|
* Implementations of cpu_set_syscall_retval() will leave
|
|
|
|
* registers unmodified when returning EJUSTRETURN.
|
|
|
|
*/
|
|
|
|
return (error == 0 ? EJUSTRETURN : error);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
|
|
|
|
1998-01-11 21:35:38 +00:00
|
|
|
int
|
2018-06-01 13:26:45 +00:00
|
|
|
exec_map_first_page(struct image_params *imgp)
|
1998-01-11 21:35:38 +00:00
|
|
|
{
|
A change to KPI of vm_pager_get_pages() and underlying VOP_GETPAGES().
o With new KPI consumers can request contiguous ranges of pages, and
unlike before, all pages will be kept busied on return, like it was
done before with the 'reqpage' only. Now the reqpage goes away. With
new interface it is easier to implement code protected from race
conditions.
Such arrayed requests for now should be preceded by a call to
vm_pager_haspage() to make sure that request is possible. This
could be improved later, making vm_pager_haspage() obsolete.
Strengthening the promises on the business of the array of pages
allows us to remove such hacks as swp_pager_free_nrpage() and
vm_pager_free_nonreq().
o New KPI accepts two integer pointers that may optionally point at
values for read ahead and read behind, that a pager may do, if it
can. These pages are completely owned by pager, and not controlled
by the caller.
This shifts the UFS-specific readahead logic from vm_fault.c, which
should be file system agnostic, into vnode_pager.c. It also removes
one VOP_BMAP() request per hard fault.
Discussed with: kib, alc, jeff, scottl
Sponsored by: Nginx, Inc.
Sponsored by: Netflix
2015-12-16 21:30:45 +00:00
|
|
|
int rv, i, after, initial_pagein;
|
1998-02-05 03:32:49 +00:00
|
|
|
vm_page_t ma[VM_INITIAL_PAGEIN];
|
1998-01-11 21:35:38 +00:00
|
|
|
vm_object_t object;
|
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->firstpage != NULL)
|
1998-01-11 21:35:38 +00:00
|
|
|
exec_unmap_first_page(imgp);
|
|
|
|
|
2005-01-25 00:40:01 +00:00
|
|
|
object = imgp->vp->v_object;
|
2005-05-01 00:58:19 +00:00
|
|
|
if (object == NULL)
|
|
|
|
return (EACCES);
|
2013-03-09 02:32:23 +00:00
|
|
|
VM_OBJECT_WLOCK(object);
|
2007-12-29 19:53:04 +00:00
|
|
|
#if VM_NRESERVLEVEL > 0
|
2015-03-21 17:56:55 +00:00
|
|
|
vm_object_color(object, 0);
|
2007-12-29 19:53:04 +00:00
|
|
|
#endif
|
2016-08-14 22:00:45 +00:00
|
|
|
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
|
2009-06-07 19:38:26 +00:00
|
|
|
if (ma[0]->valid != VM_PAGE_BITS_ALL) {
|
2016-08-14 22:00:45 +00:00
|
|
|
vm_page_xbusy(ma[0]);
|
A change to KPI of vm_pager_get_pages() and underlying VOP_GETPAGES().
o With new KPI consumers can request contiguous ranges of pages, and
unlike before, all pages will be kept busied on return, like it was
done before with the 'reqpage' only. Now the reqpage goes away. With
new interface it is easier to implement code protected from race
conditions.
Such arrayed requests for now should be preceded by a call to
vm_pager_haspage() to make sure that request is possible. This
could be improved later, making vm_pager_haspage() obsolete.
Strengthening the promises on the business of the array of pages
allows us to remove such hacks as swp_pager_free_nrpage() and
vm_pager_free_nonreq().
o New KPI accepts two integer pointers that may optionally point at
values for read ahead and read behind, that a pager may do, if it
can. These pages are completely owned by pager, and not controlled
by the caller.
This shifts the UFS-specific readahead logic from vm_fault.c, which
should be file system agnostic, into vnode_pager.c. It also removes
one VOP_BMAP() request per hard fault.
Discussed with: kib, alc, jeff, scottl
Sponsored by: Nginx, Inc.
Sponsored by: Netflix
2015-12-16 21:30:45 +00:00
|
|
|
if (!vm_pager_has_page(object, 0, NULL, &after)) {
|
|
|
|
vm_page_lock(ma[0]);
|
|
|
|
vm_page_free(ma[0]);
|
|
|
|
vm_page_unlock(ma[0]);
|
|
|
|
VM_OBJECT_WUNLOCK(object);
|
|
|
|
return (EIO);
|
|
|
|
}
|
|
|
|
initial_pagein = min(after, VM_INITIAL_PAGEIN);
|
|
|
|
KASSERT(initial_pagein <= object->size,
|
|
|
|
("%s: initial_pagein %d object->size %ju",
|
|
|
|
__func__, initial_pagein, (uintmax_t )object->size));
|
1998-02-05 03:32:49 +00:00
|
|
|
for (i = 1; i < initial_pagein; i++) {
|
2010-07-02 15:50:30 +00:00
|
|
|
if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
|
2003-10-04 22:47:20 +00:00
|
|
|
if (ma[i]->valid)
|
|
|
|
break;
|
2018-02-07 21:51:59 +00:00
|
|
|
if (!vm_page_tryxbusy(ma[i]))
|
1998-02-05 03:32:49 +00:00
|
|
|
break;
|
|
|
|
} else {
|
2002-08-25 20:48:45 +00:00
|
|
|
ma[i] = vm_page_alloc(object, i,
|
2016-11-15 18:22:50 +00:00
|
|
|
VM_ALLOC_NORMAL);
|
1998-02-05 03:32:49 +00:00
|
|
|
if (ma[i] == NULL)
|
|
|
|
break;
|
|
|
|
}
|
1998-01-11 21:35:38 +00:00
|
|
|
}
|
1998-02-05 03:32:49 +00:00
|
|
|
initial_pagein = i;
|
A change to KPI of vm_pager_get_pages() and underlying VOP_GETPAGES().
o With new KPI consumers can request contiguous ranges of pages, and
unlike before, all pages will be kept busied on return, like it was
done before with the 'reqpage' only. Now the reqpage goes away. With
new interface it is easier to implement code protected from race
conditions.
Such arrayed requests for now should be preceded by a call to
vm_pager_haspage() to make sure that request is possible. This
could be improved later, making vm_pager_haspage() obsolete.
Strengthening the promises on the business of the array of pages
allows us to remove such hacks as swp_pager_free_nrpage() and
vm_pager_free_nonreq().
o New KPI accepts two integer pointers that may optionally point at
values for read ahead and read behind, that a pager may do, if it
can. These pages are completely owned by pager, and not controlled
by the caller.
This shifts the UFS-specific readahead logic from vm_fault.c, which
should be file system agnostic, into vnode_pager.c. It also removes
one VOP_BMAP() request per hard fault.
Discussed with: kib, alc, jeff, scottl
Sponsored by: Nginx, Inc.
Sponsored by: Netflix
2015-12-16 21:30:45 +00:00
|
|
|
rv = vm_pager_get_pages(object, ma, initial_pagein, NULL, NULL);
|
2015-06-12 11:32:20 +00:00
|
|
|
if (rv != VM_PAGER_OK) {
|
A change to KPI of vm_pager_get_pages() and underlying VOP_GETPAGES().
o With new KPI consumers can request contiguous ranges of pages, and
unlike before, all pages will be kept busied on return, like it was
done before with the 'reqpage' only. Now the reqpage goes away. With
new interface it is easier to implement code protected from race
conditions.
Such arrayed requests for now should be preceded by a call to
vm_pager_haspage() to make sure that request is possible. This
could be improved later, making vm_pager_haspage() obsolete.
Strengthening the promises on the business of the array of pages
allows us to remove such hacks as swp_pager_free_nrpage() and
vm_pager_free_nonreq().
o New KPI accepts two integer pointers that may optionally point at
values for read ahead and read behind, that a pager may do, if it
can. These pages are completely owned by pager, and not controlled
by the caller.
This shifts the UFS-specific readahead logic from vm_fault.c, which
should be file system agnostic, into vnode_pager.c. It also removes
one VOP_BMAP() request per hard fault.
Discussed with: kib, alc, jeff, scottl
Sponsored by: Nginx, Inc.
Sponsored by: Netflix
2015-12-16 21:30:45 +00:00
|
|
|
for (i = 0; i < initial_pagein; i++) {
|
|
|
|
vm_page_lock(ma[i]);
|
|
|
|
vm_page_free(ma[i]);
|
|
|
|
vm_page_unlock(ma[i]);
|
|
|
|
}
|
2013-03-09 02:32:23 +00:00
|
|
|
VM_OBJECT_WUNLOCK(object);
|
2002-08-24 22:01:40 +00:00
|
|
|
return (EIO);
|
1998-01-11 21:35:38 +00:00
|
|
|
}
|
2016-08-14 22:00:45 +00:00
|
|
|
vm_page_xunbusy(ma[0]);
|
A change to KPI of vm_pager_get_pages() and underlying VOP_GETPAGES().
o With new KPI consumers can request contiguous ranges of pages, and
unlike before, all pages will be kept busied on return, like it was
done before with the 'reqpage' only. Now the reqpage goes away. With
new interface it is easier to implement code protected from race
conditions.
Such arrayed requests for now should be preceded by a call to
vm_pager_haspage() to make sure that request is possible. This
could be improved later, making vm_pager_haspage() obsolete.
Strengthening the promises on the business of the array of pages
allows us to remove such hacks as swp_pager_free_nrpage() and
vm_pager_free_nonreq().
o New KPI accepts two integer pointers that may optionally point at
values for read ahead and read behind, that a pager may do, if it
can. These pages are completely owned by pager, and not controlled
by the caller.
This shifts the UFS-specific readahead logic from vm_fault.c, which
should be file system agnostic, into vnode_pager.c. It also removes
one VOP_BMAP() request per hard fault.
Discussed with: kib, alc, jeff, scottl
Sponsored by: Nginx, Inc.
Sponsored by: Netflix
2015-12-16 21:30:45 +00:00
|
|
|
for (i = 1; i < initial_pagein; i++)
|
|
|
|
vm_page_readahead_finish(ma[i]);
|
1998-01-11 21:35:38 +00:00
|
|
|
}
|
2010-04-30 00:46:43 +00:00
|
|
|
vm_page_lock(ma[0]);
|
2013-08-05 08:55:35 +00:00
|
|
|
vm_page_hold(ma[0]);
|
2014-08-13 05:44:08 +00:00
|
|
|
vm_page_activate(ma[0]);
|
2010-04-30 00:46:43 +00:00
|
|
|
vm_page_unlock(ma[0]);
|
2013-03-09 02:32:23 +00:00
|
|
|
VM_OBJECT_WUNLOCK(object);
|
1998-01-11 21:35:38 +00:00
|
|
|
|
2004-04-23 03:01:40 +00:00
|
|
|
imgp->firstpage = sf_buf_alloc(ma[0], 0);
|
|
|
|
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
|
1998-01-11 21:35:38 +00:00
|
|
|
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1998-01-11 21:35:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_unmap_first_page(struct image_params *imgp)
|
1998-01-11 21:35:38 +00:00
|
|
|
{
|
2004-04-23 03:01:40 +00:00
|
|
|
vm_page_t m;
|
2001-05-19 01:28:09 +00:00
|
|
|
|
2003-12-28 04:37:59 +00:00
|
|
|
if (imgp->firstpage != NULL) {
|
2004-04-23 03:01:40 +00:00
|
|
|
m = sf_buf_page(imgp->firstpage);
|
|
|
|
sf_buf_free(imgp->firstpage);
|
|
|
|
imgp->firstpage = NULL;
|
2010-04-30 00:46:43 +00:00
|
|
|
vm_page_lock(m);
|
2013-08-05 08:55:35 +00:00
|
|
|
vm_page_unhold(m);
|
2010-04-30 00:46:43 +00:00
|
|
|
vm_page_unlock(m);
|
1998-01-11 21:35:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2017-07-03 20:44:01 +00:00
|
|
|
* Destroy old address space, and allocate a new stack.
|
|
|
|
* The new stack is only sgrowsiz large because it is grown
|
|
|
|
* automatically on a page fault.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
|
1994-05-25 09:21:21 +00:00
|
|
|
{
|
|
|
|
int error;
|
2002-03-31 00:05:30 +00:00
|
|
|
struct proc *p = imgp->proc;
|
|
|
|
struct vmspace *vmspace = p->p_vmspace;
|
2011-01-08 16:13:44 +00:00
|
|
|
vm_object_t obj;
|
2015-04-15 08:13:53 +00:00
|
|
|
struct rlimit rlim_stack;
|
2009-10-02 17:48:51 +00:00
|
|
|
vm_offset_t sv_minuser, stack_addr;
|
2002-09-21 22:07:17 +00:00
|
|
|
vm_map_t map;
|
2007-07-12 18:01:31 +00:00
|
|
|
u_long ssiz;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1995-11-06 12:52:37 +00:00
|
|
|
imgp->vmspace_destroyed = 1;
|
2006-08-15 12:10:57 +00:00
|
|
|
imgp->sysent = sv;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2007-03-25 10:05:44 +00:00
|
|
|
/* May be called with Giant held */
|
2017-11-09 22:51:48 +00:00
|
|
|
EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);
|
2002-01-13 19:36:35 +00:00
|
|
|
|
1997-04-11 23:37:23 +00:00
|
|
|
/*
|
|
|
|
* Blow away entire process VM, if address space not shared,
|
|
|
|
* otherwise, create a new VM space so that other threads are
|
|
|
|
* not disrupted
|
|
|
|
*/
|
2002-09-21 22:07:17 +00:00
|
|
|
map = &vmspace->vm_map;
|
2009-10-02 17:48:51 +00:00
|
|
|
if (map_at_zero)
|
|
|
|
sv_minuser = sv->sv_minuser;
|
|
|
|
else
|
|
|
|
sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
|
|
|
|
if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser &&
|
2002-09-21 22:07:17 +00:00
|
|
|
vm_map_max(map) == sv->sv_maxuser) {
|
2003-01-13 23:04:32 +00:00
|
|
|
shmexit(vmspace);
|
2006-04-03 21:16:10 +00:00
|
|
|
pmap_remove_pages(vmspace_pmap(vmspace));
|
2002-09-21 22:07:17 +00:00
|
|
|
vm_map_remove(map, vm_map_min(map), vm_map_max(map));
|
2017-06-30 15:49:36 +00:00
|
|
|
/* An exec terminates mlockall(MCL_FUTURE). */
|
|
|
|
vm_map_lock(map);
|
|
|
|
vm_map_modflags(map, 0, MAP_WIREFUTURE);
|
|
|
|
vm_map_unlock(map);
|
1997-04-11 23:37:23 +00:00
|
|
|
} else {
|
2009-10-02 17:48:51 +00:00
|
|
|
error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
|
2007-11-05 11:36:16 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2002-03-31 00:05:30 +00:00
|
|
|
vmspace = p->p_vmspace;
|
2002-09-21 22:07:17 +00:00
|
|
|
map = &vmspace->vm_map;
|
1997-04-11 23:37:23 +00:00
|
|
|
}
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2011-01-08 16:13:44 +00:00
|
|
|
/* Map a shared page */
|
|
|
|
obj = sv->sv_shared_page_obj;
|
|
|
|
if (obj != NULL) {
|
|
|
|
vm_object_reference(obj);
|
|
|
|
error = vm_map_fixed(map, obj, 0,
|
|
|
|
sv->sv_shared_page_base, sv->sv_shared_page_len,
|
2013-06-03 04:32:53 +00:00
|
|
|
VM_PROT_READ | VM_PROT_EXECUTE,
|
|
|
|
VM_PROT_READ | VM_PROT_EXECUTE,
|
|
|
|
MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
|
2017-07-03 20:44:01 +00:00
|
|
|
if (error != KERN_SUCCESS) {
|
2011-01-08 16:13:44 +00:00
|
|
|
vm_object_deallocate(obj);
|
2017-07-03 20:44:01 +00:00
|
|
|
return (vm_mmap_to_errno(error));
|
2011-01-08 16:13:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/* Allocate a new stack */
|
2015-04-15 08:13:53 +00:00
|
|
|
if (imgp->stack_sz != 0) {
|
2015-04-23 11:27:21 +00:00
|
|
|
ssiz = trunc_page(imgp->stack_sz);
|
2015-04-15 08:13:53 +00:00
|
|
|
PROC_LOCK(p);
|
2015-06-10 10:48:12 +00:00
|
|
|
lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
|
2015-04-15 08:13:53 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
if (ssiz > rlim_stack.rlim_max)
|
|
|
|
ssiz = rlim_stack.rlim_max;
|
|
|
|
if (ssiz > rlim_stack.rlim_cur) {
|
|
|
|
rlim_stack.rlim_cur = ssiz;
|
|
|
|
kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
|
|
|
|
}
|
|
|
|
} else if (sv->sv_maxssiz != NULL) {
|
2007-07-12 18:01:31 +00:00
|
|
|
ssiz = *sv->sv_maxssiz;
|
2015-04-15 08:13:53 +00:00
|
|
|
} else {
|
2007-07-12 18:01:31 +00:00
|
|
|
ssiz = maxssiz;
|
2015-04-15 08:13:53 +00:00
|
|
|
}
|
2007-07-12 18:01:31 +00:00
|
|
|
stack_addr = sv->sv_usrstack - ssiz;
|
|
|
|
error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
|
2011-01-08 16:13:44 +00:00
|
|
|
obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
|
2017-07-03 20:44:01 +00:00
|
|
|
sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
|
|
|
|
if (error != KERN_SUCCESS)
|
|
|
|
return (vm_mmap_to_errno(error));
|
1999-01-06 23:05:42 +00:00
|
|
|
|
2014-06-09 00:15:16 +00:00
|
|
|
/*
|
|
|
|
* vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
|
|
|
|
* are still used to enforce the stack rlimit on the process stack.
|
1999-01-06 23:05:42 +00:00
|
|
|
*/
|
2001-10-10 23:06:54 +00:00
|
|
|
vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
|
2015-06-30 15:22:47 +00:00
|
|
|
vmspace->vm_maxsaddr = (char *)stack_addr;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Copy out argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, const char *fname,
    enum uio_seg segflg, char **argv, char **envv)
{
	u_long argp, envp;
	int error;
	size_t length;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);

	/*
	 * Allocate demand-paged memory for the file name, argument, and
	 * environment strings.
	 */
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	/*
	 * Copy the file name into the head of the buffer; "segflg" selects
	 * whether "fname" is a kernel- or user-space pointer.
	 */
	if (fname != NULL) {
		args->fname = args->buf;
		error = (segflg == UIO_SYSSPACE) ?
		    copystr(fname, args->fname, PATH_MAX, &length) :
		    copyinstr(fname, args->fname, PATH_MAX, &length);
		if (error != 0)
			goto err_exit;
	} else
		length = 0;

	/* Argument and environment strings follow the file name. */
	args->begin_argv = args->buf + length;
	args->endp = args->begin_argv;
	args->stringspace = ARG_MAX;

	/*
	 * extract arguments first
	 */
	for (;;) {
		error = fueword(argv++, &argp);
		if (error == -1) {
			error = EFAULT;
			goto err_exit;
		}
		/* A NULL pointer terminates the argument vector. */
		if (argp == 0)
			break;
		error = copyinstr((void *)(uintptr_t)argp, args->endp,
		    args->stringspace, &length);
		if (error != 0) {
			/* Out of string space is reported as E2BIG. */
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto err_exit;
		}
		args->stringspace -= length;
		args->endp += length;
		args->argc++;
	}

	args->begin_envv = args->endp;

	/*
	 * extract environment strings
	 */
	if (envv) {
		for (;;) {
			error = fueword(envv++, &envp);
			if (error == -1) {
				error = EFAULT;
				goto err_exit;
			}
			/* A NULL pointer terminates the environment vector. */
			if (envp == 0)
				break;
			error = copyinstr((void *)(uintptr_t)envp,
			    args->endp, args->stringspace, &length);
			if (error != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto err_exit;
			}
			args->stringspace -= length;
			args->endp += length;
			args->envc++;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}
|
|
|
|
|
Implement CloudABI's exec() call.
Summary:
In a runtime that is purely based on capability-based security, there is
a strong emphasis on how programs start their execution. We need to make
sure that we execute an new program with an exact set of file
descriptors, ensuring that credentials are not leaked into the process
accidentally.
Providing the right file descriptors is just half the problem. There
also needs to be a framework in place that gives meaning to these file
descriptors. How does a CloudABI mail server know which of the file
descriptors corresponds to the socket that receives incoming emails?
Furthermore, how will this mail server acquire its configuration
parameters, as it cannot open a configuration file from a global path on
disk?
CloudABI solves this problem by replacing traditional string command
line arguments by tree-like data structure consisting of scalars,
sequences and mappings (similar to YAML/JSON). In this structure, file
descriptors are treated as a first-class citizen. When calling exec(),
file descriptors are passed on to the new executable if and only if they
are referenced from this tree structure. See the cloudabi-run(1) man
page for more details and examples (sysutils/cloudabi-utils).
Fortunately, the kernel does not need to care about this tree structure
at all. The C library is responsible for serializing and deserializing,
but also for extracting the list of referenced file descriptors. The
system call only receives a copy of the serialized data and a layout of
what the new file descriptor table should look like:
int proc_exec(int execfd, const void *data, size_t datalen, const int *fds,
size_t fdslen);
This change introduces a set of fd*_remapped() functions:
- fdcopy_remapped() pulls a copy of a file descriptor table, remapping
all of the file descriptors according to the provided mapping table.
- fdinstall_remapped() replaces the file descriptor table of the process
by the copy created by fdcopy_remapped().
- fdescfree_remapped() frees the table in case we aborted before
fdinstall_remapped().
We then add a function exec_copyin_data_fds() that builds on top these
functions. It copies in the data and constructs a new remapped file
descriptor. This is used by cloudabi_sys_proc_exec().
Test Plan:
cloudabi-run(1) is capable of spawning processes successfully, providing
it data and file descriptors. procstat -f seems to confirm all is good.
Regular FreeBSD processes also work properly.
Reviewers: kib, mjg
Reviewed By: mjg
Subscribers: imp
Differential Revision: https://reviews.freebsd.org/D3079
2015-07-16 07:05:42 +00:00
|
|
|
/*
 * Copy in a serialized argument blob and a file descriptor mapping for
 * CloudABI-style proc_exec().  "data"/"datalen" supply the raw argument
 * data (copied in as a single NUL-terminated string); "fds"/"fdslen"
 * name the descriptors of the current process that the new image may
 * inherit, remapped into a fresh descriptor table stored in args->fdp.
 */
int
exec_copyin_data_fds(struct thread *td, struct image_args *args,
    const void *data, size_t datalen, const int *fds, size_t fdslen)
{
	struct filedesc *ofdp;
	const char *p;
	int *kfds;
	int error;

	memset(args, '\0', sizeof(*args));
	ofdp = td->td_proc->p_fd;
	/* Bound the data by ARG_MAX and the fd list by the current table. */
	if (datalen >= ARG_MAX || fdslen > ofdp->fd_lastfile + 1)
		return (E2BIG);
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	args->begin_argv = args->buf;
	args->stringspace = ARG_MAX;

	if (datalen > 0) {
		/*
		 * Argument buffer has been provided. Copy it into the
		 * kernel as a single string and add a terminating null
		 * byte.
		 */
		error = copyin(data, args->begin_argv, datalen);
		if (error != 0)
			goto err_exit;
		args->begin_argv[datalen] = '\0';
		args->endp = args->begin_argv + datalen + 1;
		args->stringspace -= datalen + 1;

		/*
		 * Traditional argument counting. Count the number of
		 * null bytes.
		 */
		for (p = args->begin_argv; p < args->endp; ++p)
			if (*p == '\0')
				++args->argc;
	} else {
		/* No argument buffer provided. */
		args->endp = args->begin_argv;
	}
	/* There are no environment variables. */
	args->begin_envv = args->endp;

	/* Create new file descriptor table. */
	kfds = malloc(fdslen * sizeof(int), M_TEMP, M_WAITOK);
	error = copyin(fds, kfds, fdslen * sizeof(int));
	if (error != 0) {
		free(kfds, M_TEMP);
		goto err_exit;
	}
	error = fdcopy_remapped(ofdp, kfds, fdslen, &args->fdp);
	free(kfds, M_TEMP);
	if (error != 0)
		goto err_exit;

	return (0);
err_exit:
	exec_free_args(args);
	return (error);
}
|
|
|
|
|
2017-01-05 01:44:12 +00:00
|
|
|
/*
 * A preallocated KVA range from the exec map, used to back one exec
 * argument buffer.  Free ranges live either in a per-CPU cache slot or
 * on the global freelist below.
 */
struct exec_args_kva {
	vm_offset_t addr;	/* base address of the mapped range */
	u_int gen;		/* exec_args_gen value at last release */
	SLIST_ENTRY(exec_args_kva) next;
};

/* Per-CPU cache holding at most one free KVA range. */
DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);

/* Global freelist for ranges that do not fit in a per-CPU slot. */
static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
static struct mtx exec_args_kva_mtx;
/* Incremented on each vm_lowmem event; see exec_args_kva_lowmem(). */
static u_int exec_args_gen;
|
2017-01-05 01:44:12 +00:00
|
|
|
|
|
|
|
/*
 * Boot-time initialization: set up the freelist and its mutex, then
 * preallocate exec_map_entries KVA ranges from the exec map so that
 * exec_alloc_args_kva() never has to carve KVA at exec time.
 */
static void
exec_prealloc_args_kva(void *arg __unused)
{
	struct exec_args_kva *argkva;
	u_int i;

	SLIST_INIT(&exec_args_kva_freelist);
	mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
	for (i = 0; i < exec_map_entries; i++) {
		argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
		argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
		argkva->gen = exec_args_gen;
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
	}
}
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);
|
|
|
|
|
|
|
|
/*
 * Allocate a KVA range for an argument buffer.  Fast path: atomically
 * claim the current CPU's cached range without taking any lock.  Slow
 * path: take one from the global freelist, sleeping until a range is
 * released.  The chosen range is recorded in *cookie for the matching
 * exec_free_args_kva() call.  This function cannot fail.
 */
static vm_offset_t
exec_alloc_args_kva(void **cookie)
{
	struct exec_args_kva *argkva;

	argkva = (void *)atomic_readandclear_ptr(
	    (uintptr_t *)DPCPU_PTR(exec_args_kva));
	if (argkva == NULL) {
		mtx_lock(&exec_args_kva_mtx);
		while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
			(void)mtx_sleep(&exec_args_kva_freelist,
			    &exec_args_kva_mtx, 0, "execkva", 0);
		SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
		mtx_unlock(&exec_args_kva_mtx);
	}
	*(struct exec_args_kva **)cookie = argkva;
	return (argkva->addr);
}
|
|
|
|
|
|
|
|
/*
 * Return a KVA range to the allocator.  "gen" is the caller's view of
 * exec_args_gen; if it differs from the generation stored in the range,
 * a lowmem event occurred while the range was held, so MADV_FREE its
 * pages before reuse.
 */
static void
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
{
	vm_offset_t base;

	base = argkva->addr;
	if (argkva->gen != gen) {
		/*
		 * The return value is deliberately ignored: with
		 * MADV_FREE as the behavior, vm_map_madvise() always
		 * succeeds.
		 */
		(void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
		    MADV_FREE);
		argkva->gen = gen;
	}
	/*
	 * Try to park the range in this CPU's empty cache slot; if the
	 * slot is occupied, push it on the global freelist and wake one
	 * thread that may be sleeping in exec_alloc_args_kva().
	 */
	if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
	    (uintptr_t)NULL, (uintptr_t)argkva)) {
		mtx_lock(&exec_args_kva_mtx);
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
		wakeup_one(&exec_args_kva_freelist);
		mtx_unlock(&exec_args_kva_mtx);
	}
}
|
|
|
|
|
2017-02-15 01:50:58 +00:00
|
|
|
static void
|
|
|
|
exec_free_args_kva(void *cookie)
|
|
|
|
{
|
|
|
|
|
|
|
|
exec_release_args_kva(cookie, exec_args_gen);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * vm_lowmem event handler: advance the generation counter and apply
 * MADV_FREE to every free KVA range so its backing pages can be
 * reclaimed.  In-use ranges get the same treatment when they are freed,
 * because their stored generation will no longer match.
 */
static void
exec_args_kva_lowmem(void *arg __unused)
{
	SLIST_HEAD(, exec_args_kva) head;
	struct exec_args_kva *argkva;
	u_int gen;
	int i;

	gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;

	/*
	 * Force an madvise of each KVA range. Any currently allocated ranges
	 * will have MADV_FREE applied once they are freed.
	 */
	SLIST_INIT(&head);
	mtx_lock(&exec_args_kva_mtx);
	SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
	mtx_unlock(&exec_args_kva_mtx);
	while ((argkva = SLIST_FIRST(&head)) != NULL) {
		SLIST_REMOVE_HEAD(&head, next);
		exec_release_args_kva(argkva, gen);
	}

	/* Drain the per-CPU cache slots as well. */
	CPU_FOREACH(i) {
		argkva = (void *)atomic_readandclear_ptr(
		    (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
		if (argkva != NULL)
			exec_release_args_kva(argkva, gen);
	}
}
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
    EVENTHANDLER_PRI_ANY);
|
|
|
|
|
2010-07-27 17:31:03 +00:00
|
|
|
/*
 * Allocate temporary demand-paged, zero-filled memory for the file name,
 * argument, and environment strings.  Always returns 0: the underlying
 * KVA allocator sleeps until a buffer is available rather than failing.
 */
int
exec_alloc_args(struct image_args *args)
{

	args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
	return (0);
}
|
|
|
|
|
2010-07-23 18:58:27 +00:00
|
|
|
/*
 * Release all resources held by an image_args: the argument-buffer KVA
 * range, the separately malloc'ed file name buffer (if any), and any
 * remapped descriptor table created by exec_copyin_data_fds().  Safe to
 * call on a partially initialized structure; each member is checked.
 */
void
exec_free_args(struct image_args *args)
{

	if (args->buf != NULL) {
		exec_free_args_kva(args->bufkva);
		args->buf = NULL;
	}
	if (args->fname_buf != NULL) {
		free(args->fname_buf, M_TEMP);
		args->fname_buf = NULL;
	}
	if (args->fdp != NULL)
		fdescfree_remapped(args->fdp);
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables. Return a pointer to the base so that it can be used
 * as the initial stack pointer.
 */
register_t *
exec_copyout_strings(struct image_params *imgp)
{
	int argc, envc;
	char **vectp;
	char *stringp;
	uintptr_t destp;	/* moves downward from the ps_strings area */
	register_t *stack_base;
	struct ps_strings *arginfo;
	struct proc *p;
	size_t execpath_len;
	int szsigcode, szps;
	char canary[sizeof(long) * 8];

	szps = sizeof(pagesizes[0]) * MAXPAGESIZES;
	/*
	 * Calculate string base and vector table pointers.
	 * Also deal with signal trampoline code for this exec type.
	 */
	if (imgp->execpath != NULL && imgp->auxargs != NULL)
		execpath_len = strlen(imgp->execpath) + 1;
	else
		execpath_len = 0;
	p = imgp->proc;
	szsigcode = 0;
	arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
	/*
	 * Only copy out trampoline code when the ABI has no shared
	 * sigcode mapping (sv_sigcode_base == 0).
	 */
	if (p->p_sysent->sv_sigcode_base == 0) {
		if (p->p_sysent->sv_szsigcode != NULL)
			szsigcode = *(p->p_sysent->sv_szsigcode);
	}
	destp =	(uintptr_t)arginfo;

	/*
	 * install sigcode
	 */
	if (szsigcode != 0) {
		destp -= szsigcode;
		destp = rounddown2(destp, sizeof(void *));
		copyout(p->p_sysent->sv_sigcode, (void *)destp, szsigcode);
	}

	/*
	 * Copy the image path for the rtld.
	 */
	if (execpath_len != 0) {
		destp -= execpath_len;
		destp = rounddown2(destp, sizeof(void *));
		imgp->execpathp = destp;
		copyout(imgp->execpath, (void *)destp, execpath_len);
	}

	/*
	 * Prepare the canary for SSP.
	 */
	arc4rand(canary, sizeof(canary), 0);
	destp -= sizeof(canary);
	imgp->canary = destp;
	copyout(canary, (void *)destp, sizeof(canary));
	imgp->canarylen = sizeof(canary);

	/*
	 * Prepare the pagesizes array.
	 */
	destp -= szps;
	destp = rounddown2(destp, sizeof(void *));
	imgp->pagesizes = destp;
	copyout(pagesizes, (void *)destp, szps);
	imgp->pagesizeslen = szps;

	/* Reserve space for the argument and environment strings. */
	destp -= ARG_MAX - imgp->args->stringspace;
	destp = rounddown2(destp, sizeof(void *));

	vectp = (char **)destp;
	if (imgp->auxargs) {
		/*
		 * Allocate room on the stack for the ELF auxargs
		 * array.  It has up to AT_COUNT entries.
		 */
		vectp -= howmany(AT_COUNT * sizeof(Elf_Auxinfo),
		    sizeof(*vectp));
	}

	/*
	 * Allocate room for the argv[] and env vectors including the
	 * terminating NULL pointers.
	 */
	vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

	/*
	 * vectp also becomes our initial stack base
	 */
	stack_base = (register_t *)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	copyout(stringp, (void *)destp, ARG_MAX - imgp->args->stringspace);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
	suword32(&arginfo->ps_nargvstr, argc);

	/*
	 * Fill in argument portion of vector table.  "destp" walks the
	 * user copy of the strings in step with "stringp" walking the
	 * kernel copy, so each vector slot points at its string.
	 */
	for (; argc > 0; --argc) {
		suword(vectp++, (long)(intptr_t)destp);
		while (*stringp++ != 0)
			destp++;
		destp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	suword(vectp++, 0);

	suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
	suword32(&arginfo->ps_nenvstr, envc);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		suword(vectp++, (long)(intptr_t)destp);
		while (*stringp++ != 0)
			destp++;
		destp++;
	}

	/* end of vector table is a null pointer */
	suword(vectp, 0);

	return (stack_base);
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
1994-05-25 09:21:21 +00:00
|
|
|
* Check permissions of file to execute.
|
2000-11-30 21:06:05 +00:00
|
|
|
* Called with imgp->vp locked.
|
1994-05-25 09:21:21 +00:00
|
|
|
* Return 0 for success or error code on failure.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1998-03-02 05:47:58 +00:00
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_check_permissions(struct image_params *imgp)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1995-11-06 12:52:37 +00:00
|
|
|
struct vnode *vp = imgp->vp;
|
|
|
|
struct vattr *attr = imgp->attr;
|
2002-02-27 18:32:23 +00:00
|
|
|
struct thread *td;
|
2012-11-02 13:56:36 +00:00
|
|
|
int error, writecount;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
2008-03-12 10:12:01 +00:00
|
|
|
td = curthread;
|
2002-08-01 14:31:58 +00:00
|
|
|
|
2003-01-21 03:26:28 +00:00
|
|
|
/* Get file attributes */
|
2008-08-28 15:23:18 +00:00
|
|
|
error = VOP_GETATTR(vp, attr, td->td_ucred);
|
2003-01-21 03:26:28 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2002-08-01 14:31:58 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
|
2002-08-01 14:31:58 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
#endif
|
2010-08-30 16:30:18 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
2010-08-30 16:30:18 +00:00
|
|
|
* 1) Check if file execution is disabled for the filesystem that
|
|
|
|
* this file resides on.
|
|
|
|
* 2) Ensure that at least one execute bit is on. Otherwise, a
|
|
|
|
* privileged user will always succeed, and we don't want this
|
|
|
|
* to happen unless the file really is executable.
|
|
|
|
* 3) Ensure that the file is a regular file.
|
1994-05-25 09:21:21 +00:00
|
|
|
*/
|
1995-11-06 12:52:37 +00:00
|
|
|
if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
|
2010-08-30 16:30:18 +00:00
|
|
|
(attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
|
2002-02-27 18:32:23 +00:00
|
|
|
(attr->va_type != VREG))
|
1994-05-25 09:21:21 +00:00
|
|
|
return (EACCES);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
1994-05-25 09:21:21 +00:00
|
|
|
* Zero length files can't be exec'd
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
if (attr->va_size == 0)
|
|
|
|
return (ENOEXEC);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for execute permission to file based on current credentials.
|
|
|
|
*/
|
2002-02-27 18:32:23 +00:00
|
|
|
error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
|
1994-05-25 09:21:21 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
1997-04-04 04:17:11 +00:00
|
|
|
/*
|
|
|
|
* Check number of open-for-writes on the file and deny execution
|
|
|
|
* if there are any.
|
|
|
|
*/
|
2012-11-02 13:56:36 +00:00
|
|
|
error = VOP_GET_WRITECOUNT(vp, &writecount);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
|
|
|
if (writecount != 0)
|
1997-04-04 04:17:11 +00:00
|
|
|
return (ETXTBSY);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call filesystem specific open routine (which does nothing in the
|
|
|
|
* general case).
|
|
|
|
*/
|
2007-05-31 11:51:53 +00:00
|
|
|
error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
|
2008-07-17 16:44:07 +00:00
|
|
|
if (error == 0)
|
|
|
|
imgp->opened = 1;
|
2002-02-27 18:32:23 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1998-10-16 03:55:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Exec handler registration
|
|
|
|
*/
|
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_register(const struct execsw *execsw_arg)
|
1998-10-16 03:55:01 +00:00
|
|
|
{
|
|
|
|
const struct execsw **es, **xs, **newexecsw;
|
2018-01-22 02:08:10 +00:00
|
|
|
u_int count = 2; /* New slot and trailing NULL */
|
1998-10-16 03:55:01 +00:00
|
|
|
|
|
|
|
if (execsw)
|
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
count++;
|
2003-02-19 05:47:46 +00:00
|
|
|
newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
|
1998-10-16 03:55:01 +00:00
|
|
|
xs = newexecsw;
|
|
|
|
if (execsw)
|
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
*xs++ = *es;
|
|
|
|
*xs++ = execsw_arg;
|
|
|
|
*xs = NULL;
|
|
|
|
if (execsw)
|
|
|
|
free(execsw, M_TEMP);
|
|
|
|
execsw = newexecsw;
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1998-10-16 03:55:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-05-17 00:34:34 +00:00
|
|
|
exec_unregister(const struct execsw *execsw_arg)
|
1998-10-16 03:55:01 +00:00
|
|
|
{
|
|
|
|
const struct execsw **es, **xs, **newexecsw;
|
|
|
|
int count = 1;
|
|
|
|
|
|
|
|
if (execsw == NULL)
|
|
|
|
panic("unregister with no handlers left?\n");
|
|
|
|
|
|
|
|
for (es = execsw; *es; es++) {
|
|
|
|
if (*es == execsw_arg)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (*es == NULL)
|
2002-08-24 22:01:40 +00:00
|
|
|
return (ENOENT);
|
1998-10-16 03:55:01 +00:00
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
if (*es != execsw_arg)
|
|
|
|
count++;
|
2003-02-19 05:47:46 +00:00
|
|
|
newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
|
1998-10-16 03:55:01 +00:00
|
|
|
xs = newexecsw;
|
|
|
|
for (es = execsw; *es; es++)
|
|
|
|
if (*es != execsw_arg)
|
|
|
|
*xs++ = *es;
|
|
|
|
*xs = NULL;
|
|
|
|
if (execsw)
|
|
|
|
free(execsw, M_TEMP);
|
|
|
|
execsw = newexecsw;
|
2002-08-24 22:01:40 +00:00
|
|
|
return (0);
|
1998-10-16 03:55:01 +00:00
|
|
|
}
|