Add (but don't activate) code for a special VM option that makes
downward-growing stacks more general.
Add (but don't activate) code to use the new stack facility
when running threads (specifically, the Linux threads support).
This allows people to use both the Linux-compiled linuxthreads
library and the native FreeBSD linux-threads port.

The code is conditional on VM_STACK. Leaving it undefined produces
the old, heavily tested system.

Submitted by: Richard Seaman <dick@tar.com>
Julian Elischer 1999-01-06 23:05:42 +00:00
parent f6b387c28e
commit 2267af789e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=42360
17 changed files with 513 additions and 147 deletions
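
As an illustration only (not part of this commit): with a kernel and headers
built with VM_STACK, the new MAP_STACK mmap flag added below could be used
from userland roughly as follows. The mapping size, the NULL address hint,
and the printed layout are assumptions for the sketch; per the vm_mmap()
change, the mapping must be anonymous (fd == -1) and PROT_READ|PROT_WRITE,
and the kernel maps only the top SGROWSIZ bytes, growing the region down
on demand.

/*
 * Illustration only -- not part of this commit.  Userland sketch of the
 * new MAP_STACK flag, assuming a VM_STACK kernel and headers.  A real
 * caller might need to pass an explicit address hint instead of NULL.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	size_t maxsize = 512 * 1024;		/* arbitrary stack size limit */
	char *bos;

	bos = mmap(NULL, maxsize, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_STACK, -1, 0);
	if (bos == MAP_FAILED) {
		perror("mmap(MAP_STACK)");
		return (1);
	}
	/* The returned address is the lowest address the region may grow
	 * down to; the usable top of stack is bos + maxsize. */
	printf("stack region: %p .. %p\n", (void *)bos, (void *)(bos + maxsize));
	return (0);
}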

View File

@ -25,7 +25,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: linux_sysvec.c,v 1.41 1998/12/19 02:55:33 julian Exp $
* $Id: linux_sysvec.c,v 1.42 1998/12/19 19:05:57 sos Exp $
*/
/* XXX we use functions that might not exist. */
@ -50,10 +50,6 @@
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#ifdef COMPAT_LINUX_THREADS
#include <sys/lock.h> /* needed, for now, by vm_map.h */
#include <vm/vm_map.h> /* needed, for now, for VM_STACK defines */
#endif /* COMPAT_LINUX_THREADS */
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/module.h>
@ -221,24 +217,11 @@ linux_sendsig(sig_t catcher, int sig, int mask, u_long code)
* and the stack can not be grown. useracc will return FALSE
* if access is denied.
*/
#ifdef COMPAT_LINUX_THREADS
#ifdef USE_VM_STACK
#ifndef USE_VM_STACK_FOR_EXEC
if ((((caddr_t)fp > p->p_vmspace->vm_maxsaddr &&
(caddr_t)fp < (caddr_t)USRSTACK &&
grow(p, (int)fp) == FALSE) ||
(((caddr_t)fp <= p->p_vmspace->vm_maxsaddr ||
(caddr_t)fp >= (caddr_t)USRSTACK) &&
grow_stack (p, (int)fp) == FALSE)) ||
#else
#ifdef VM_STACK
if ((grow_stack (p, (int)fp) == FALSE) ||
#endif /* USE_VM_STACK_FOR_EXEC */
#else
if ((grow(p, (int)fp) == FALSE) ||
#endif /* USE_VM_STACK */
#else
if ((grow(p, (int)fp) == FALSE) ||
#endif /* COMPAT_LINUX_THREADS */
#endif
(useracc((caddr_t)fp, sizeof (struct linux_sigframe), B_WRITE) == FALSE)) {
/*
* Process has trashed its stack; give it an illegal

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.318 1998/12/10 01:49:01 steve Exp $
* $Id: machdep.c,v 1.319 1998/12/16 16:28:56 bde Exp $
*/
#include "apm.h"
@ -525,7 +525,11 @@ sendsig(catcher, sig, mask, code)
* and the stack can not be grown. useracc will return FALSE
* if access is denied.
*/
#ifdef VM_STACK
if ((grow_stack (p, (int)fp) == FALSE) ||
#else
if ((grow(p, (int)fp) == FALSE) ||
#endif
(useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) {
/*
* Process has trashed its stack; give it an illegal

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.131 1998/12/16 15:21:50 bde Exp $
* $Id: trap.c,v 1.132 1998/12/28 23:02:56 msmith Exp $
*/
/*
@ -665,6 +665,7 @@ trap_pfault(frame, usermode, eva)
/*
* Grow the stack if necessary
*/
#ifndef VM_STACK
if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
@ -673,6 +674,20 @@ trap_pfault(frame, usermode, eva)
}
}
#else
/* grow_stack returns false only if va falls into
* a growable stack region and the stack growth
* fails. It returns true if va was not within
* a growable stack region, or if the stack
* growth succeeded.
*/
if (!grow_stack (p, va)) {
rv = KERN_FAILURE;
--p->p_lock;
goto nogo;
}
#endif
/* Fault in the user page: */
rv = vm_fault(map, va, ftype,
(ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);
@ -775,6 +790,7 @@ trap_pfault(frame, usermode, eva)
/*
* Grow the stack if necessary
*/
#ifndef VM_STACK
if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
@ -782,6 +798,19 @@ trap_pfault(frame, usermode, eva)
goto nogo;
}
}
#else
/* grow_stack returns false only if va falls into
* a growable stack region and the stack growth
* fails. It returns true if va was not within
* a growable stack region, or if the stack
* growth succeeded.
*/
if (!grow_stack (p, va)) {
rv = KERN_FAILURE;
--p->p_lock;
goto nogo;
}
#endif
/* Fault in the user page: */
rv = vm_fault(map, va, ftype,
@ -969,12 +998,19 @@ int trapwrite(addr)
++p->p_lock;
#ifndef VM_STACK
if ((caddr_t)va >= vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
--p->p_lock;
return (1);
}
}
#else
if (!grow_stack (p, va)) {
--p->p_lock;
return (1);
}
#endif
/*
* fault the data page

View File

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.113 1998/10/31 17:21:30 peter Exp $
* $Id: vm_machdep.c,v 1.114 1998/12/16 15:21:51 bde Exp $
*/
#include "npx.h"
@ -507,6 +507,7 @@ cpu_reset_real()
while(1);
}
#ifndef VM_STACK
/*
* Grow the user stack to allow for 'sp'. This version grows the stack in
* chunks of SGROWSIZ.
@ -559,6 +560,22 @@ grow(p, sp)
return (1);
}
#else
int
grow_stack(p, sp)
struct proc *p;
u_int sp;
{
int rv;
rv = vm_map_growstack (p, sp);
if (rv != KERN_SUCCESS)
return (0);
return (1);
}
#endif
static int cnt_prezero;

View File

@ -25,7 +25,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: linux_misc.c,v 1.49 1998/12/24 21:21:20 julian Exp $
* $Id: linux_misc.c,v 1.50 1998/12/30 21:01:33 sos Exp $
*/
#include <sys/param.h>
@ -688,45 +688,44 @@ linux_mmap(struct proc *p, struct linux_mmap_args *args)
bsd_args.len = linux_args.len;
#else
/*#if !defined(USE_VM_STACK) && !defined(USE_VM_STACK_FOR_EXEC)*/
#ifndef VM_STACK
/* Linux Threads will map into the proc stack space, unless
we prevent it. This causes problems if we're not using
our VM_STACK options.
*/
* we prevent it. This causes problems if we're not using
* our VM_STACK options.
*/
if ((unsigned int)linux_args.addr + linux_args.len > (USRSTACK - MAXSSIZ))
return (EINVAL);
/*#endif*/
return (EINVAL);
#endif
if (linux_args.flags & LINUX_MAP_GROWSDOWN) {
#ifdef USE_VM_STACK
/* USE_VM_STACK is defined (or not) in vm/vm_map.h */
bsd_args.flags |= MAP_STACK;
#ifdef VM_STACK
bsd_args.flags |= MAP_STACK;
#endif
/* The linux MAP_GROWSDOWN option does not limit auto
growth of the region. Linux mmap with this option
takes as addr the inital BOS, and as len, the initial
region size. It can then grow down from addr without
limit. However, linux threads has an implicit internal
limit to stack size of STACK_SIZE. Its just not
enforced explicitly in linux. But, here we impose
a limit of (STACK_SIZE - GUARD_SIZE) on the stack
region, since we can do this with our mmap.
Our mmap with MAP_STACK takes addr as the maximum
downsize limit on BOS, and as len the max size of
the region. It them maps the top SGROWSIZ bytes,
and autgrows the region down, up to the limit
in addr.
If we don't use the MAP_STACK option, the effect
of this code is to allocate a stack region of a
fixed size of (STACK_SIZE - GUARD_SIZE).
*/
* growth of the region. Linux mmap with this option
* takes as addr the initial BOS, and as len, the initial
* region size. It can then grow down from addr without
* limit. However, linux threads has an implicit internal
* limit to stack size of STACK_SIZE. It's just not
* enforced explicitly in linux. But, here we impose
* a limit of (STACK_SIZE - GUARD_SIZE) on the stack
* region, since we can do this with our mmap.
*
* Our mmap with MAP_STACK takes addr as the maximum
* downsize limit on BOS, and as len the max size of
* the region. It then maps the top SGROWSIZ bytes,
* and autogrows the region down, up to the limit
* in addr.
*
* If we don't use the MAP_STACK option, the effect
* of this code is to allocate a stack region of a
* fixed size of (STACK_SIZE - GUARD_SIZE).
*/
/* This gives us TOS */
bsd_args.addr = linux_args.addr + linux_args.len;
bsd_args.addr = linux_args.addr + linux_args.len;
/* This gives us our maximum stack size */
if (linux_args.len > STACK_SIZE - GUARD_SIZE)
@ -735,15 +734,15 @@ linux_mmap(struct proc *p, struct linux_mmap_args *args)
bsd_args.len = STACK_SIZE - GUARD_SIZE;
/* This gives us a new BOS. If we're using VM_STACK, then
mmap will just map the top SGROWSIZ bytes, and let
the stack grow down to the limit at BOS. If we're
not using VM_STACK we map the full stack, since we
don't have a way to autogrow it.
*/
* mmap will just map the top SGROWSIZ bytes, and let
* the stack grow down to the limit at BOS. If we're
* not using VM_STACK we map the full stack, since we
* don't have a way to autogrow it.
*/
bsd_args.addr -= bsd_args.len;
} else {
bsd_args.addr = linux_args.addr;
bsd_args.addr = linux_args.addr;
bsd_args.len = linux_args.len;
}
#endif /* COMPAT_LINUX_THREADS */
@ -977,11 +976,11 @@ linux_waitpid(struct proc *p, struct linux_waitpid_args *args)
tmp.options = args->options;
#else
/* This filters out the linux option _WCLONE. I don't
think we need it, but I could be wrong. If we need
it, we need to fix wait4, since it will give us an
error return of EINVAL if we pass in _WCLONE, and
of course, it won't do anything with it.
*/
* think we need it, but I could be wrong. If we need
* it, we need to fix wait4, since it will give us an
* error return of EINVAL if we pass in _WCLONE, and
* of course, it won't do anything with it.
*/
tmp.options = (args->options & (WNOHANG | WUNTRACED));
#endif /* COMPAT_LINUX_THREADS */
tmp.rusage = NULL;
@ -990,7 +989,7 @@ linux_waitpid(struct proc *p, struct linux_waitpid_args *args)
#ifndef COMPAT_LINUX_THREADS
return error;
#else
return error;
return error;
#endif /* COMPAT_LINUX_THREADS */
if (args->status) {
if (error = copyin(args->status, &tmpstat, sizeof(int)))
@ -1028,11 +1027,11 @@ linux_wait4(struct proc *p, struct linux_wait4_args *args)
tmp.options = args->options;
#else
/* This filters out the linux option _WCLONE. I don't
think we need it, but I could be wrong. If we need
it, we need to fix wait4, since it will give us an
error return of EINVAL if we pass in _WCLONE, and
of course, it won't do anything with it.
*/
* think we need it, but I could be wrong. If we need
* it, we need to fix wait4, since it will give us an
* error return of EINVAL if we pass in _WCLONE, and
* of course, it won't do anything with it.
*/
tmp.options = (args->options & (WNOHANG | WUNTRACED));
#endif /* COMPAT_LINUX_THREADS */
tmp.rusage = args->rusage;
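
As an illustration only (not part of the diff): the address arithmetic the
MAP_GROWSDOWN translation above performs. The Linux caller supplies the
initial bottom-of-stack and length; the BSD side keeps the top of stack
fixed and passes mmap a bottom limit plus a maximum size, which the comment
above bounds by (STACK_SIZE - GUARD_SIZE). The constants and addresses in
the sketch are placeholders, not the emulator's real values.

/*
 * Illustration only -- not part of the diff.  Placeholder constants and
 * addresses; the real STACK_SIZE/GUARD_SIZE come from the Linux emulator.
 */
#include <stdio.h>

#define STACK_SIZE	(2UL * 1024 * 1024)	/* placeholder */
#define GUARD_SIZE	(4UL * 1024)		/* placeholder */

int
main(void)
{
	unsigned long linux_addr = 0x40000000UL;	/* hypothetical initial BOS  */
	unsigned long linux_len  = 0x00020000UL;	/* hypothetical initial size */
	unsigned long tos, bsd_len, bsd_addr;

	tos = linux_addr + linux_len;		/* top of stack, stays fixed */
	bsd_len = STACK_SIZE - GUARD_SIZE;	/* region size limit described above */
	bsd_addr = tos - bsd_len;		/* new BOS: lowest growth limit */

	printf("BSD mmap args: addr=%#lx len=%#lx (MAP_STACK maps only the top chunk)\n",
	    bsd_addr, bsd_len);
	return (0);
}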

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.318 1998/12/10 01:49:01 steve Exp $
* $Id: machdep.c,v 1.319 1998/12/16 16:28:56 bde Exp $
*/
#include "apm.h"
@ -525,7 +525,11 @@ sendsig(catcher, sig, mask, code)
* and the stack can not be grown. useracc will return FALSE
* if access is denied.
*/
#ifdef VM_STACK
if ((grow_stack (p, (int)fp) == FALSE) ||
#else
if ((grow(p, (int)fp) == FALSE) ||
#endif
(useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) {
/*
* Process has trashed its stack; give it an illegal

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.131 1998/12/16 15:21:50 bde Exp $
* $Id: trap.c,v 1.132 1998/12/28 23:02:56 msmith Exp $
*/
/*
@ -665,6 +665,7 @@ trap_pfault(frame, usermode, eva)
/*
* Grow the stack if necessary
*/
#ifndef VM_STACK
if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
@ -673,6 +674,20 @@ trap_pfault(frame, usermode, eva)
}
}
#else
/* grow_stack returns false only if va falls into
* a growable stack region and the stack growth
* fails. It returns true if va was not within
* a growable stack region, or if the stack
* growth succeeded.
*/
if (!grow_stack (p, va)) {
rv = KERN_FAILURE;
--p->p_lock;
goto nogo;
}
#endif
/* Fault in the user page: */
rv = vm_fault(map, va, ftype,
(ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);
@ -775,6 +790,7 @@ trap_pfault(frame, usermode, eva)
/*
* Grow the stack if necessary
*/
#ifndef VM_STACK
if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
@ -782,6 +798,19 @@ trap_pfault(frame, usermode, eva)
goto nogo;
}
}
#else
/* grow_stack returns false only if va falls into
* a growable stack region and the stack growth
* fails. It returns true if va was not within
* a growable stack region, or if the stack
* growth succeeded.
*/
if (!grow_stack (p, va)) {
rv = KERN_FAILURE;
--p->p_lock;
goto nogo;
}
#endif
/* Fault in the user page: */
rv = vm_fault(map, va, ftype,
@ -969,12 +998,19 @@ int trapwrite(addr)
++p->p_lock;
#ifndef VM_STACK
if ((caddr_t)va >= vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
--p->p_lock;
return (1);
}
}
#else
if (!grow_stack (p, va)) {
--p->p_lock;
return (1);
}
#endif
/*
* fault the data page

View File

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.113 1998/10/31 17:21:30 peter Exp $
* $Id: vm_machdep.c,v 1.114 1998/12/16 15:21:51 bde Exp $
*/
#include "npx.h"
@ -507,6 +507,7 @@ cpu_reset_real()
while(1);
}
#ifndef VM_STACK
/*
* Grow the user stack to allow for 'sp'. This version grows the stack in
* chunks of SGROWSIZ.
@ -559,6 +560,22 @@ grow(p, sp)
return (1);
}
#else
int
grow_stack(p, sp)
struct proc *p;
u_int sp;
{
int rv;
rv = vm_map_growstack (p, sp);
if (rv != KERN_SUCCESS)
return (0);
return (1);
}
#endif
static int cnt_prezero;

View File

@ -25,7 +25,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: linux_misc.c,v 1.49 1998/12/24 21:21:20 julian Exp $
* $Id: linux_misc.c,v 1.50 1998/12/30 21:01:33 sos Exp $
*/
#include <sys/param.h>
@ -688,45 +688,44 @@ linux_mmap(struct proc *p, struct linux_mmap_args *args)
bsd_args.len = linux_args.len;
#else
/*#if !defined(USE_VM_STACK) && !defined(USE_VM_STACK_FOR_EXEC)*/
#ifndef VM_STACK
/* Linux Threads will map into the proc stack space, unless
we prevent it. This causes problems if we're not using
our VM_STACK options.
*/
* we prevent it. This causes problems if we're not using
* our VM_STACK options.
*/
if ((unsigned int)linux_args.addr + linux_args.len > (USRSTACK - MAXSSIZ))
return (EINVAL);
/*#endif*/
return (EINVAL);
#endif
if (linux_args.flags & LINUX_MAP_GROWSDOWN) {
#ifdef USE_VM_STACK
/* USE_VM_STACK is defined (or not) in vm/vm_map.h */
bsd_args.flags |= MAP_STACK;
#ifdef VM_STACK
bsd_args.flags |= MAP_STACK;
#endif
/* The linux MAP_GROWSDOWN option does not limit auto
growth of the region. Linux mmap with this option
takes as addr the inital BOS, and as len, the initial
region size. It can then grow down from addr without
limit. However, linux threads has an implicit internal
limit to stack size of STACK_SIZE. Its just not
enforced explicitly in linux. But, here we impose
a limit of (STACK_SIZE - GUARD_SIZE) on the stack
region, since we can do this with our mmap.
Our mmap with MAP_STACK takes addr as the maximum
downsize limit on BOS, and as len the max size of
the region. It them maps the top SGROWSIZ bytes,
and autgrows the region down, up to the limit
in addr.
If we don't use the MAP_STACK option, the effect
of this code is to allocate a stack region of a
fixed size of (STACK_SIZE - GUARD_SIZE).
*/
* growth of the region. Linux mmap with this option
* takes as addr the initial BOS, and as len, the initial
* region size. It can then grow down from addr without
* limit. However, linux threads has an implicit internal
* limit to stack size of STACK_SIZE. It's just not
* enforced explicitly in linux. But, here we impose
* a limit of (STACK_SIZE - GUARD_SIZE) on the stack
* region, since we can do this with our mmap.
*
* Our mmap with MAP_STACK takes addr as the maximum
* downsize limit on BOS, and as len the max size of
* the region. It then maps the top SGROWSIZ bytes,
* and autogrows the region down, up to the limit
* in addr.
*
* If we don't use the MAP_STACK option, the effect
* of this code is to allocate a stack region of a
* fixed size of (STACK_SIZE - GUARD_SIZE).
*/
/* This gives us TOS */
bsd_args.addr = linux_args.addr + linux_args.len;
bsd_args.addr = linux_args.addr + linux_args.len;
/* This gives us our maximum stack size */
if (linux_args.len > STACK_SIZE - GUARD_SIZE)
@ -735,15 +734,15 @@ linux_mmap(struct proc *p, struct linux_mmap_args *args)
bsd_args.len = STACK_SIZE - GUARD_SIZE;
/* This gives us a new BOS. If we're using VM_STACK, then
mmap will just map the top SGROWSIZ bytes, and let
the stack grow down to the limit at BOS. If we're
not using VM_STACK we map the full stack, since we
don't have a way to autogrow it.
*/
* mmap will just map the top SGROWSIZ bytes, and let
* the stack grow down to the limit at BOS. If we're
* not using VM_STACK we map the full stack, since we
* don't have a way to autogrow it.
*/
bsd_args.addr -= bsd_args.len;
} else {
bsd_args.addr = linux_args.addr;
bsd_args.addr = linux_args.addr;
bsd_args.len = linux_args.len;
}
#endif /* COMPAT_LINUX_THREADS */
@ -977,11 +976,11 @@ linux_waitpid(struct proc *p, struct linux_waitpid_args *args)
tmp.options = args->options;
#else
/* This filters out the linux option _WCLONE. I don't
think we need it, but I could be wrong. If we need
it, we need to fix wait4, since it will give us an
error return of EINVAL if we pass in _WCLONE, and
of course, it won't do anything with it.
*/
* think we need it, but I could be wrong. If we need
* it, we need to fix wait4, since it will give us an
* error return of EINVAL if we pass in _WCLONE, and
* of course, it won't do anything with it.
*/
tmp.options = (args->options & (WNOHANG | WUNTRACED));
#endif /* COMPAT_LINUX_THREADS */
tmp.rusage = NULL;
@ -990,7 +989,7 @@ linux_waitpid(struct proc *p, struct linux_waitpid_args *args)
#ifndef COMPAT_LINUX_THREADS
return error;
#else
return error;
return error;
#endif /* COMPAT_LINUX_THREADS */
if (args->status) {
if (error = copyin(args->status, &tmpstat, sizeof(int)))
@ -1028,11 +1027,11 @@ linux_wait4(struct proc *p, struct linux_wait4_args *args)
tmp.options = args->options;
#else
/* This filters out the linux option _WCLONE. I don't
think we need it, but I could be wrong. If we need
it, we need to fix wait4, since it will give us an
error return of EINVAL if we pass in _WCLONE, and
of course, it won't do anything with it.
*/
* think we need it, but I could be wrong. If we need
* it, we need to fix wait4, since it will give us an
* error return of EINVAL if we pass in _WCLONE, and
* of course, it won't do anything with it.
*/
tmp.options = (args->options & (WNOHANG | WUNTRACED));
#endif /* COMPAT_LINUX_THREADS */
tmp.rusage = args->rusage;

View File

@ -25,7 +25,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: linux_sysvec.c,v 1.41 1998/12/19 02:55:33 julian Exp $
* $Id: linux_sysvec.c,v 1.42 1998/12/19 19:05:57 sos Exp $
*/
/* XXX we use functions that might not exist. */
@ -50,10 +50,6 @@
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#ifdef COMPAT_LINUX_THREADS
#include <sys/lock.h> /* needed, for now, by vm_map.h */
#include <vm/vm_map.h> /* needed, for now, for VM_STACK defines */
#endif /* COMPAT_LINUX_THREADS */
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/module.h>
@ -221,24 +217,11 @@ linux_sendsig(sig_t catcher, int sig, int mask, u_long code)
* and the stack can not be grown. useracc will return FALSE
* if access is denied.
*/
#ifdef COMPAT_LINUX_THREADS
#ifdef USE_VM_STACK
#ifndef USE_VM_STACK_FOR_EXEC
if ((((caddr_t)fp > p->p_vmspace->vm_maxsaddr &&
(caddr_t)fp < (caddr_t)USRSTACK &&
grow(p, (int)fp) == FALSE) ||
(((caddr_t)fp <= p->p_vmspace->vm_maxsaddr ||
(caddr_t)fp >= (caddr_t)USRSTACK) &&
grow_stack (p, (int)fp) == FALSE)) ||
#else
#ifdef VM_STACK
if ((grow_stack (p, (int)fp) == FALSE) ||
#endif /* USE_VM_STACK_FOR_EXEC */
#else
if ((grow(p, (int)fp) == FALSE) ||
#endif /* USE_VM_STACK */
#else
if ((grow(p, (int)fp) == FALSE) ||
#endif /* COMPAT_LINUX_THREADS */
#endif
(useracc((caddr_t)fp, sizeof (struct linux_sigframe), B_WRITE) == FALSE)) {
/*
* Process has trashed its stack; give it an illegal

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: kern_exec.c,v 1.91 1998/12/27 18:03:29 dfr Exp $
* $Id: kern_exec.c,v 1.92 1998/12/30 10:38:59 dfr Exp $
*/
#include <sys/param.h>
@ -426,7 +426,11 @@ exec_new_vmspace(imgp)
{
int error;
struct vmspace *vmspace = imgp->proc->p_vmspace;
#ifdef VM_STACK
caddr_t stack_addr = (caddr_t) (USRSTACK - MAXSSIZ);
#else
caddr_t stack_addr = (caddr_t) (USRSTACK - SGROWSIZ);
#endif
vm_map_t map = &vmspace->vm_map;
imgp->vmspace_destroyed = 1;
@ -448,6 +452,19 @@ exec_new_vmspace(imgp)
}
/* Allocate a new stack */
#ifdef VM_STACK
error = vm_map_stack (&vmspace->vm_map, (vm_offset_t)stack_addr,
(vm_size_t)MAXSSIZ, VM_PROT_ALL, VM_PROT_ALL, 0);
if (error)
return (error);
/* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
* VM_STACK case, but they are still used to monitor the size of the
* process stack so we can check the stack rlimit.
*/
vmspace->vm_ssize = SGROWSIZ >> PAGE_SHIFT;
vmspace->vm_maxsaddr = (char *)USRSTACK - MAXSSIZ;
#else
error = vm_map_insert(&vmspace->vm_map, NULL, 0,
(vm_offset_t) stack_addr, (vm_offset_t) USRSTACK,
VM_PROT_ALL, VM_PROT_ALL, 0);
@ -458,6 +475,7 @@ exec_new_vmspace(imgp)
/* Initialize maximum stack address */
vmspace->vm_maxsaddr = (char *)USRSTACK - MAXSSIZ;
#endif
return(0);
}
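
As an illustration only (not part of the diff): the stack layout the VM_STACK
branch of exec_new_vmspace() establishes. It reserves MAXSSIZ ending at
USRSTACK, maps only the top SGROWSIZ bytes, and records the initial size in
vm_ssize (in pages). The constants below are placeholders, not the kernel's
real values.

/*
 * Illustration only -- not part of the diff.  Placeholder constants stand
 * in for the kernel's USRSTACK/MAXSSIZ/SGROWSIZ/PAGE_SHIFT.
 */
#include <stdio.h>

#define USRSTACK	0xf0000000UL		/* placeholder top of user VA */
#define MAXSSIZ		(64UL * 1024 * 1024)	/* placeholder max stack size */
#define SGROWSIZ	(128UL * 1024)		/* placeholder growth chunk   */
#define PAGE_SHIFT	12			/* placeholder 4 KB pages     */

int
main(void)
{
	unsigned long stack_addr = USRSTACK - MAXSSIZ;		/* reserved region bottom       */
	unsigned long mapped_bot = USRSTACK - SGROWSIZ;		/* only the top chunk is mapped */
	unsigned long vm_ssize   = SGROWSIZ >> PAGE_SHIFT;	/* initial stack size, in pages */

	printf("reserved %#lx .. %#lx\n", stack_addr, (unsigned long)USRSTACK);
	printf("mapped   %#lx .. %#lx (grows down on fault)\n", mapped_bot, (unsigned long)USRSTACK);
	printf("vm_ssize %lu pages, vm_maxsaddr %#lx\n", vm_ssize, stack_addr);
	return (0);
}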

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.131 1998/12/16 15:21:50 bde Exp $
* $Id: trap.c,v 1.132 1998/12/28 23:02:56 msmith Exp $
*/
/*
@ -665,6 +665,7 @@ trap_pfault(frame, usermode, eva)
/*
* Grow the stack if necessary
*/
#ifndef VM_STACK
if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
@ -673,6 +674,20 @@ trap_pfault(frame, usermode, eva)
}
}
#else
/* grow_stack returns false only if va falls into
* a growable stack region and the stack growth
* fails. It returns true if va was not within
* a growable stack region, or if the stack
* growth succeeded.
*/
if (!grow_stack (p, va)) {
rv = KERN_FAILURE;
--p->p_lock;
goto nogo;
}
#endif
/* Fault in the user page: */
rv = vm_fault(map, va, ftype,
(ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);
@ -775,6 +790,7 @@ trap_pfault(frame, usermode, eva)
/*
* Grow the stack if necessary
*/
#ifndef VM_STACK
if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
rv = KERN_FAILURE;
@ -782,6 +798,19 @@ trap_pfault(frame, usermode, eva)
goto nogo;
}
}
#else
/* grow_stack returns false only if va falls into
* a growable stack region and the stack growth
* fails. It returns true if va was not within
* a growable stack region, or if the stack
* growth succeeded.
*/
if (!grow_stack (p, va)) {
rv = KERN_FAILURE;
--p->p_lock;
goto nogo;
}
#endif
/* Fault in the user page: */
rv = vm_fault(map, va, ftype,
@ -969,12 +998,19 @@ int trapwrite(addr)
++p->p_lock;
#ifndef VM_STACK
if ((caddr_t)va >= vm->vm_maxsaddr && va < USRSTACK) {
if (!grow(p, va)) {
--p->p_lock;
return (1);
}
}
#else
if (!grow_stack (p, va)) {
--p->p_lock;
return (1);
}
#endif
/*
* fault the data page

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)mman.h 8.2 (Berkeley) 1/9/95
* $Id: mman.h,v 1.22 1998/03/08 17:25:33 dufault Exp $
* $Id: mman.h,v 1.23 1998/03/28 11:50:38 dufault Exp $
*/
#ifndef _SYS_MMAN_H_
@ -64,6 +64,9 @@
#define MAP_INHERIT 0x0080 /* region is retained after exec */
#define MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */
#define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */
#ifdef VM_STACK
#define MAP_STACK 0x0400 /* region grows down, like a stack */
#endif
#ifdef _P1003_1B_VISIBLE
/*

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
* $Id: vm_extern.h,v 1.37 1998/01/22 17:30:32 dyson Exp $
* $Id: vm_extern.h,v 1.38 1998/06/07 17:13:09 dfr Exp $
*/
#ifndef _VM_EXTERN_H_
@ -61,7 +61,11 @@ int swapon __P((struct proc *, void *, int *));
#endif
void faultin __P((struct proc *p));
#ifndef VM_STACK
int grow __P((struct proc *, size_t));
#else
int grow_stack __P((struct proc *, size_t));
#endif
int kernacc __P((caddr_t, int, int));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.137 1998/10/13 08:24:43 dg Exp $
* $Id: vm_map.c,v 1.138 1998/10/25 17:44:58 phk Exp $
*/
/*
@ -75,6 +75,9 @@
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#ifdef VM_STACK
#include <sys/resourcevar.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -538,6 +541,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
new_entry->offset = offset;
#ifdef VM_STACK
new_entry->avail_ssize = 0;
#endif
if (object) {
if ((object->ref_count > 1) || (object->shadow_count != 0)) {
vm_object_clear_flag(object, OBJ_ONEMAPPING);
@ -570,6 +577,204 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
return (KERN_SUCCESS);
}
#ifdef VM_STACK
int
vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
vm_prot_t prot, vm_prot_t max, int cow)
{
vm_map_entry_t prev_entry;
vm_map_entry_t new_stack_entry;
vm_size_t init_ssize;
int rv;
if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
return (KERN_NO_SPACE);
if (max_ssize < SGROWSIZ)
init_ssize = max_ssize;
else
init_ssize = SGROWSIZ;
vm_map_lock(map);
/* If addr is already mapped, no go */
if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
/* If we can't accommodate max_ssize in the current mapping,
* no go. However, we need to be aware that subsequent user
* mappings might map into the space we have reserved for
* stack, and currently this space is not protected.
*
* Hopefully we will at least detect this condition
* when we try to grow the stack.
*/
if ((prev_entry->next != &map->header) &&
(prev_entry->next->start < addrbos + max_ssize)) {
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
/* We initially map a stack of only init_ssize. We will
* grow as needed later. Since this is to be a grow
* down stack, we map at the top of the range.
*
* Note: we would normally expect prot and max to be
* VM_PROT_ALL, and cow to be 0. Possibly we should
* eliminate these as input parameters, and just
* pass these values here in the insert call.
*/
rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
addrbos + max_ssize, prot, max, cow);
/* Now set the avail_ssize amount */
if (rv == KERN_SUCCESS){
new_stack_entry = prev_entry->next;
if (new_stack_entry->end != addrbos + max_ssize ||
new_stack_entry->start != addrbos + max_ssize - init_ssize)
panic ("Bad entry start/end for new stack entry");
else
new_stack_entry->avail_ssize = max_ssize - init_ssize;
}
vm_map_unlock(map);
return (rv);
}
/* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the
* desired address is already mapped, or if we successfully grow
* the stack. Also returns KERN_SUCCESS if addr is outside the
* stack range (this is strange, but preserves compatibility with
* the grow function in vm_machdep.c).
*/
int
vm_map_growstack (struct proc *p, vm_offset_t addr)
{
vm_map_entry_t prev_entry;
vm_map_entry_t stack_entry;
vm_map_entry_t new_stack_entry;
struct vmspace *vm = p->p_vmspace;
vm_map_t map = &vm->vm_map;
vm_offset_t end;
int grow_amount;
int rv;
int is_procstack = 0;
vm_map_lock(map);
/* If addr is already in the entry range, no need to grow.*/
if (vm_map_lookup_entry(map, addr, &prev_entry)) {
vm_map_unlock(map);
return (KERN_SUCCESS);
}
if ((stack_entry = prev_entry->next) == &map->header) {
vm_map_unlock(map);
return (KERN_SUCCESS);
}
if (prev_entry == &map->header)
end = stack_entry->start - stack_entry->avail_ssize;
else
end = prev_entry->end;
/* This next test mimics the old grow function in vm_machdep.c.
* It really doesn't quite make sense, but we do it anyway
* for compatibility.
*
* If not growable stack, return success. This signals the
* caller to proceed as he would normally with normal vm.
*/
if (stack_entry->avail_ssize < 1 ||
addr >= stack_entry->start ||
addr < stack_entry->start - stack_entry->avail_ssize) {
vm_map_unlock(map);
return (KERN_SUCCESS);
}
/* Find the minimum grow amount */
grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
if (grow_amount > stack_entry->avail_ssize) {
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
/* If there is no longer enough space between the entries, this
* is no go; adjust the available space. Note: this
* should only happen if the user has mapped into the
* stack area after the stack was created, and is
* probably an error.
*
* This also effectively destroys any guard page the user
* might have intended by limiting the stack size.
*/
if (grow_amount > stack_entry->start - end) {
stack_entry->avail_ssize = stack_entry->start - end;
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
if (addr >= (vm_offset_t)vm->vm_maxsaddr)
is_procstack = 1;
/* If this is the main process stack, see if we're over the
* stack limit.
*/
if (is_procstack && (vm->vm_ssize + grow_amount >
p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
/* Round the grow amount up to a multiple of SGROWSIZ */
grow_amount = roundup (grow_amount, SGROWSIZ);
if (grow_amount > stack_entry->avail_ssize) {
grow_amount = stack_entry->avail_ssize;
}
if (is_procstack && (vm->vm_ssize + grow_amount >
p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
vm->vm_ssize;
}
/* Get the preliminary new entry start value */
addr = stack_entry->start - grow_amount;
/* If this puts us into the previous entry, cut back our growth
* to the available space. Also, see the note above.
*/
if (addr < end) {
stack_entry->avail_ssize = stack_entry->start - end;
addr = end;
}
rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
stack_entry->protection,
stack_entry->max_protection,
0);
/* Adjust the available stack space by the amount we grew. */
if (rv == KERN_SUCCESS) {
new_stack_entry = prev_entry->next;
if (new_stack_entry->end != stack_entry->start ||
new_stack_entry->start != addr)
panic ("Bad stack grow start/end in new stack entry");
else {
new_stack_entry->avail_ssize = stack_entry->avail_ssize -
(new_stack_entry->end -
new_stack_entry->start);
vm->vm_ssize += new_stack_entry->end -
new_stack_entry->start;
}
}
vm_map_unlock(map);
return (rv);
}
#endif
/*
* Find sufficient space for `length' bytes in the given map, starting at
* `start'. The map must be locked. Returns 0 on success, 1 on no space.
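
As an illustration only (not part of the diff): the sizing arithmetic
vm_map_growstack() applies to a faulting address, leaving out the overlap
and rlimit checks shown above. Constants and addresses are placeholders.

/*
 * Illustration only -- not part of the diff.  Placeholder constants and
 * made-up addresses; the kernel uses its own PAGE_SIZE and SGROWSIZ.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* placeholder */
#define SGROWSIZ	(128UL * 1024)		/* placeholder growth chunk */

static unsigned long
round_up(unsigned long x, unsigned long multiple)
{
	return (((x + multiple - 1) / multiple) * multiple);
}

int
main(void)
{
	unsigned long stack_start = 0x20000000UL;	/* hypothetical entry start    */
	unsigned long avail_ssize = 1UL << 20;		/* hypothetical growable space */
	unsigned long fault_addr  = stack_start - 3000;	/* fault just below the entry  */
	unsigned long grow;

	/* Minimum growth: whole pages down to the faulting address. */
	grow = round_up(stack_start - fault_addr, PAGE_SIZE);
	if (grow > avail_ssize)
		return (1);		/* the kernel returns KERN_NO_SPACE here */
	/* Grow in SGROWSIZ chunks, but never past what is available. */
	grow = round_up(grow, SGROWSIZ);
	if (grow > avail_ssize)
		grow = avail_ssize;

	printf("fault at %#lx grows the entry by %lu bytes (new start %#lx)\n",
	    fault_addr, grow, stack_start - grow);
	return (0);
}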

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.h,v 1.31 1998/01/17 09:16:52 dyson Exp $
* $Id: vm_map.h,v 1.32 1998/01/22 17:30:38 dyson Exp $
*/
/*
@ -102,6 +102,9 @@ struct vm_map_entry {
struct vm_map_entry *next; /* next entry */
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
#ifdef VM_STACK
vm_offset_t avail_ssize; /* amt can grow if this is a stack */
#endif
union vm_map_object object; /* object I point to */
vm_ooffset_t offset; /* offset into object */
u_char eflags; /* map entry flags */
@ -335,6 +338,10 @@ void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
#ifdef VM_STACK
int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
#endif
#endif
#endif /* _VM_MAP_ */

View File

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.84 1998/10/13 08:24:44 dg Exp $
* $Id: vm_mmap.c,v 1.85 1998/12/09 20:22:21 dt Exp $
*/
/*
@ -177,6 +177,15 @@ mmap(p, uap)
((flags & MAP_ANON) && uap->fd != -1))
return (EINVAL);
#ifdef VM_STACK
if (flags & MAP_STACK) {
if ((uap->fd != -1) ||
((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
return (EINVAL);
flags |= MAP_ANON;
pos = 0;
}
#endif
/*
* Align the file position to a page boundary,
* and save its page offset component.
@ -1016,6 +1025,12 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
*addr = pmap_addr_hint(object, *addr, size);
}
#ifdef VM_STACK
if (flags & MAP_STACK)
rv = vm_map_stack (map, *addr, size, prot,
maxprot, docow);
else
#endif
rv = vm_map_find(map, object, foff, addr, size, fitit,
prot, maxprot, docow);