Nuke #if 0'ed "setredzone()" stub. We never used it, and probably
never will. I've implemented an optional redzone as part of the KSE upage breakup.
This commit is contained in:
parent 01706d206f
commit b53f9c45f9
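Not part of the change below, but for context on the term: a stack "red zone" is a guard region at the bottom of the stack that is left inaccessible, so an overflow faults immediately instead of silently corrupting whatever sits below it. The sketch that follows is only an illustration, not code from this commit: a userland analogue using POSIX mmap(2)/mprotect(2) with arbitrary example sizes; the in-kernel redzone from the KSE upage work uses the pmap layer and is not shown here.

/*
 * Userland analogue of a stack red zone: reserve a stack-sized region
 * and revoke all access to its lowest page, so running off the bottom
 * of the stack traps at once rather than scribbling on adjacent memory.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	size_t stksz = 8 * pgsz;
	char *base;

	/* Reserve the stack plus one extra page for the red zone. */
	base = mmap(NULL, stksz + pgsz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return (1);
	}

	/* The lowest page becomes the red zone: no access at all. */
	if (mprotect(base, pgsz, PROT_NONE) == -1) {
		perror("mprotect");
		return (1);
	}

	printf("stack %p..%p, red zone %p..%p\n",
	    (void *)(base + pgsz), (void *)(base + pgsz + stksz),
	    (void *)base, (void *)(base + pgsz));

	/* Uncommenting this store would deliver SIGSEGV immediately: */
	/* *(volatile char *)base = 0; */

	munmap(base, stksz + pgsz);
	return (0);
}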
@@ -292,23 +292,6 @@ cpu_coredump(p, vp, cred)
 p));
 }
 
-#ifdef notyet
-static void
-setredzone(pte, vaddr)
-u_short *pte;
-caddr_t vaddr;
-{
-/* eventually do this by setting up an expand-down stack segment
-for ss0: selector, allowing stack access down to top of u.
-this means though that protection violations need to be handled
-thru a double fault exception that must do an integral task
-switch to a known good context, within which a dump can be
-taken. a sensible scheme might be to save the initial context
-used by sched (that has physical memory mapped 1:1 at bottom)
-and take the dump while still in mapped mode */
-}
-#endif
-
 /*
 * Map an IO request into kernel virtual address space.
 *
@@ -333,23 +333,6 @@ cpu_coredump(p, vp, cred)
 return error;
 }
 
-#ifdef notyet
-static void
-setredzone(pte, vaddr)
-u_short *pte;
-caddr_t vaddr;
-{
-/* eventually do this by setting up an expand-down stack segment
-for ss0: selector, allowing stack access down to top of u.
-this means though that protection violations need to be handled
-thru a double fault exception that must do an integral task
-switch to a known good context, within which a dump can be
-taken. a sensible scheme might be to save the initial context
-used by sched (that has physical memory mapped 1:1 at bottom)
-and take the dump while still in mapped mode */
-}
-#endif
-
 /*
 * Convert kernel VA to physical address
 */
@@ -333,23 +333,6 @@ cpu_coredump(p, vp, cred)
 return error;
 }
 
-#ifdef notyet
-static void
-setredzone(pte, vaddr)
-u_short *pte;
-caddr_t vaddr;
-{
-/* eventually do this by setting up an expand-down stack segment
-for ss0: selector, allowing stack access down to top of u.
-this means though that protection violations need to be handled
-thru a double fault exception that must do an integral task
-switch to a known good context, within which a dump can be
-taken. a sensible scheme might be to save the initial context
-used by sched (that has physical memory mapped 1:1 at bottom)
-and take the dump while still in mapped mode */
-}
-#endif
-
 /*
 * Convert kernel VA to physical address
 */
@@ -335,23 +335,6 @@ cpu_coredump(p, vp, cred)
 p));
 }
 
-#ifdef notyet
-static void
-setredzone(pte, vaddr)
-u_short *pte;
-caddr_t vaddr;
-{
-/* eventually do this by setting up an expand-down stack segment
-for ss0: selector, allowing stack access down to top of u.
-this means though that protection violations need to be handled
-thru a double fault exception that must do an integral task
-switch to a known good context, within which a dump can be
-taken. a sensible scheme might be to save the initial context
-used by sched (that has physical memory mapped 1:1 at bottom)
-and take the dump while still in mapped mode */
-}
-#endif
-
 /*
 * Map an IO request into kernel virtual address space.
 *
@@ -208,23 +208,6 @@ cpu_coredump(p, vp, cred)
 p));
 }
 
-#ifdef notyet
-static void
-setredzone(pte, vaddr)
-u_short *pte;
-caddr_t vaddr;
-{
-/* eventually do this by setting up an expand-down stack segment
-for ss0: selector, allowing stack access down to top of u.
-this means though that protection violations need to be handled
-thru a double fault exception that must do an integral task
-switch to a known good context, within which a dump can be
-taken. a sensible scheme might be to save the initial context
-used by sched (that has physical memory mapped 1:1 at bottom)
-and take the dump while still in mapped mode */
-}
-#endif
-
 /*
 * Map an IO request into kernel virtual address space.
 *
@@ -208,23 +208,6 @@ cpu_coredump(p, vp, cred)
 p));
 }
 
-#ifdef notyet
-static void
-setredzone(pte, vaddr)
-u_short *pte;
-caddr_t vaddr;
-{
-/* eventually do this by setting up an expand-down stack segment
-for ss0: selector, allowing stack access down to top of u.
-this means though that protection violations need to be handled
-thru a double fault exception that must do an integral task
-switch to a known good context, within which a dump can be
-taken. a sensible scheme might be to save the initial context
-used by sched (that has physical memory mapped 1:1 at bottom)
-and take the dump while still in mapped mode */
-}
-#endif
-
 /*
 * Map an IO request into kernel virtual address space.
 *
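The comment in the stub being removed sketched a different approach: load ss0 with an expand-down data segment so that kernel-stack pushes below the segment limit fault (through a double-fault task switch) instead of running into the top of the u-area. Purely as an illustration of that encoding, and not code from this commit, the following packs an IA-32 expand-down data-segment descriptor using arbitrary example base/limit values:

/*
 * Illustration only: encode a legacy 8-byte IA-32 segment descriptor.
 * For an expand-down data segment (type 0x6) with the D/B bit set, the
 * valid offsets run from limit+1 up to 0xffffffff, so accesses at or
 * below the limit fault; that property is what makes it usable as a
 * stack red-zone boundary.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
make_descriptor(uint32_t base, uint32_t limit, uint8_t type, uint8_t dpl,
    int gran)
{
	uint64_t d;

	d  = (uint64_t)(limit & 0xffff);		/* limit 15:0 */
	d |= (uint64_t)(base & 0xffffff) << 16;		/* base 23:0 */
	d |= (uint64_t)(type & 0xf) << 40;		/* type */
	d |= (uint64_t)1 << 44;				/* S: code/data */
	d |= (uint64_t)(dpl & 3) << 45;			/* DPL */
	d |= (uint64_t)1 << 47;				/* present */
	d |= (uint64_t)((limit >> 16) & 0xf) << 48;	/* limit 19:16 */
	d |= (uint64_t)1 << 54;				/* D/B: 32-bit */
	d |= (uint64_t)(gran ? 1 : 0) << 55;		/* granularity */
	d |= (uint64_t)((base >> 24) & 0xff) << 56;	/* base 31:24 */
	return (d);
}

int
main(void)
{
	/* Example values only: base 0, byte-granular limit of 0xffff. */
	uint64_t ss0 = make_descriptor(0x00000000, 0x0000ffff, 0x6, 0, 0);

	printf("expand-down ss0 descriptor: 0x%016llx\n",
	    (unsigned long long)ss0);
	return (0);
}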