GC an alternate trap_pfault() which has rotted away behind an "#ifdef notyet"
since 21-Mar-95.
Poul-Henning Kamp 2001-12-30 19:43:59 +00:00
parent 230ce3668b
commit e42d6981f2
2 changed files with 0 additions and 236 deletions

View File

@@ -652,124 +652,6 @@ trap(frame)
        return;
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel. The rest of the kernel needs to be made "safe"
 * before this can be used. I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;
        int usermode;
        vm_offset_t eva;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        struct proc *p = curproc;

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        va = trunc_page(eva);
        if (va < VM_MIN_KERNEL_ADDRESS) {
                vm_offset_t v;
                vm_page_t mpte;

                if (p == NULL ||
                    (!usermode && va < VM_MAXUSER_ADDRESS &&
                     (td->td_intr_nesting_level != 0 ||
                      PCPU_GET(curpcb) == NULL ||
                      PCPU_GET(curpcb)->pcb_onfault == NULL))) {
                        trap_fatal(frame, eva);
                        return (-1);
                }

                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL. If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                vm = p->p_vmspace;
                if (vm == NULL)
                        goto nogo;

                mtx_lock(&Giant);
                map = &vm->vm_map;

                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                PROC_LOCK(p);
                ++p->p_lock;
                PROC_UNLOCK(p);

                /*
                 * Grow the stack if necessary
                 */
                /* grow_stack returns false only if va falls into
                 * a growable stack region and the stack growth
                 * fails.  It returns true if va was not within
                 * a growable stack region, or if the stack
                 * growth succeeded.
                 */
                if (!grow_stack (td, va))
                        rv = KERN_FAILURE;
                else
                        /* Fault in the user page: */
                        rv = vm_fault(map, va, ftype,
                            (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
                                                    : VM_FAULT_NORMAL);

                PROC_LOCK(p);
                --p->p_lock;
                PROC_UNLOCK(p);
        } else {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode)
                        goto nogo;

                mtx_lock(&Giant);

                /*
                 * Since we know that kernel virtual address addresses
                 * always have pte pages mapped, we just have to fault
                 * the page.
                 */
                rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
        }
        mtx_unlock(&Giant);

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (td->td_intr_nesting_level == 0 &&
                    PCPU_GET(curpcb) != NULL &&
                    PCPU_GET(curpcb)->pcb_onfault != NULL) {
                        frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, eva);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;

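The comments in the deleted block above spell out the policy the dead code enforced: faults below VM_MIN_KERNEL_ADDRESS go to the process's own map, user-mode faults on kernel addresses are refused outright, and anything else is resolved against kernel_map. As a rough illustration only, here is a hypothetical userspace sketch of that branch structure; the PGEX_W value and the 3GB split are conventional i386 values assumed for the demo, not taken from this diff.

/*
 * Hypothetical sketch of the deleted trap_pfault()'s classification
 * logic, hoisted into userspace; the constants are stand-ins.
 */
#include <stdio.h>

#define PGEX_W                  0x02            /* assumed: write bit in tf_err */
#define VM_MIN_KERNEL_ADDRESS   0xc0000000UL    /* assumed: i386 3GB split */

static const char *
classify(unsigned long eva, int tf_err, int usermode)
{
        int write = (tf_err & PGEX_W) != 0;

        if (eva < VM_MIN_KERNEL_ADDRESS)        /* fault in the process map */
                return (write ? "user va: vm_fault(map, ..., VM_PROT_WRITE)"
                    : "user va: vm_fault(map, ..., VM_PROT_READ)");
        if (usermode)                           /* user code touching kernel va */
                return ("nogo: signal the process");
        return ("kernel va: vm_fault(kernel_map, ...)");
}

int
main(void)
{
        printf("%s\n", classify(0x08048000UL, PGEX_W, 1));
        printf("%s\n", classify(0xc0100000UL, 0, 1));
        printf("%s\n", classify(0xc0100000UL, 0, 0));
        return (0);
}

Compiled standalone, the three calls in main() print the write-fault, refused, and kernel-map cases respectively.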
View File

@@ -652,124 +652,6 @@ trap(frame)
        return;
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel. The rest of the kernel needs to be made "safe"
 * before this can be used. I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;
        int usermode;
        vm_offset_t eva;
{
        vm_offset_t va;
        struct vmspace *vm = NULL;
        vm_map_t map = 0;
        int rv = 0;
        vm_prot_t ftype;
        struct proc *p = curproc;

        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else
                ftype = VM_PROT_READ;

        va = trunc_page(eva);
        if (va < VM_MIN_KERNEL_ADDRESS) {
                vm_offset_t v;
                vm_page_t mpte;

                if (p == NULL ||
                    (!usermode && va < VM_MAXUSER_ADDRESS &&
                     (td->td_intr_nesting_level != 0 ||
                      PCPU_GET(curpcb) == NULL ||
                      PCPU_GET(curpcb)->pcb_onfault == NULL))) {
                        trap_fatal(frame, eva);
                        return (-1);
                }

                /*
                 * This is a fault on non-kernel virtual memory.
                 * vm is initialized above to NULL. If curproc is NULL
                 * or curproc->p_vmspace is NULL the fault is fatal.
                 */
                vm = p->p_vmspace;
                if (vm == NULL)
                        goto nogo;

                mtx_lock(&Giant);
                map = &vm->vm_map;

                /*
                 * Keep swapout from messing with us during this
                 * critical time.
                 */
                PROC_LOCK(p);
                ++p->p_lock;
                PROC_UNLOCK(p);

                /*
                 * Grow the stack if necessary
                 */
                /* grow_stack returns false only if va falls into
                 * a growable stack region and the stack growth
                 * fails.  It returns true if va was not within
                 * a growable stack region, or if the stack
                 * growth succeeded.
                 */
                if (!grow_stack (td, va))
                        rv = KERN_FAILURE;
                else
                        /* Fault in the user page: */
                        rv = vm_fault(map, va, ftype,
                            (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
                                                    : VM_FAULT_NORMAL);

                PROC_LOCK(p);
                --p->p_lock;
                PROC_UNLOCK(p);
        } else {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode)
                        goto nogo;

                mtx_lock(&Giant);

                /*
                 * Since we know that kernel virtual address addresses
                 * always have pte pages mapped, we just have to fault
                 * the page.
                 */
                rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
        }
        mtx_unlock(&Giant);

        if (rv == KERN_SUCCESS)
                return (0);
nogo:
        if (!usermode) {
                if (td->td_intr_nesting_level == 0 &&
                    PCPU_GET(curpcb) != NULL &&
                    PCPU_GET(curpcb)->pcb_onfault != NULL) {
                        frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, eva);
                return (-1);
        }

        /* kludge to pass faulting virtual address to sendsig */
        frame->tf_err = eva;

        return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

int
trap_pfault(frame, usermode, eva)
        struct trapframe *frame;
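Before declaring a kernel-mode fault fatal, both copies of the dead function checked PCPU_GET(curpcb)->pcb_onfault and, if set, resumed execution at that address instead of panicking; this is the hook copyin()/copyout()-style routines rely on. Below is a minimal userspace analogue of that recovery idiom, assuming only POSIX signals and sigsetjmp(); the names are invented for the demo, and only pcb_onfault's role comes from the diff.

/*
 * Userspace analogue (demo only) of the pcb_onfault recovery idiom:
 * a SIGSEGV handler plays trap_pfault()'s role, and a sigjmp_buf
 * plays pcb_onfault's.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sigjmp_buf onfault;                      /* stand-in for pcb_onfault */
static volatile sig_atomic_t onfault_set;

static void
segv_handler(int sig)
{

        (void)sig;
        if (onfault_set)                        /* pcb_onfault != NULL case */
                siglongjmp(onfault, 1);
        _exit(1);                               /* no handler: "trap_fatal" */
}

/* Analogue of copyin(): touch a pointer, recovering if it faults. */
static int
try_read(const volatile int *p, int *out)
{

        if (sigsetjmp(onfault, 1) != 0) {
                onfault_set = 0;                /* fault taken and survived */
                return (-1);
        }
        onfault_set = 1;
        *out = *p;                              /* may fault */
        onfault_set = 0;
        return (0);
}

int
main(void)
{
        struct sigaction sa;
        int value;

        sa.sa_handler = segv_handler;
        sa.sa_flags = 0;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        if (try_read(NULL, &value) == -1)
                printf("fault recovered, as pcb_onfault would allow\n");
        return (0);
}

The sigjmp_buf stands in for pcb_onfault and the SIGSEGV handler for the trap path; in the kernel the resumption is frame->tf_eip = pcb_onfault rather than a longjmp.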