Fix the gdb executable-modify problem. Thanks to Alan Cox <alc@cs.rice.edu> for the detective work and his description of the problem. The bug was primarily in procfs_mem, but the mistake likely happened because the VM system lacked support for the operation. I added better support for selective marking of page dirty flags so that vm_map_pageable (wiring) will not cause this problem again. The code in procfs_mem is now less bogus (but maybe still a little so).
This commit is contained in:
  parent 6bd5a8ae49
  commit 22d3427970
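The core of the change is an interface change to vm_fault(): the final boolean_t change_wiring argument becomes an int fault_flags bitmask, so a caller can request dirty-marking (and, newly, page holding) explicitly instead of getting dirtying as a side effect of the fault type. A minimal sketch of the new calling convention, condensed from the diffs below (nothing here is new code; it just collects the pieces from vm_map.h and trap.c):

	/* New flag bits (vm_map.h): */
	#define VM_FAULT_NORMAL        0	/* Nothing special */
	#define VM_FAULT_CHANGE_WIRING 1	/* Change the wiring as appropriate */
	#define VM_FAULT_USER_WIRE     2	/* Likewise, but for user purposes */
	#define VM_FAULT_WIRE_MASK     (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
	#define VM_FAULT_HOLD          4	/* Hold the page */
	#define VM_FAULT_DIRTY         8	/* Dirty the page */

	/* A user-mode write fault in trap_pfault() now pre-dirties the page: */
	rv = vm_fault(map, va, ftype,
	    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);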
trap.c:

@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
- *	$Id$
+ *	$Id: trap.c,v 1.88 1997/02/22 09:32:55 peter Exp $
  */

 /*
@@ -529,7 +529,8 @@ trap_pfault(frame, usermode)
 		}

 		/* Fault in the user page: */
-		rv = vm_fault(map, va, ftype, FALSE);
+		rv = vm_fault(map, va, ftype,
+			(ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);

 		--p->p_lock;
 	} else {
@@ -630,14 +631,13 @@ trap_pfault(frame, usermode)
 		}

 		/* Fault in the user page: */
-		rv = vm_fault(map, va, ftype, FALSE);
+		rv = vm_fault(map, va, ftype,
+			(ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);

 		--p->p_lock;
 	} else {
 		/*
-		 * Since we know that kernel virtual address addresses
-		 * always have pte pages mapped, we just have to fault
-		 * the page.
+		 * Don't have to worry about process locking or stacks in the kernel.
 		 */
 		rv = vm_fault(map, va, ftype, FALSE);
 	}
@@ -808,7 +808,7 @@ int trapwrite(addr)
 	/*
 	 * fault the data page
 	 */
-	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, VM_FAULT_DIRTY);

 	--p->p_lock;
procfs_mem.c:

@@ -37,7 +37,7 @@
  *
  *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
  *
- *	$Id$
+ *	$Id: procfs_mem.c,v 1.23 1997/02/22 09:40:28 peter Exp $
  */

 /*
@@ -74,10 +74,11 @@ procfs_rwmem(p, uio)
 	int error;
 	int writing;
 	struct vmspace *vm;
-	int fix_prot = 0;
 	vm_map_t map;
 	vm_object_t object = NULL;
 	vm_offset_t pageno = 0;		/* page number */
+	vm_prot_t reqprot;
+	vm_offset_t kva;

 	/*
 	 * if the vmspace is in the midst of being deallocated or the
@@ -94,6 +95,9 @@ procfs_rwmem(p, uio)
 	map = &vm->vm_map;

 	writing = uio->uio_rw == UIO_WRITE;
+	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : VM_PROT_READ;
+
+	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

 	/*
 	 * Only map in one page at a time. We don't have to, but it
@@ -101,7 +105,6 @@ procfs_rwmem(p, uio)
 	 */
 	do {
 		vm_map_t tmap;
-		vm_offset_t kva = 0;
 		vm_offset_t uva;
 		int page_offset;		/* offset into page */
 		vm_map_entry_t out_entry;
@@ -109,8 +112,8 @@ procfs_rwmem(p, uio)
 		boolean_t wired, single_use;
 		vm_pindex_t pindex;
 		u_int len;
 		vm_page_t m;

-		fix_prot = 0;
+		object = NULL;

 		uva = (vm_offset_t) uio->uio_offset;
@@ -127,6 +130,8 @@ procfs_rwmem(p, uio)
 		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

 		if (uva >= VM_MAXUSER_ADDRESS) {
+			vm_offset_t tkva;
+
 			if (writing || (uva >= (VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))) {
 				error = 0;
 				break;
@@ -148,10 +153,10 @@ procfs_rwmem(p, uio)
 			fill_eproc (p, &p->p_addr->u_kproc.kp_eproc);

 			/* locate the in-core address */
-			kva = (u_int)p->p_addr + uva - VM_MAXUSER_ADDRESS;
+			tkva = (u_int)p->p_addr + uva - VM_MAXUSER_ADDRESS;

 			/* transfer it */
-			error = uiomove((caddr_t)kva, len, uio);
+			error = uiomove((caddr_t)tkva, len, uio);

 			/* let the pages go */
 			PRELE(p);
@@ -160,33 +165,12 @@ procfs_rwmem(p, uio)
 		}

 		/*
-		 * Check the permissions for the area we're interested
-		 * in.
+		 * Fault the page on behalf of the process
 		 */
-		if (writing) {
-			fix_prot = !vm_map_check_protection(map, pageno,
-					pageno + PAGE_SIZE, VM_PROT_WRITE);
-
-			if (fix_prot) {
-				/*
-				 * If the page is not writable, we make it so.
-				 * XXX It is possible that a page may *not* be
-				 * read/executable, if a process changes that!
-				 * We will assume, for now, that a page is either
-				 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
-				 */
-				error = vm_map_protect(map, pageno,
-					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
-				if (error) {
-					/*
-					 * We don't have to undo something
-					 * that didn't work, so we clear the
-					 * flag.
-					 */
-					fix_prot = 0;
-					break;
-				}
-			}
+		error = vm_fault(map, pageno, reqprot, FALSE);
+		if (error) {
+			error = EFAULT;
+			break;
 		}

 		/*
@@ -196,20 +180,40 @@ procfs_rwmem(p, uio)
 		 * vm_map_lookup() can change the map argument.
 		 */
 		tmap = map;
-		error = vm_map_lookup(&tmap, pageno,
-			writing ? VM_PROT_WRITE : VM_PROT_READ,
+		error = vm_map_lookup(&tmap, pageno, reqprot,
 			&out_entry, &object, &pindex, &out_prot,
 			&wired, &single_use);

 		if (error) {
 			error = EFAULT;
+
+			/*
+			 * Make sure that there is no residue in 'object' from
+			 * an error return on vm_map_lookup.
+			 */
+			object = NULL;
+
 			break;
 		}

+		m = vm_page_lookup(object, pindex);
+		if (m == NULL) {
+			error = EFAULT;
+
+			/*
+			 * Make sure that there is no residue in 'object' from
+			 * an error return on vm_map_lookup.
+			 */
+			object = NULL;
+
+			break;
+		}
+
+		/*
+		 * Wire the page into memory
+		 */
+		vm_page_wire(m);
+
 		/*
 		 * We're done with tmap now.
 		 * But reference the object first, so that we won't loose
@@ -218,66 +222,29 @@ procfs_rwmem(p, uio)
 		vm_object_reference(object);
 		vm_map_lookup_done(tmap, out_entry);

-		/*
-		 * Fault the page in...
-		 */
-		if (writing && object->backing_object) {
-			error = vm_fault(map, pageno,
-				VM_PROT_WRITE, FALSE);
-			if (error)
-				break;
-		}
-
-		/* Find space in kernel_map for the page we're interested in */
-		error = vm_map_find(kernel_map, object,
-			IDX_TO_OFF(pindex), &kva, PAGE_SIZE, 1,
-			VM_PROT_ALL, VM_PROT_ALL, 0);
-		if (error) {
-			break;
-		}
-
-		/*
-		 * Mark the page we just found as pageable.
-		 */
-		error = vm_map_pageable(kernel_map, kva,
-			kva + PAGE_SIZE, 0);
-		if (error) {
-			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
-			object = NULL;
-			break;
-		}
+		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));

 		/*
 		 * Now do the i/o move.
 		 */
-		error = uiomove((caddr_t)(kva + page_offset),
-			len, uio);
+		error = uiomove((caddr_t)(kva + page_offset), len, uio);
+
+		pmap_kremove(kva);

 		/*
-		 * vm_map_remove gets rid of the object reference, so
-		 * we need to get rid of our 'object' pointer if there
-		 * is subsequently an error.
+		 * release the page and the object
 		 */
-		vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
+		vm_page_unwire(m);
+		vm_object_deallocate(object);

 		object = NULL;

-		/*
-		 * Undo the protection 'damage'.
-		 */
-		if (fix_prot) {
-			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
-				VM_PROT_READ|VM_PROT_EXECUTE, 0);
-			fix_prot = 0;
-		}
 	} while (error == 0 && uio->uio_resid > 0);

 	if (object)
 		vm_object_deallocate(object);

-	if (fix_prot)
-		vm_map_protect(map, pageno, pageno + PAGE_SIZE,
-			VM_PROT_READ|VM_PROT_EXECUTE, 0);
-
+	kmem_free(kernel_map, kva, PAGE_SIZE);
 	vmspace_free(vm);
 	return (error);
 }
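Condensed, the rewritten procfs_rwmem() transfer loop handles each page roughly as follows. This is a sketch assembled from the hunks above, with the error paths elided; it replaces the old vm_map_protect()/vm_map_find()/vm_map_pageable() dance that the commit message calls out:

	/* kva was allocated once with kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	 * reqprot is VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE for a write,
	 * VM_PROT_READ otherwise. */
	error = vm_fault(map, pageno, reqprot, FALSE);	/* FALSE == VM_FAULT_NORMAL */

	tmap = map;
	error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
	    &object, &pindex, &out_prot, &wired, &single_use);
	m = vm_page_lookup(object, pindex);
	vm_page_wire(m);			/* pin the page while we copy */
	vm_object_reference(object);
	vm_map_lookup_done(tmap, out_entry);

	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));	/* window it into the kernel */
	error = uiomove((caddr_t)(kva + page_offset), len, uio);
	pmap_kremove(kva);

	vm_page_unwire(m);			/* release the page and the object */
	vm_object_deallocate(object);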
vm_fault.c:

@@ -66,7 +66,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id$
+ * $Id: vm_fault.c,v 1.66 1997/02/22 09:48:15 peter Exp $
  */

 /*
@@ -123,11 +123,11 @@ int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));
  *	Caller may hold no locks.
  */
 int
-vm_fault(map, vaddr, fault_type, change_wiring)
+vm_fault(map, vaddr, fault_type, fault_flags)
 	vm_map_t map;
 	vm_offset_t vaddr;
 	vm_prot_t fault_type;
-	boolean_t change_wiring;
+	int fault_flags;
 {
 	vm_object_t first_object;
 	vm_pindex_t first_pindex;
@@ -210,7 +210,7 @@ RetryFault:;
 	 * to COW .text. We simply keep .text from ever being COW'ed
 	 * and take the heat that one cannot debug wired .text sections.
 	 */
-	if ((change_wiring == VM_FAULT_USER_WIRE) && (entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
+	if (((fault_flags & VM_FAULT_WIRE_MASK) == VM_FAULT_USER_WIRE) && (entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
 		if(entry->protection & VM_PROT_WRITE) {
 			int tresult;
 			vm_map_lookup_done(map, entry);
@@ -333,7 +333,7 @@ RetryFault:;
 			}
 			break;
 		}
-		if (((object->type != OBJT_DEFAULT) && (!change_wiring || wired))
+		if (((object->type != OBJT_DEFAULT) && (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
 			|| (object == first_object)) {

 			if (pindex >= object->size) {
@@ -354,7 +354,7 @@ RetryFault:;
 		}
 	}
 readrest:
-	if (object->type != OBJT_DEFAULT && (!change_wiring || wired)) {
+	if (object->type != OBJT_DEFAULT && (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) {
 		int rv;
 		int faultcount;
 		int reqpage;
@@ -788,7 +788,7 @@ RetryFault:;
 	 * written NOW. This will save on the pmap_is_modified() calls
 	 * later.
 	 */
-	if (fault_type & VM_PROT_WRITE) {
+	if (fault_flags & VM_FAULT_DIRTY) {
 		m->dirty = VM_PAGE_BITS_ALL;
 	}
 }
@@ -798,16 +798,18 @@ RetryFault:;
 	m->flags &= ~PG_ZERO;

 	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
-	if ((change_wiring == 0) && (wired == 0))
+	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0))
 		pmap_prefault(map->pmap, vaddr, entry, first_object);

 	m->flags |= PG_MAPPED|PG_REFERENCED;
+	if (fault_flags & VM_FAULT_HOLD)
+		vm_page_hold(m);

 	/*
 	 * If the page is not wired down, then put it where the pageout daemon
 	 * can find it.
 	 */
-	if (change_wiring) {
+	if (fault_flags & VM_FAULT_WIRE_MASK) {
 		if (wired)
 			vm_page_wire(m);
 		else
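Two details of the vm_fault() rewrite are worth pulling out, shown here as a sketch distilled from the hunks above:

	/* Before: any write fault dirtied the page, wiring faults included. */
	if (fault_type & VM_PROT_WRITE) {
		m->dirty = VM_PAGE_BITS_ALL;
	}

	/* After: only a caller that passed VM_FAULT_DIRTY dirties it, so
	 * vm_map_pageable()'s wiring faults leave clean pages clean. */
	if (fault_flags & VM_FAULT_DIRTY) {
		m->dirty = VM_PAGE_BITS_ALL;
	}

	/* The old truthiness tests translate one-for-one, assuming wiring
	 * callers pass only the wire flags in the old argument slot:
	 *   !change_wiring  ->  (fault_flags & VM_FAULT_WIRE_MASK) == 0
	 *    change_wiring  ->  (fault_flags & VM_FAULT_WIRE_MASK) != 0
	 */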
vm_map.c:

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id$
+ * $Id: vm_map.c,v 1.72 1997/02/22 09:48:23 peter Exp $
  */

 /*
@@ -2323,13 +2323,21 @@ RetryLookup:;
 		vm_map_unlock_read(old_map);
 		goto RetryLookup;
 	}

 	/*
 	 * Check whether this task is allowed to have this page.
+	 * Note the special case for MAP_ENTRY_COW
+	 * pages with an override. This is to implement a forced
+	 * COW for debuggers.
 	 */

 	prot = entry->protection;
-	if ((fault_type & (prot)) != fault_type)
-		RETURN(KERN_PROTECTION_FAILURE);
+	if ((fault_type & VM_PROT_OVERRIDE_WRITE) == 0 ||
+	    (entry->eflags & MAP_ENTRY_COW) == 0 ||
+	    (entry->wired_count != 0)) {
+		if ((fault_type & (prot)) != fault_type)
+			RETURN(KERN_PROTECTION_FAILURE);
+	}

 	/*
 	 * If this page is not pageable, we have to get it for all possible
vm_map.h:

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id$
+ * $Id: vm_map.h,v 1.24 1997/02/22 09:48:24 peter Exp $
  */

 /*
@@ -243,9 +243,12 @@ typedef struct {
 /*
  * vm_fault option flags
  */
-#define VM_FAULT_NORMAL 0
-#define VM_FAULT_CHANGE_WIRING 1
-#define VM_FAULT_USER_WIRE 2
+#define VM_FAULT_NORMAL 0		/* Nothing special */
+#define VM_FAULT_CHANGE_WIRING 1	/* Change the wiring as appropriate */
+#define VM_FAULT_USER_WIRE 2		/* Likewise, but for user purposes */
+#define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
+#define VM_FAULT_HOLD 4			/* Hold the page */
+#define VM_FAULT_DIRTY 8		/* Dirty the page */

 #ifdef KERNEL
 extern vm_offset_t kentry_data;
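VM_FAULT_HOLD gains no caller in this diff; a hypothetical use (not from this commit, for illustration only) would combine flags to dirty a page and keep it held across a short kernel copy:

	/* Hypothetical caller; the page comes back held via vm_page_hold(). */
	rv = vm_fault(map, trunc_page(va), VM_PROT_WRITE,
	    VM_FAULT_DIRTY | VM_FAULT_HOLD);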
vm_prot.h:

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id$
+ * $Id: vm_prot.h,v 1.7 1997/02/22 09:48:38 peter Exp $
  */

 /*
@@ -80,6 +80,7 @@
 #define VM_PROT_READ		((vm_prot_t) 0x01)	/* read permission */
 #define VM_PROT_WRITE		((vm_prot_t) 0x02)	/* write permission */
 #define VM_PROT_EXECUTE		((vm_prot_t) 0x04)	/* execute permission */
+#define VM_PROT_OVERRIDE_WRITE	((vm_prot_t) 0x08)	/* write, overriding permission for COW */

 /*
  * The default protection for newly-created virtual memory
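Taken together with the vm_map.c change above, this new bit is what gives a debugger a forced COW: procfs requests it on writes, and vm_map_lookup() waives the protection check only for unwired MAP_ENTRY_COW entries, so the write faults a private copy instead of modifying the executable's pages. Pulled from the hunks above:

	/* procfs_mem.c: */
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : VM_PROT_READ;

	/* vm_map.c, vm_map_lookup(): skip the normal protection check only
	 * when the override bit is set on an unwired COW entry. */
	if ((fault_type & VM_PROT_OVERRIDE_WRITE) == 0 ||
	    (entry->eflags & MAP_ENTRY_COW) == 0 ||
	    (entry->wired_count != 0)) {
		if ((fault_type & (prot)) != fault_type)
			RETURN(KERN_PROTECTION_FAILURE);
	}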