Replace VM_PROT_OVERRIDE_WRITE with VM_PROT_COPY.

VM_PROT_OVERRIDE_WRITE has represented a write access that is allowed
to override write protection.  Until now, VM_PROT_OVERRIDE_WRITE has
been used to write breakpoints into text pages.  Text pages are not
just write protected; they are also copy-on-write.
VM_PROT_OVERRIDE_WRITE overrides the write protection on the text page
and triggers the replication of the page so that the breakpoint will
be written to a private copy.  However, here is where things become
confused: it is the debugger, not the process being debugged, that
requires write access to the copied page.  Nonetheless, the copied
page is mapped into the process with write access enabled.  In other
words, once the debugger sets a breakpoint within a text page, the
program can write to its private copy of that text page, whereas
prior to setting the breakpoint a write access would have raised
SIGSEGV.

VM_PROT_COPY addresses this problem.  The combination of VM_PROT_READ
and VM_PROT_COPY forces the replication of a copy-on-write page even
though the access is only for read.  Moreover, the replicated page is
mapped into the process with read access only, not write access.

Reviewed by:	kib
MFC after:	4 weeks
Author:	Alan Cox, 2009-11-26 05:16:07 +00:00
Commit:	a6d42a0d62 (parent: ce8ad96abe)
4 changed files with 22 additions and 32 deletions
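
To make the behavioral fix concrete, here is a hedged userland
illustration (not part of the commit; patch_own_text() is a
hypothetical function, for illustration only): once a debugger has
forced a private copy of a text page, a direct write by the process
to its own code still faults, because the copy is mapped read-only.

#include <string.h>

/*
 * Hypothetical example: even after a debugger has set a breakpoint in
 * this page (forcing a private copy via VM_PROT_COPY), the process's
 * own store below still raises SIGSEGV, because the copy is mapped
 * with read access only.  Before this change, the copy was mapped
 * writable and this store would have silently succeeded.
 */
void
patch_own_text(void (*func)(void))
{
	memset((void *)func, 0xcc, 1);	/* 0xcc is the x86 INT3 opcode */
}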

sys/kern/sys_process.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_kern.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
+#include <vm/vm_pager.h>
 #include <vm/vm_param.h>
 
 #ifdef COMPAT_IA32
@@ -213,10 +214,10 @@ int
 proc_rwmem(struct proc *p, struct uio *uio)
 {
 	vm_map_t map;
-	vm_object_t backing_object, object = NULL;
-	vm_offset_t pageno = 0;		/* page number */
+	vm_object_t backing_object, object;
+	vm_offset_t pageno;		/* page number */
 	vm_prot_t reqprot;
-	int error, fault_flags, writing;
+	int error, writing;
 
 	/*
 	 * Assert that someone has locked this vmspace.  (Should be
@@ -232,9 +233,7 @@ proc_rwmem(struct proc *p, struct uio *uio)
 	map = &p->p_vmspace->vm_map;
 
 	writing = uio->uio_rw == UIO_WRITE;
-	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
-	    VM_PROT_READ;
-	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
+	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
 
 	/*
 	 * Only map in one page at a time.  We don't have to, but it
@@ -269,7 +268,7 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		/*
 		 * Fault the page on behalf of the process
 		 */
-		error = vm_fault(map, pageno, reqprot, fault_flags);
+		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
 		if (error) {
 			if (error == KERN_RESOURCE_SHORTAGE)
 				error = ENOMEM;
@@ -279,8 +278,8 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		}
 
 		/*
-		 * Now we need to get the page.  out_entry, wired,
-		 * and single_use aren't used.  One would think the vm code
+		 * Now we need to get the page.  out_entry and wired
+		 * aren't used.  One would think the vm code
 		 * would be a *bit* nicer...  We use tmap because
 		 * vm_map_lookup() can change the map argument.
 		 */
@@ -303,6 +302,10 @@ proc_rwmem(struct proc *p, struct uio *uio)
 			VM_OBJECT_UNLOCK(object);
 			object = backing_object;
 		}
+		if (writing && m != NULL) {
+			vm_page_dirty(m);
+			vm_pager_page_unswapped(m);
+		}
 		VM_OBJECT_UNLOCK(object);
 		if (m == NULL) {
 			vm_map_lookup_done(tmap, out_entry);
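
For context, a debugger typically reaches proc_rwmem() through
ptrace(2).  The following is a hedged usage sketch, not part of the
commit; set_breakpoint() is a hypothetical helper.  With this change,
the kernel services the write by faulting the text page with
VM_PROT_COPY | VM_PROT_READ instead of an override write.

#include <sys/types.h>
#include <sys/ptrace.h>

/*
 * Hypothetical helper: write a breakpoint instruction into a traced
 * process's text segment.  The kernel performs the write on the
 * debugger's behalf via proc_rwmem().
 */
static int
set_breakpoint(pid_t pid, caddr_t addr, int bkpt_insn)
{
	return (ptrace(PT_WRITE_I, pid, addr, bkpt_insn));
}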

sys/vm/vm.h
@@ -76,7 +76,7 @@ typedef u_char vm_prot_t;	/* protection codes */
 #define	VM_PROT_READ		((vm_prot_t) 0x01)
 #define	VM_PROT_WRITE		((vm_prot_t) 0x02)
 #define	VM_PROT_EXECUTE		((vm_prot_t) 0x04)
-#define	VM_PROT_OVERRIDE_WRITE	((vm_prot_t) 0x08)	/* copy-on-write */
+#define	VM_PROT_COPY		((vm_prot_t) 0x08)	/* copy-on-read */
 
 #define	VM_PROT_ALL		(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
 #define	VM_PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)
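
Note that VM_PROT_COPY reuses the 0x08 bit and, like its predecessor,
acts as a fault-type modifier rather than a protection stored in map
entries; vm_map_lookup() strips it before checking the requested
access against entry->protection.  A minimal standalone sketch of
that convention (the defines are mirrored here for illustration):

#include <stdio.h>

typedef unsigned char vm_prot_t;	/* u_char in sys/vm/vm.h */
#define	VM_PROT_READ	((vm_prot_t)0x01)
#define	VM_PROT_WRITE	((vm_prot_t)0x02)
#define	VM_PROT_EXECUTE	((vm_prot_t)0x04)
#define	VM_PROT_COPY	((vm_prot_t)0x08)	/* copy-on-read */

int
main(void)
{
	/* A debugger-style request: read access plus a forced copy. */
	vm_prot_t fault_type = VM_PROT_READ | VM_PROT_COPY;

	/* The access check considers only R/W/X, as in vm_map_lookup(). */
	vm_prot_t checked = fault_type &
	    (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);

	printf("requested 0x%02x, checked 0x%02x\n", fault_type, checked);
	return (0);
}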

sys/vm/vm_fault.c
@@ -702,7 +702,7 @@ vnode_locked:
 		/*
 		 * We only really need to copy if we want to write it.
 		 */
-		if (fault_type & VM_PROT_WRITE) {
+		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
 			/*
 			 * This allows pages to be virtually copied from a
 			 * backing_object into the first_object, where the
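
In other words, vm_fault()'s copy decision now keys on VM_PROT_COPY
as well as VM_PROT_WRITE: a read fault that carries VM_PROT_COPY
takes the same copy path as a write fault, but the resulting page is
still mapped with only the protections the map entry grants.  A
condensed model of that predicate (hypothetical helper name, assuming
the vm.h defines above):

/* Replicate the page on an actual write or on an explicit forced copy. */
static int
fault_needs_copy(vm_prot_t fault_type)
{
	return ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0);
}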

sys/vm/vm_map.c
@@ -3554,14 +3554,8 @@ RetryLookup:;
 
 	/*
 	 * Check whether this task is allowed to have this page.
-	 * Note the special case for MAP_ENTRY_COW
-	 * pages with an override.  This is to implement a forced
-	 * COW for debuggers.
 	 */
-	if (fault_type & VM_PROT_OVERRIDE_WRITE)
-		prot = entry->max_protection;
-	else
-		prot = entry->protection;
+	prot = entry->protection;
 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
 		vm_map_unlock_read(map);
@@ -3569,8 +3563,7 @@ RetryLookup:;
 	}
 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
 	    (entry->eflags & MAP_ENTRY_COW) &&
-	    (fault_type & VM_PROT_WRITE) &&
-	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
+	    (fault_type & VM_PROT_WRITE)) {
 		vm_map_unlock_read(map);
 		return (KERN_PROTECTION_FAILURE);
 	}
@@ -3581,7 +3574,7 @@ RetryLookup:;
 	 */
 	*wired = (entry->wired_count != 0);
 	if (*wired)
-		prot = fault_type = entry->protection;
+		fault_type = entry->protection;
 	size = entry->end - entry->start;
 	/*
 	 * If the entry was copy-on-write, we either ...
@@ -3594,7 +3587,8 @@ RetryLookup:;
 	 * If we don't need to write the page, we just demote the
 	 * permissions allowed.
 	 */
-	if (fault_type & VM_PROT_WRITE) {
+	if ((fault_type & VM_PROT_WRITE) != 0 ||
+	    (fault_typea & VM_PROT_COPY) != 0) {
 		/*
 		 * Make a new object, and place it in the object
 		 * chain.  Note that no new references have appeared
@@ -3717,21 +3711,14 @@ vm_map_lookup_locked(vm_map_t *var_map,	/* IN/OUT */
 
 	/*
 	 * Check whether this task is allowed to have this page.
-	 * Note the special case for MAP_ENTRY_COW
-	 * pages with an override.  This is to implement a forced
-	 * COW for debuggers.
 	 */
-	if (fault_type & VM_PROT_OVERRIDE_WRITE)
-		prot = entry->max_protection;
-	else
-		prot = entry->protection;
+	prot = entry->protection;
 	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
 	if ((fault_type & prot) != fault_type)
 		return (KERN_PROTECTION_FAILURE);
 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
 	    (entry->eflags & MAP_ENTRY_COW) &&
-	    (fault_type & VM_PROT_WRITE) &&
-	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0)
+	    (fault_type & VM_PROT_WRITE))
 		return (KERN_PROTECTION_FAILURE);
 
 	/*
@@ -3740,7 +3727,7 @@ vm_map_lookup_locked(vm_map_t *var_map,	/* IN/OUT */
 	 */
 	*wired = (entry->wired_count != 0);
 	if (*wired)
-		prot = fault_type = entry->protection;
+		fault_type = entry->protection;
 
 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 		/*