Handle wiring failures in vm_map_wire() with the new functions
pmap_unwire() and vm_object_unwire(). Retire vm_fault_{un,}wire(), since they are no longer used. (See r268327 and r269134 for the motivation behind this change.) Reviewed by: kib Sponsored by: EMC / Isilon Storage Division
Parent commit: 1dcef10eac. This commit: 66cd575b28.
@ -81,7 +81,6 @@ int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
|
||||
int fault_flags, vm_page_t *m_hold);
|
||||
int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
|
||||
vm_prot_t prot, vm_page_t *ma, int max_count);
|
||||
int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
|
||||
int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
|
||||
void vm_waitproc(struct proc *);
|
||||
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
|
||||
|
@ -106,7 +106,6 @@ __FBSDID("$FreeBSD$");
|
||||
#define PFFOR 4
|
||||
|
||||
static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
|
||||
static void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
|
||||
|
||||
#define VM_FAULT_READ_BEHIND 8
|
||||
#define VM_FAULT_READ_MAX (1 + VM_FAULT_READ_AHEAD_MAX)
|
||||
@ -1154,68 +1153,6 @@ vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
|
||||
return (-1);
|
||||
}
|
||||
|
||||
/*
|
||||
* vm_fault_wire:
|
||||
*
|
||||
* Wire down a range of virtual addresses in a map.
|
||||
*/
|
||||
int
|
||||
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
boolean_t fictitious)
|
||||
{
|
||||
vm_offset_t va;
|
||||
int rv;
|
||||
|
||||
/*
|
||||
* We simulate a fault to get the page and enter it in the physical
|
||||
* map. For user wiring, we only ask for read access on currently
|
||||
* read-only sections.
|
||||
*/
|
||||
for (va = start; va < end; va += PAGE_SIZE) {
|
||||
rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
|
||||
if (rv) {
|
||||
if (va != start)
|
||||
vm_fault_unwire(map, start, va, fictitious);
|
||||
return (rv);
|
||||
}
|
||||
}
|
||||
return (KERN_SUCCESS);
|
||||
}
|
||||
|
||||
/*
 * vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 *
 *	"fictitious" indicates that the backing object's pages have no
 *	vm_page structures (presumably OBJ_FICTITIOUS device memory —
 *	confirm against the caller), so only the pmap wiring is undone
 *	for them.
 */
static void
vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		/* Skip addresses with no mapping in the pmap. */
		if (pa != 0) {
			/* Clear the wired attribute on the mapping. */
			pmap_change_wiring(pmap, va, FALSE);
			if (!fictitious) {
				/*
				 * Drop the page's wire count; the page
				 * lock is required by vm_page_unwire().
				 */
				m = PHYS_TO_VM_PAGE(pa);
				vm_page_lock(m);
				vm_page_unwire(m, PQ_ACTIVE);
				vm_page_unlock(m);
			}
		}
	}
}
|
||||
|
||||
/*
|
||||
* Routine:
|
||||
* vm_fault_copy_entry
|
||||
|
@ -140,6 +140,8 @@ static void vmspace_zdtor(void *mem, int size, void *arg);
|
||||
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
|
||||
vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
|
||||
int cow);
|
||||
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
|
||||
vm_offset_t failed_addr);
|
||||
|
||||
#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
|
||||
((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
|
||||
@ -2417,6 +2419,42 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
return (rv);
|
||||
}
|
||||
|
||||
/*
 * vm_map_wire_entry_failure:
 *
 *	Handle a wiring failure on the given entry.
 *
 *	"failed_addr" is the first address within the entry at which
 *	wiring failed; pages in [entry->start, failed_addr) were wired
 *	successfully and are unwired here.
 *
 *	The map should be locked.
 */
static void
vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr)
{

	VM_MAP_ASSERT_LOCKED(map);
	/*
	 * The entry must be mid-wiring: marked in transition with a
	 * wired count of exactly one.
	 */
	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
	    entry->wired_count == 1,
	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
	/* A fully wired entry would not be reported as a failure. */
	KASSERT(failed_addr < entry->end,
	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));

	/*
	 * If any pages at the start of this entry were successfully wired,
	 * then unwire them.
	 */
	if (failed_addr > entry->start) {
		/* Undo both the pmap wiring and the page wire counts. */
		pmap_unwire(map->pmap, entry->start, failed_addr);
		vm_object_unwire(entry->object.vm_object, entry->offset,
		    failed_addr - entry->start, PQ_ACTIVE);
	}

	/*
	 * Assign an out-of-range value to represent the failure to wire this
	 * entry.
	 */
	entry->wired_count = -1;
}
|
||||
|
||||
/*
|
||||
* vm_map_wire:
|
||||
*
|
||||
@ -2427,10 +2465,10 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
int flags)
|
||||
{
|
||||
vm_map_entry_t entry, first_entry, tmp_entry;
|
||||
vm_offset_t saved_end, saved_start;
|
||||
vm_offset_t faddr, saved_end, saved_start;
|
||||
unsigned int last_timestamp;
|
||||
int rv;
|
||||
boolean_t fictitious, need_wakeup, result, user_wire;
|
||||
boolean_t need_wakeup, result, user_wire;
|
||||
vm_prot_t prot;
|
||||
|
||||
if (start == end)
|
||||
@ -2523,17 +2561,24 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
entry->wired_count++;
|
||||
saved_start = entry->start;
|
||||
saved_end = entry->end;
|
||||
fictitious = entry->object.vm_object != NULL &&
|
||||
(entry->object.vm_object->flags &
|
||||
OBJ_FICTITIOUS) != 0;
|
||||
|
||||
/*
|
||||
* Release the map lock, relying on the in-transition
|
||||
* mark. Mark the map busy for fork.
|
||||
*/
|
||||
vm_map_busy(map);
|
||||
vm_map_unlock(map);
|
||||
rv = vm_fault_wire(map, saved_start, saved_end,
|
||||
fictitious);
|
||||
|
||||
for (faddr = saved_start; faddr < saved_end; faddr +=
|
||||
PAGE_SIZE) {
|
||||
/*
|
||||
* Simulate a fault to get the page and enter
|
||||
* it into the physical map.
|
||||
*/
|
||||
if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
|
||||
VM_FAULT_CHANGE_WIRING)) != KERN_SUCCESS)
|
||||
break;
|
||||
}
|
||||
vm_map_lock(map);
|
||||
vm_map_unbusy(map);
|
||||
if (last_timestamp + 1 != map->timestamp) {
|
||||
@ -2552,23 +2597,22 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
first_entry = NULL;
|
||||
entry = tmp_entry;
|
||||
while (entry->end < saved_end) {
|
||||
if (rv != KERN_SUCCESS) {
|
||||
KASSERT(entry->wired_count == 1,
|
||||
("vm_map_wire: bad count"));
|
||||
entry->wired_count = -1;
|
||||
}
|
||||
/*
|
||||
* In case of failure, handle entries
|
||||
* that were not fully wired here;
|
||||
* fully wired entries are handled
|
||||
* later.
|
||||
*/
|
||||
if (rv != KERN_SUCCESS &&
|
||||
faddr < entry->end)
|
||||
vm_map_wire_entry_failure(map,
|
||||
entry, faddr);
|
||||
entry = entry->next;
|
||||
}
|
||||
}
|
||||
last_timestamp = map->timestamp;
|
||||
if (rv != KERN_SUCCESS) {
|
||||
KASSERT(entry->wired_count == 1,
|
||||
("vm_map_wire: bad count"));
|
||||
/*
|
||||
* Assign an out-of-range value to represent
|
||||
* the failure to wire this entry.
|
||||
*/
|
||||
entry->wired_count = -1;
|
||||
vm_map_wire_entry_failure(map, entry, faddr);
|
||||
end = entry->end;
|
||||
goto done;
|
||||
}
|
||||
@ -2632,6 +2676,10 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
entry->wired_count = 0;
|
||||
} else if (!user_wire ||
|
||||
(entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
|
||||
/*
|
||||
* Undo the wiring. Wiring succeeded on this entry
|
||||
* but failed on a later entry.
|
||||
*/
|
||||
if (entry->wired_count == 1)
|
||||
vm_map_entry_unwire(map, entry);
|
||||
else
|
||||
|
Loading…
Reference in New Issue
Block a user