To date, unwiring a fictitious page has produced a panic.  The reason
is that PHYS_TO_VM_PAGE() returns the wrong vm_page for fictitious
pages, but unwiring uses PHYS_TO_VM_PAGE().  The resulting panic
reported an unexpected wired count.  Rather than attempting to fix
PHYS_TO_VM_PAGE(), this fix takes advantage of the properties of
fictitious pages.  Specifically, fictitious pages will never be
completely unwired.  Therefore, we can keep a fictitious page's wired
count forever set to one and thereby avoid the use of
PHYS_TO_VM_PAGE() when we know that we're working with a fictitious
page, just not which one.
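
For illustration, the per-entry test that the callers below pass down
could be written as a helper like the following sketch (illustrative
only; the commit open-codes the expression at each call site, and the
helper name is not part of the tree):

    static __inline boolean_t
    vm_map_entry_is_fictitious(vm_map_entry_t entry)
    {

            return (entry->object.vm_object != NULL &&
                entry->object.vm_object->type == OBJT_DEVICE);
    }

Entries backed by an OBJT_DEVICE object map fictitious pages, which the
device pager creates outside vm_page_array; PHYS_TO_VM_PAGE() therefore
cannot locate them, so vm_fault_unwire() must be told, rather than
discover, that a range maps fictitious pages.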

In collaboration with: green@, tegge@
PR: kern/29915
alc 2004-05-22 04:53:51 +00:00
parent 29c76b1201
commit 5d0912f6d8
4 changed files with 29 additions and 18 deletions

sys/vm/vm_extern.h

@@ -69,8 +69,8 @@ void swapout_procs(int);
 int useracc(void *, int, int);
 int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
 void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t);
-void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t);
-int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
+void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
+int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
 void vm_forkproc(struct thread *, struct proc *, struct thread *, int);
 void vm_waitproc(struct proc *);
 int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t);

sys/vm/vm_fault.c

@@ -1038,10 +1038,8 @@ vm_fault_quick(caddr_t v, int prot)
  * Wire down a range of virtual addresses in a map.
  */
 int
-vm_fault_wire(map, start, end, user_wire)
-        vm_map_t map;
-        vm_offset_t start, end;
-        boolean_t user_wire;
+vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
+    boolean_t user_wire, boolean_t fictitious)
 {
         vm_offset_t va;
         int rv;
@@ -1057,7 +1055,7 @@ vm_fault_wire(map, start, end, user_wire)
                     user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
                 if (rv) {
                         if (va != start)
-                                vm_fault_unwire(map, start, va);
+                                vm_fault_unwire(map, start, va, fictitious);
                         return (rv);
                 }
         }
@@ -1070,9 +1068,8 @@ vm_fault_wire(map, start, end, user_wire)
  * Unwire a range of virtual addresses in a map.
  */
 void
-vm_fault_unwire(map, start, end)
-        vm_map_t map;
-        vm_offset_t start, end;
+vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
+    boolean_t fictitious)
 {
         vm_paddr_t pa;
         vm_offset_t va;
@@ -1090,11 +1087,13 @@ vm_fault_unwire(map, start, end)
                 pa = pmap_extract(pmap, va);
                 if (pa != 0) {
                         pmap_change_wiring(pmap, va, FALSE);
+                        if (!fictitious) {
                                 vm_page_lock_queues();
                                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
                                 vm_page_unlock_queues();
+                        }
                 }
         }
         if (pmap != kernel_pmap)
                 mtx_unlock(&Giant);
 }

sys/vm/vm_map.c

@@ -1726,7 +1726,9 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
                         /*
                          * Retain the map lock.
                          */
-                        vm_fault_unwire(map, entry->start, entry->end);
+                        vm_fault_unwire(map, entry->start, entry->end,
+                            entry->object.vm_object != NULL &&
+                            entry->object.vm_object->type == OBJT_DEVICE);
                 }
         }
         KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
@@ -1758,7 +1760,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
         vm_offset_t saved_end, saved_start;
         unsigned int last_timestamp;
         int rv;
-        boolean_t need_wakeup, result, user_wire;
+        boolean_t fictitious, need_wakeup, result, user_wire;
 
         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
         vm_map_lock(map);
@@ -1834,13 +1836,15 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
                 entry->wired_count++;
                 saved_start = entry->start;
                 saved_end = entry->end;
+                fictitious = entry->object.vm_object != NULL &&
+                    entry->object.vm_object->type == OBJT_DEVICE;
                 /*
                  * Release the map lock, relying on the in-transition
                  * mark.
                  */
                 vm_map_unlock(map);
                 rv = vm_fault_wire(map, saved_start, saved_end,
-                    user_wire);
+                    user_wire, fictitious);
                 vm_map_lock(map);
                 if (last_timestamp + 1 != map->timestamp) {
                         /*
@@ -1924,7 +1928,9 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
                         /*
                          * Retain the map lock.
                          */
-                        vm_fault_unwire(map, entry->start, entry->end);
+                        vm_fault_unwire(map, entry->start, entry->end,
+                            entry->object.vm_object != NULL &&
+                            entry->object.vm_object->type == OBJT_DEVICE);
                 }
         }
         KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
@@ -2048,7 +2054,9 @@ vm_map_sync(
 static void
 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 {
-        vm_fault_unwire(map, entry->start, entry->end);
+        vm_fault_unwire(map, entry->start, entry->end,
+            entry->object.vm_object != NULL &&
+            entry->object.vm_object->type == OBJT_DEVICE);
         entry->wired_count = 0;
 }

sys/vm/vm_page.c

@@ -1167,6 +1167,8 @@ vm_page_wire(vm_page_t m)
          */
         s = splvm();
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+        if (m->flags & PG_FICTITIOUS)
+                return;
         if (m->wire_count == 0) {
                 if ((m->flags & PG_UNMANAGED) == 0)
                         vm_pageq_remove(m);
@@ -1212,6 +1214,8 @@ vm_page_unwire(vm_page_t m, int activate)
         s = splvm();
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+        if (m->flags & PG_FICTITIOUS)
+                return;
         if (m->wire_count > 0) {
                 m->wire_count--;
                 if (m->wire_count == 0) {