In general, we call pmap_remove_all() before calling vm_page_cache(),
so the call to pmap_remove_all() within vm_page_cache() is usually
redundant.  This change eliminates the call to pmap_remove_all() from
vm_page_cache() and introduces a call to pmap_remove_all() before
vm_page_cache() in the one place where it didn't already exist.

When iterating over a paging queue, if the object containing the
current page has a zero reference count, then the page can't have any
managed mappings, so a call to pmap_remove_all() is pointless.

Change a panic() call in vm_page_cache() to a KASSERT().

MFC after:	6 weeks
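For illustration, a minimal caller-side sketch (not part of this diff) of the contract the change establishes: vm_page_cache() now asserts, rather than establishes, the "unmapped and clean" state, so each caller is responsible for it. The wrapper cache_if_clean() is hypothetical; only pmap_remove_all(), vm_page_test_dirty(), vm_page_cache(), and the ref_count test appear in the commit itself.

/*
 * Hypothetical caller sketch: after this change, vm_page_cache()
 * KASSERTs that the page is unmapped and clean instead of making it so.
 */
static void
cache_if_clean(vm_object_t object, vm_page_t m)
{

	vm_page_test_dirty(m);
	/*
	 * A page in an object with a zero reference count cannot have
	 * any managed mappings, so pmap_remove_all() would be a no-op.
	 */
	if (m->dirty == 0 && object->ref_count != 0)
		pmap_remove_all(m);
	/*
	 * pmap_remove_all() may have transferred a hardware modified
	 * bit into m->dirty; only a still-clean page may be cached.
	 */
	if (m->dirty == 0)
		vm_page_cache(m);
}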
parent cd9e9d1bc2
commit 9fc4739d2a
@@ -2277,9 +2277,9 @@ vm_page_cache(vm_page_t m)
 	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
 	    m->hold_count || m->wire_count)
 		panic("vm_page_cache: attempting to cache busy page");
-	pmap_remove_all(m);
-	if (m->dirty != 0)
-		panic("vm_page_cache: page %p is dirty", m);
+	KASSERT(!pmap_page_is_mapped(m),
+	    ("vm_page_cache: page %p is mapped", m));
+	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
 	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
 	    (object->type == OBJT_SWAP &&
 	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
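The dirty-page panic() removed by this hunk fired unconditionally; the KASSERT()s that replace it are compiled in only when the kernel is built with "options INVARIANTS", so the checks cost nothing in production kernels. A simplified sketch of the distinction (the real macro lives in sys/sys/systm.h):

/* Simplified sketch of the KASSERT() definition, for illustration. */
#ifdef INVARIANTS
#define	KASSERT(exp, msg) do {			\
	if (__predict_false(!(exp)))		\
		panic msg;			\
} while (0)
#else
#define	KASSERT(exp, msg) do { } while (0)
#endif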
@@ -594,7 +594,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
 			continue;
 		}
 		vm_page_test_dirty(m);
-		if (m->dirty == 0)
+		if (m->dirty == 0 && object->ref_count != 0)
 			pmap_remove_all(m);
 		if (m->dirty != 0) {
 			vm_page_unlock(m);
@@ -1059,31 +1059,16 @@ vm_pageout_scan(int pass)
 		}
 
 		/*
-		 * If the upper level VM system does not believe that the page
-		 * is fully dirty, but it is mapped for write access, then we
-		 * consult the pmap to see if the page's dirty status should
-		 * be updated.
+		 * If the page appears to be clean at the machine-independent
+		 * layer, then remove all of its mappings from the pmap in
+		 * anticipation of placing it onto the cache queue.  If,
+		 * however, any of the page's mappings allow write access,
+		 * then the page may still be modified until the last of those
+		 * mappings are removed.
 		 */
-		if (m->dirty != VM_PAGE_BITS_ALL &&
-		    pmap_page_is_write_mapped(m)) {
-			/*
-			 * Avoid a race condition: Unless write access is
-			 * removed from the page, another processor could
-			 * modify it before all access is removed by the call
-			 * to vm_page_cache() below.  If vm_page_cache() finds
-			 * that the page has been modified when it removes all
-			 * access, it panics because it cannot cache dirty
-			 * pages.  In principle, we could eliminate just write
-			 * access here rather than all access.  In the expected
-			 * case, when there are no last instant modifications
-			 * to the page, removing all access will be cheaper
-			 * overall.
-			 */
-			if (pmap_is_modified(m))
-				vm_page_dirty(m);
-			else if (m->dirty == 0)
-				pmap_remove_all(m);
-		}
+		vm_page_test_dirty(m);
+		if (m->dirty == 0 && object->ref_count != 0)
+			pmap_remove_all(m);
 
 		if (m->valid == 0) {
 			/*
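Why it is safe to remove all mappings from an apparently clean page even when, as the new comment notes, a writable mapping could still dirty it at the last instant: pmap_remove_all() transfers the hardware modified bit into m->dirty as each mapping is destroyed, so a late store makes the page turn up dirty (and get laundered) rather than being lost. A hedged sketch of that machine-dependent behavior; first_mapping(), pte_was_modified(), and destroy_mapping() are hypothetical stand-ins, not real pmap interfaces:

/*
 * Sketch only: approximate shape of a pmap_remove_all()
 * implementation.  The helpers used here are hypothetical.
 */
void
pmap_remove_all_sketch(vm_page_t m)
{
	pv_entry_t pv;

	while ((pv = first_mapping(m)) != NULL) {
		if (pte_was_modified(pv))	/* hardware dirty bit set? */
			vm_page_dirty(m);	/* propagate to MI layer */
		destroy_mapping(pv);		/* invalidate the PTE */
	}
}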