Merge portions of r208645 and supporting code from the i386 pmap:

When I pushed down the page queues lock into pmap_is_modified(), I created
  an ordering dependence: A pmap operation that clears PG_WRITEABLE and calls
  vm_page_dirty() must perform the call first.  Otherwise, pmap_is_modified()
  could return FALSE without acquiring the page queues lock because the page
  is not (currently) writeable, and the caller to pmap_is_modified() might
  believe that the page's dirty field is clear because it has not seen the
  effect of the vm_page_dirty() call.
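
  To illustrate the ordering rule, the unlocked fast path in question looks
  roughly like the sketch below.  This is a condensed, hypothetical rendering,
  not the verbatim pmap_is_modified() of any particular pmap, and PTE_M merely
  stands in for whatever modified/dirty PTE bit the architecture uses; the
  point is only that a clear PG_WRITEABLE short-circuits the check before the
  page queues lock is ever taken.

    boolean_t
    pmap_is_modified_sketch(vm_page_t m)
    {
            boolean_t rv;

            /*
             * Fast path: if the page has no writeable mappings (PG_WRITEABLE
             * is clear), none of its PTEs can have the modified bit set, so
             * skip the pv walk and, crucially, skip acquiring the page
             * queues lock.  This is why vm_page_dirty() must be called
             * before PG_WRITEABLE is cleared.
             */
            if ((m->flags & PG_WRITEABLE) == 0)
                    return (FALSE);
            vm_page_lock_queues();
            rv = pmap_testbit(m, PTE_M);    /* stand-in for the real check */
            vm_page_unlock_queues();
            return (rv);
    }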

  When I pushed down the page queues lock into pmap_is_modified(), I
  overlooked one place where this ordering dependence is violated:
  pmap_enter().  In a rare situation pmap_enter() can be called to replace a
  dirty mapping to one page with a mapping to another page.  (I say rare
  because replacements generally occur as a result of a copy-on-write fault,
  and so the old page is not dirty.)  This change delays clearing PG_WRITEABLE
  until after vm_page_dirty() has been called.
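
  Concretely, the corrected ordering in that replacement path, condensed from
  the pmap_enter() hunks below (the surrounding test of the old PTE's modified
  bit is elided), is:

    if (page_is_managed(opa))
            vm_page_dirty(om);                      /* record the modification first */
    if (page_is_managed(opa) && TAILQ_EMPTY(&om->md.pv_list))
            vm_page_flag_clear(om, PG_WRITEABLE);   /* only then clear the flag */

  Performing these two steps in the opposite order is exactly the violation
  described above: pmap_is_modified() on the old page could observe
  PG_WRITEABLE already clear before vm_page_dirty() had run.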

  Fixing the ordering dependency also makes it easy to introduce a small
  optimization: When pmap_enter() used to replace a mapping to one page with a
  mapping to another page, it freed the pv entry for the first mapping and
  later called the pv entry allocator for the new mapping.  Now, pmap_enter()
  attempts to recycle the old pv entry, saving two calls to the pv entry
  allocator.
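
  The recycling itself, condensed from the pmap_enter() hunks below (locking,
  the clean-submap KASSERT, and the intervening PTE manipulation are elided),
  amounts to:

    pv = NULL;
    if (page_is_managed(opa)) {
            om = PHYS_TO_VM_PAGE(opa);
            /* Detach the old mapping's pv entry but keep it for reuse. */
            pv = pmap_pvh_remove(&om->md, pmap, va);
    }
    /* ... the old mapping is torn down and the new PTE is prepared ... */
    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
            if (pv == NULL)                 /* nothing to recycle */
                    pv = get_pv_entry(pmap);
            pv->pv_va = va;
            pv->pv_pmap = pmap;
            pv->pv_ptem = mpte;
            pv->pv_wired = wired;
            TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
            TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
            m->md.pv_list_count++;
    } else if (pv != NULL)
            free_pv_entry(pv);              /* the new mapping is unmanaged */
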
Author: Alan Cox
Date:   2010-05-31 01:43:02 +00:00
Commit: c027892779
Parent: c68c71f9b8
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=208665

@@ -163,6 +163,9 @@ static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
+static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
+static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
+    vm_offset_t va);
 static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
@@ -171,9 +174,6 @@ static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
 static boolean_t pmap_testbit(vm_page_t m, int bit);
-static void
-    pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
-        vm_page_t m, boolean_t wired);
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
     vm_offset_t va, vm_page_t m);
@@ -1385,15 +1385,15 @@ get_pv_entry(pmap_t locked_pmap)
  * the entry. In either case we free the now unused entry.
  */
-static void
-pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va)
+static pv_entry_t
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 {
         pv_entry_t pv;
 
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-        if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
-                TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+        if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
+                TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
                         if (pmap == pv->pv_pmap && va == pv->pv_va)
                                 break;
                 }
@@ -1403,39 +1403,33 @@ pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va)
                                 break;
                 }
         }
-        KASSERT(pv != NULL, ("pmap_remove_entry: pv not found, pa %lx va %lx",
-            (u_long)VM_PAGE_TO_PHYS(m), (u_long)va));
-        TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-        m->md.pv_list_count--;
-        if (TAILQ_FIRST(&m->md.pv_list) == NULL)
-                vm_page_flag_clear(m, PG_WRITEABLE);
-        TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
-        free_pv_entry(pv);
+        if (pv != NULL) {
+                TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+                pvh->pv_list_count--;
+                TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
+        }
+        return (pv);
 }
 
-/*
- * Create a pv entry for page at pa for
- * (pmap, va).
- */
 static void
-pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m,
-    boolean_t wired)
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 {
         pv_entry_t pv;
 
-        pv = get_pv_entry(pmap);
-        pv->pv_va = va;
-        pv->pv_pmap = pmap;
-        pv->pv_ptem = mpte;
-        pv->pv_wired = wired;
+        pv = pmap_pvh_remove(pvh, pmap, va);
+        KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
+            (u_long)VM_PAGE_TO_PHYS(m), (u_long)va));
+        free_pv_entry(pv);
+}
+
+static void
+pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
+{
 
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-        TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
-        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
-        m->md.pv_list_count++;
+        pmap_pvh_free(&m->md, pmap, va);
+        if (TAILQ_EMPTY(&m->md.pv_list))
+                vm_page_flag_clear(m, PG_WRITEABLE);
 }
 
 /*
@@ -1740,6 +1734,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         vm_offset_t pa, opa;
         register pt_entry_t *pte;
         pt_entry_t origpte, newpte;
+        pv_entry_t pv;
         vm_page_t mpte, om;
         int rw = 0;
@@ -1815,6 +1810,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
                 }
                 goto validate;
         }
+
+        pv = NULL;
+
         /*
          * Mapping has changed, invalidate old range and fall through to
          * handle validating new mapping.
@@ -1825,7 +1823,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         if (page_is_managed(opa)) {
                 om = PHYS_TO_VM_PAGE(opa);
-                pmap_remove_entry(pmap, om, va);
+                pv = pmap_pvh_remove(&om->md, pmap, va);
         }
         if (mpte != NULL) {
                 mpte->wire_count--;
@@ -1844,8 +1842,18 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
                     ("pmap_enter: managed mapping within the clean submap"));
-                pmap_insert_entry(pmap, va, mpte, m, wired);
-        }
+                if (pv == NULL)
+                        pv = get_pv_entry(pmap);
+                pv->pv_va = va;
+                pv->pv_pmap = pmap;
+                pv->pv_ptem = mpte;
+                pv->pv_wired = wired;
+                TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
+                TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+                m->md.pv_list_count++;
+        } else if (pv != NULL)
+                free_pv_entry(pv);
+
         /*
          * Increment counters
          */
@@ -1897,6 +1905,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
                         if (page_is_managed(opa))
                                 vm_page_dirty(om);
                 }
+                if (page_is_managed(opa) &&
+                    TAILQ_EMPTY(&om->md.pv_list))
+                        vm_page_flag_clear(om, PG_WRITEABLE);
         } else {
                 *pte = newpte;
         }