Merge portions of r208645 and supporting code from the i386 pmap:

When I pushed down the page queues lock into pmap_is_modified(), I created
  an ordering dependence: a pmap operation that clears PG_WRITEABLE and calls
  vm_page_dirty() must call vm_page_dirty() before clearing PG_WRITEABLE.
  Otherwise, pmap_is_modified() could return FALSE without acquiring the page
  queues lock, because the page is not (currently) writeable, and the caller
  of pmap_is_modified() might believe that the page's dirty field is clear
  because it has not seen the effect of the vm_page_dirty() call.
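
  To make the dependence concrete, here is a minimal sketch of the unlocked
  fast path in pmap_is_modified() that the ordering protects (modeled on the
  i386 pmap of the period; simplified, with the busy-page check and
  assertions elided):

	boolean_t
	pmap_is_modified(vm_page_t m)
	{
		boolean_t rv;

		/*
		 * If the page has no writeable mappings, it cannot have been
		 * modified through one, so the page queues lock need not be
		 * acquired.  This shortcut is sound only if every path that
		 * clears PG_WRITEABLE calls vm_page_dirty() first.
		 */
		if ((m->flags & PG_WRITEABLE) == 0)
			return (FALSE);
		vm_page_lock_queues();
		rv = pmap_is_modified_pvh(&m->md);
		vm_page_unlock_queues();
		return (rv);
	}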

  When I pushed down the page queues lock into pmap_is_modified(), I
  overlooked one place where this ordering dependence is violated:
  pmap_enter().  In a rare situation pmap_enter() can be called to replace a
  dirty mapping to one page with a mapping to another page.  (I say rare
  because replacements generally occur as a result of a copy-on-write fault,
  and so the old page is not dirty.)  This change delays clearing PG_WRITEABLE
  until after vm_page_dirty() has been called.
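
  In outline, the replaced-mapping path must now perform the operations in
  this order (a sketch using i386-style names such as origpte, PG_M, PG_RW,
  and PG_MANAGED standing in for the sun4v equivalents):

	if ((origpte & PG_MANAGED) != 0) {
		om = PHYS_TO_VM_PAGE(opa);
		/*
		 * Transfer the dirty bit from the old mapping first, while
		 * om may still have PG_WRITEABLE set.
		 */
		if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			vm_page_dirty(om);
	}
	/* ... replace the mapping ... */
	/* Only now is it safe to clear PG_WRITEABLE on the old page. */
	if ((origpte & PG_MANAGED) != 0 && TAILQ_EMPTY(&om->md.pv_list))
		vm_page_flag_clear(om, PG_WRITEABLE);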

  Fixing this ordering dependence also makes it easy to introduce a small
  optimization: When pmap_enter() used to replace a mapping to one page with a
  mapping to another page, it freed the pv entry for the first mapping and
  later called the pv entry allocator for the new mapping.  Now, pmap_enter()
  attempts to recycle the old pv entry, saving two calls to the pv entry
  allocator.
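
  The recycling takes the following shape in pmap_enter() (condensed from
  the patch below; wiring and TLB details omitted):

	pv = NULL;
	...
	if (otte_data & VTD_MANAGED)	/* replacing a managed mapping */
		pv = pmap_pvh_remove(&om->md, pmap, va);	/* keep the pv */
	...
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
		if (pv == NULL)		/* allocate only if none recycled */
			pv = get_pv_entry(pmap);
		pv->pv_va = va;
		pv->pv_pmap = pmap;
		...
	} else if (pv != NULL)		/* new mapping is unmanaged */
		free_pv_entry(pv);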

  There is no point in setting PG_WRITEABLE on unmanaged pages, so don't.
Author: Alan Cox
Date:   2010-06-01 05:18:48 +00:00
Commit: 6039d0b777 (parent ef1315e957)

@@ -189,6 +189,9 @@ if (pmap_debug) printf
 static void free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
+static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
+static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
+    vm_offset_t va);
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
@@ -1077,6 +1080,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 {
     vm_paddr_t pa, opa;
     uint64_t tte_data, otte_data;
+    pv_entry_t pv;
     vm_page_t om;
     int invlva;
@@ -1103,6 +1107,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         pmap_add_tte(pmap, va, m, &tte_data, wired);
     } else if (pa != opa) {
+        pv = NULL;
         /*
          * Mapping has changed, handle validating new mapping.
          *
@@ -1112,10 +1117,23 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         if (otte_data & VTD_MANAGED) {
             om = PHYS_TO_VM_PAGE(opa);
-            pmap_remove_entry(pmap, om, va);
+            pv = pmap_pvh_remove(&om->md, pmap, va);
         }
         pmap_add_tte(pmap, va, m, &tte_data, wired);
         if (wired)
             pmap->pm_stats.wired_count++;
+        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+            if (pv == NULL)
+                pv = get_pv_entry(pmap);
+            pv->pv_va = va;
+            pv->pv_pmap = pmap;
+            TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
+            TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+            m->md.pv_list_count++;
+            tte_data |= VTD_MANAGED;
+        } else if (pv != NULL)
+            free_pv_entry(pv);
     } else /* (pa == opa) */ {
         /*
@@ -1144,7 +1162,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
      */
     if ((prot & VM_PROT_WRITE) != 0) {
         tte_data |= VTD_SW_W;
-        vm_page_flag_set(m, PG_WRITEABLE);
+        if ((tte_data & VTD_MANAGED) != 0)
+            vm_page_flag_set(m, PG_WRITEABLE);
     }
     if ((prot & VM_PROT_EXECUTE) != 0)
         tte_data |= VTD_X;
@@ -1168,6 +1187,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
             if ((pa & VTD_SW_W) != 0)
                 invlva = TRUE;
         }
+        if ((otte_data & VTD_MANAGED) != 0 &&
+            TAILQ_EMPTY(&om->md.pv_list))
+            vm_page_flag_clear(om, PG_WRITEABLE);
         if (invlva)
             pmap_invalidate_page(pmap, va, TRUE);
     }
@@ -2047,16 +2069,17 @@ pmap_remove_all(vm_page_t m)
     vm_page_unlock_queues();
 }
 
-static void
-pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
+static pv_entry_t
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 {
     pv_entry_t pv;
 
     if (pmap != kernel_pmap)
-        DPRINTF("pmap_remove_entry(va=0x%lx, pa=0x%lx)\n", va, VM_PAGE_TO_PHYS(m));
+        DPRINTF("pmap_pvh_remove(va=0x%lx, pa=0x%lx)\n", va,
+            VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)));
     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-    if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
-        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+    if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
+        TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
             if (pmap == pv->pv_pmap && va == pv->pv_va)
                 break;
         }
@@ -2066,13 +2089,33 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
                 break;
         }
     }
-    KASSERT(pv != NULL, ("pmap_remove_entry: pv not found va=0x%lx pa=0x%lx", va, VM_PAGE_TO_PHYS(m)));
-    TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-    m->md.pv_list_count--;
+    if (pv != NULL) {
+        TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+        pvh->pv_list_count--;
+        TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
+    }
+    return (pv);
+}
+
+static void
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+    pv_entry_t pv;
+
+    pv = pmap_pvh_remove(pvh, pmap, va);
+    KASSERT(pv != NULL, ("pmap_pvh_free: pv not found va=0x%lx pa=0x%lx",
+        va, VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh))));
+    free_pv_entry(pv);
+}
+
+static void
+pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
+{
+
+    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+    pmap_pvh_free(&m->md, pmap, va);
     if (TAILQ_EMPTY(&m->md.pv_list))
         vm_page_flag_clear(m, PG_WRITEABLE);
-    TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
-    free_pv_entry(pv);
 }