Flesh out bus_dmamap_sync.

Benno Rice 2003-01-27 04:27:01 +00:00
parent 8519f3983c
commit 622cfbd033
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=109919
5 changed files with 544 additions and 63 deletions

View File

@@ -203,6 +203,8 @@ vm_offset_t msgbuf_phys;
vm_offset_t avail_start;
vm_offset_t avail_end;
int pmap_pagedaemon_waken;
/*
* Map of physical memory regions.
*/
@@ -317,7 +319,7 @@ static void pmap_pa_map(struct pvo_entry *, vm_offset_t,
static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t pmap_query_bit(vm_page_t, int);
static boolean_t pmap_clear_bit(vm_page_t, int);
static u_int pmap_clear_bit(vm_page_t, int, int *);
static void tlbia(void);
static __inline int
@@ -862,12 +864,8 @@ pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
}
void
pmap_clear_modify(vm_page_t m)
pmap_collect(void)
{
if (m->flags & PG_FICTITIOUS)
return;
pmap_clear_bit(m, PTE_CHG);
}
void
@@ -991,7 +989,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pg = NULL;
was_exec = PTE_EXEC;
} else {
pvo_head = pa_to_pvoh(VM_PAGE_TO_PHYS(m), &pg);
pvo_head = vm_page_to_pvoh(m);
pg = m;
zone = pmap_mpvo_zone;
pvo_flags = PVO_MANAGED;
was_exec = 0;
@@ -1106,7 +1105,7 @@ boolean_t
pmap_is_modified(vm_page_t m)
{
if (m->flags & PG_FICTITIOUS)
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (FALSE);
return (pmap_query_bit(m, PTE_CHG));
@@ -1115,7 +1114,19 @@ pmap_is_modified(vm_page_t m)
void
pmap_clear_reference(vm_page_t m)
{
TODO;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
pmap_clear_bit(m, PTE_REF, NULL);
}
void
pmap_clear_modify(vm_page_t m)
{
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return;
pmap_clear_bit(m, PTE_CHG, NULL);
}
/*
@@ -1130,12 +1141,51 @@ pmap_clear_reference(vm_page_t m)
* should be tested and standardized at some point in the future for
* optimal aging of shared pages.
*/
static int query_debug = 0;
int
pmap_ts_referenced(vm_page_t m)
{
TODO;
return (0);
int count;
int pgcount;
int test;
static int ts_panic_count = 0;
struct pvo_entry *pvo;
struct pte *pt;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
return (0);
/* count phys pages */
pgcount = 0;
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pgcount++;
}
query_debug = 1;
test = pmap_query_bit(m, PTE_REF);
query_debug = 0;
count = pmap_clear_bit(m, PTE_REF, NULL);
if (!count && test) {
int i;
printf("pmap_ts: invalid zero count, ref %x, pgs %d\n",
PTE_REF, pgcount);
printf(" vm_page ref: %x\n", pmap_attr_fetch(m));
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
printf(" pvo - flag %x", pvo->pvo_pte.pte_lo);
pt = pmap_pvo_to_pte(pvo, -1);
if (pt != NULL) {
printf(" pte %x\n", pt->pte_lo);
} else {
printf(" pte (null)\n");
}
}
if (++ts_panic_count > 3)
panic("pmap_ts: panicing");
}
return (count);
}
/*
@@ -1315,8 +1365,21 @@ pmap_page_protect(vm_page_t m, vm_prot_t prot)
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
TODO;
return (0);
int loops;
struct pvo_entry *pvo;
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
loops = 0;
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
if (pvo->pvo_pmap == pmap)
return (TRUE);
if (++loops >= 16)
break;
}
return (FALSE);
}
static u_int pmap_vsidcontext;
@@ -1512,6 +1575,30 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
}
}
/*
* Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
* will reflect changes in pte's back to the vm_page.
*/
void
pmap_remove_all(vm_page_t m)
{
struct pvo_head *pvo_head;
struct pvo_entry *pvo, *next_pvo;
KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
("pv_remove_all: illegal for unmanaged page %#x",
VM_PAGE_TO_PHYS(m)));
pvo_head = vm_page_to_pvoh(m);
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
next_pvo = LIST_NEXT(pvo, pvo_vlink);
PMAP_PVO_CHECK(pvo); /* sanity check */
pmap_pvo_remove(pvo, -1);
}
vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
* Remove all pages from specified address space, this aids process exit
* speeds. This is much faster than pmap_remove in the case of running down
@@ -1581,9 +1668,11 @@ pmap_new_thread(struct thread *td, int pages)
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
/*
@@ -1979,7 +2068,7 @@ pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
PVO_PTEGIDX_CLR(pvo);
} else {
pmap_pte_overflow--;
}
}
/*
* Update our statistics.
@@ -2283,6 +2372,67 @@ pmap_query_bit(vm_page_t m, int ptebit)
struct pvo_entry *pvo;
struct pte *pt;
if (pmap_attr_fetch(m) & ptebit) {
if (query_debug)
printf("query_bit: attr %x, bit %x\n",
pmap_attr_fetch(m), ptebit);
return (TRUE);
}
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
PMAP_PVO_CHECK(pvo); /* sanity check */
/*
* See if we saved the bit off. If so, cache it and return
* success.
*/
if (pvo->pvo_pte.pte_lo & ptebit) {
pmap_attr_save(m, ptebit);
PMAP_PVO_CHECK(pvo); /* sanity check */
if (query_debug)
printf("query_bit: pte cache %x, bit %x\n",
pvo->pvo_pte.pte_lo, ptebit);
return (TRUE);
}
}
/*
* No luck, now go through the hard part of looking at the PTEs
* themselves. Sync so that any pending REF/CHG bits are flushed to
* the PTEs.
*/
SYNC();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
PMAP_PVO_CHECK(pvo); /* sanity check */
/*
* See if this pvo has a valid PTE. if so, fetch the
* REF/CHG bits from the valid PTE. If the appropriate
* ptebit is set, cache it and return success.
*/
pt = pmap_pvo_to_pte(pvo, -1);
if (pt != NULL) {
pmap_pte_synch(pt, &pvo->pvo_pte);
if (pvo->pvo_pte.pte_lo & ptebit) {
pmap_attr_save(m, ptebit);
PMAP_PVO_CHECK(pvo); /* sanity check */
if (query_debug)
printf("query_bit: real pte %x, bit %x\n",
pvo->pvo_pte.pte_lo, ptebit);
return (TRUE);
}
}
}
return (FALSE);
}
static boolean_t
pmap_query_bit_orig(vm_page_t m, int ptebit)
{
struct pvo_entry *pvo;
struct pte *pt;
if (pmap_attr_fetch(m) & ptebit)
return (TRUE);
@@ -2328,9 +2478,10 @@ pmap_query_bit(vm_page_t m, int ptebit)
return (TRUE);
}
static boolean_t
pmap_clear_bit(vm_page_t m, int ptebit)
static u_int
pmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
{
u_int count;
struct pvo_entry *pvo;
struct pte *pt;
int rv;
@@ -2354,20 +2505,27 @@ pmap_clear_bit(vm_page_t m, int ptebit)
* For each pvo entry, clear the pvo's ptebit. If this pvo has a
* valid pte clear the ptebit from the valid pte.
*/
count = 0;
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
PMAP_PVO_CHECK(pvo); /* sanity check */
pt = pmap_pvo_to_pte(pvo, -1);
if (pt != NULL) {
pmap_pte_synch(pt, &pvo->pvo_pte);
if (pvo->pvo_pte.pte_lo & ptebit)
if (pvo->pvo_pte.pte_lo & ptebit) {
count++;
pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
}
}
rv |= pvo->pvo_pte.pte_lo;
pvo->pvo_pte.pte_lo &= ~ptebit;
PMAP_PVO_CHECK(pvo); /* sanity check */
}
return ((rv & ptebit) != 0);
if (origbit != NULL) {
*origbit = rv;
}
return (count);
}
/*
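The reworked pmap_clear_bit() above returns the number of valid PTEs in which ptebit was actually set, and can optionally store the OR of the old pte_lo bits through origbit; that count is what lets pmap_ts_referenced() report a reference count instead of a boolean. A minimal sketch of the new contract (the helper name is hypothetical, not from the commit):

/*
 * Hypothetical helper: clear the REF bit on every mapping of the
 * page and report how many valid PTEs had it set.
 */
static void
example_age_page(vm_page_t m)
{
	u_int refs;
	int origbit;

	/* Returns a count, not a boolean, after this change. */
	refs = pmap_clear_bit(m, PTE_REF, &origbit);
	if (refs > 0)
		printf("page referenced via %u mapping(s), old pte_lo %#x\n",
		    refs, origbit);
}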

View File

@@ -52,6 +52,7 @@ static const char rcsid[] =
#include <vm/vm_map.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
struct bus_dma_tag {
bus_dma_tag_t parent;
@@ -512,4 +513,8 @@ bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{}
{
if ((op == BUS_DMASYNC_PREREAD) || (op == BUS_DMASYNC_PREWRITE))
powerpc_mb();
}
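With this change, bus_dmamap_sync() issues a memory barrier via powerpc_mb() (hence the new machine/cpufunc.h include) before a DMA transfer starts, so CPU stores are visible to the device. A hedged sketch of how a driver typically brackets a transfer; the softc fields are made up, and the tag/map would come from bus_dma_tag_create()/bus_dmamap_create()/bus_dmamap_load():

/* Flush CPU writes to memory before the device reads the buffer. */
bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREWRITE);
/* ... program the device with the bus address and start it ... */
/* After completion; POST ops are a no-op in this implementation. */
bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTWRITE);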

View File

@@ -183,9 +183,9 @@ openpic_probe(device_t dev)
}
sc->sc_ncpu = ((val & OPENPIC_FEATURE_LAST_CPU_MASK) >>
OPENPIC_FEATURE_LAST_CPU_SHIFT);
OPENPIC_FEATURE_LAST_CPU_SHIFT) + 1;
sc->sc_nirq = ((val & OPENPIC_FEATURE_LAST_IRQ_MASK) >>
OPENPIC_FEATURE_LAST_IRQ_SHIFT);
OPENPIC_FEATURE_LAST_IRQ_SHIFT) + 1;
device_set_desc(dev, "OpenPIC interrupt controller");
return (0);
@@ -201,14 +201,14 @@ openpic_attach(device_t dev)
softc = sc;
device_printf(dev,
"Version %s, supports up to %d CPUs and up to %d irqs\n",
sc->sc_version, sc->sc_ncpu+1, sc->sc_nirq+1);
"Version %s, supports %d CPUs and %d irqs\n",
sc->sc_version, sc->sc_ncpu, sc->sc_nirq);
sc->sc_rman.rm_type = RMAN_ARRAY;
sc->sc_rman.rm_descr = device_get_nameunit(dev);
if (rman_init(&sc->sc_rman) != 0 ||
rman_manage_region(&sc->sc_rman, 0, sc->sc_nirq) != 0) {
rman_manage_region(&sc->sc_rman, 0, sc->sc_nirq - 1) != 0) {
device_printf(dev, "could not set up resource management");
return (ENXIO);
}
@@ -458,6 +458,8 @@ openpic_intr(void)
/*mtmsr(msr | PSL_EE);*/
/* do the interrupt thang */
if (irq != 41 && irq != 19 && irq != 20)
printf("openpic_intr: irq %d\n", irq);
intr_handle(irq);
mtmsr(msr);
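The probe fix accounts for the OpenPIC feature register holding the index of the last CPU and last IRQ rather than counts, so both values need + 1; attach then reports the real counts, and the rman region covers IRQ IDs 0 through sc_nirq - 1. A worked example, assuming a controller whose feature register reports a last-IRQ field of 63:

/*
 * Sketch of the decode: a chip with IRQs 0..63 stores 63 in the
 * LAST_IRQ field, so the count is 63 + 1 = 64 and the managed rman
 * region is [0, 63].  val holds the feature register contents.
 */
nirq = ((val & OPENPIC_FEATURE_LAST_IRQ_MASK) >>
    OPENPIC_FEATURE_LAST_IRQ_SHIFT) + 1;	/* 64 IRQs */
rman_manage_region(&sc->sc_rman, 0, nirq - 1);	/* IDs 0..63 */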
