Add pv list linkage and a pmap pointer to struct tte. Remove separately
allocated pv entries and use the linkage in the tte for pv operations.
parent 35738638d6
commit e793e4d0b3
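
In outline: instead of allocating a separate pv_entry for each mapping and chaining those entries off every vm_page, the tte itself now carries the chain linkage plus a pointer to its owning pmap, so a page's alias chain threads directly through its ttes and pv operations need no allocator. A minimal before/after sketch (stand-in typedefs; only the fields that appear in the diffs below are taken from the source):

#include <sys/queue.h>

typedef unsigned long u_long;
typedef unsigned long vm_offset_t;	/* stand-ins for the kernel types */
struct pmap;

/*
 * Before: each mapping of a page required a separately allocated pv entry,
 * kept on a TAILQ hanging off the vm_page.
 */
struct pv_entry {
	TAILQ_ENTRY(pv_entry) pv_list;	/* per-page alias chain */
	struct pmap *pv_pmap;		/* pmap owning this mapping */
	vm_offset_t pv_va;		/* virtual address of this mapping */
};

/*
 * After: the tte itself carries the linkage and a pmap pointer, so the
 * per-page chain threads directly through the ttes with no allocations.
 */
struct tte {
	u_long tte_vpn;			/* virtual page number; recovers the va */
	u_long tte_data;		/* physical address and attribute bits */
	STAILQ_ENTRY(tte) tte_link;	/* per-page alias chain */
	struct pmap *tte_pmap;		/* pmap owning this mapping */
};
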
@@ -54,14 +54,10 @@
 
 #define	pmap_resident_count(pm)	(pm->pm_stats.resident_count)
 
-struct pv_entry;
-
 typedef struct pmap *pmap_t;
-typedef struct pv_entry *pv_entry_t;
 
 struct md_page {
-	TAILQ_HEAD(, pv_entry) pv_list;
-	int pv_list_count;
+	STAILQ_HEAD(, tte) tte_list;
 	int colors[DCACHE_COLORS];
 };
 
@@ -73,14 +69,6 @@ struct pmap {
 	struct pmap_statistics pm_stats;
 };
 
-struct pv_entry {
-	TAILQ_ENTRY(pv_entry) pv_list;
-	TAILQ_ENTRY(pv_entry) pv_plist;
-	pmap_t pv_pmap;
-	vm_offset_t pv_va;
-	vm_page_t pv_m;
-};
-
 void pmap_bootstrap(vm_offset_t ekva);
 void pmap_context_rollover(void);
 vm_offset_t pmap_kextract(vm_offset_t va);
@@ -40,20 +40,10 @@
 #ifndef	_MACHINE_PV_H_
 #define	_MACHINE_PV_H_
 
-extern uma_zone_t pvzone;
-extern struct vm_object pvzone_obj;
-extern int pv_entry_count;
-extern int pv_entry_max;
-extern int pv_entry_high_water;
-extern struct pv_entry *pvinit;
+struct tte;
 
-void *pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
-pv_entry_t pv_alloc(void);
-void pv_free(pv_entry_t pv);
-
-void pv_insert(pmap_t pm, vm_page_t m, vm_offset_t va);
-pv_entry_t pv_lookup(pmap_t pm, vm_page_t m, vm_offset_t va);
-void pv_remove(pmap_t pm, vm_page_t m, vm_offset_t va);
+void pv_insert(pmap_t pm, vm_page_t m, struct tte *tp);
+void pv_remove(pmap_t pm, vm_page_t m, struct tte *tp);
 int pv_page_exists(pmap_t pm, vm_page_t m);
 void pv_remove_all(vm_page_t m);
 
@@ -32,7 +32,7 @@
 #ifndef	_MACHINE_TTE_H_
 #define	_MACHINE_TTE_H_
 
-#define	TTE_SHIFT	(4)
+#define	TTE_SHIFT	(5)
 
 #define	TD_SIZE_SHIFT	(61)
 #define	TD_SOFT2_SHIFT	(50)
@@ -98,10 +98,16 @@
 	(((tp)->tte_data & TD_EXEC) ? (TLB_DTLB | TLB_ITLB) : TLB_DTLB)
 #define	TTE_GET_VA(tp) \
 	((tp)->tte_vpn << PAGE_SHIFT)
+#define	TTE_GET_PMAP(tp) \
+	((tp)->tte_pmap)
+#define	TTE_ZERO(tp) \
+	bzero(tp, sizeof(*tp))
 
 struct tte {
 	u_long	tte_vpn;
 	u_long	tte_data;
+	STAILQ_ENTRY(tte) tte_link;
+	struct	pmap *tte_pmap;
 };
 
 static __inline int
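
Because the tte now records its pmap and its virtual address is recoverable from tte_vpn, walking a page's alias chain yields complete (pmap, va) pairs with no per-entry TSB lookup. A hedged sketch of the access pattern the new macros enable (stand-in types again; PAGE_SHIFT of 13 assumes sparc64's 8K base page):

#include <sys/queue.h>

typedef unsigned long u_long;
typedef unsigned long vm_offset_t;
#define	PAGE_SHIFT	13		/* assumed: 8K pages on sparc64 */

struct pmap;
struct tte {
	u_long tte_vpn;
	u_long tte_data;
	STAILQ_ENTRY(tte) tte_link;
	struct pmap *tte_pmap;
};
struct md_page {
	STAILQ_HEAD(, tte) tte_list;
};

#define	TTE_GET_VA(tp)		((tp)->tte_vpn << PAGE_SHIFT)
#define	TTE_GET_PMAP(tp)	((tp)->tte_pmap)

/* Walk every mapping of a page; each tte identifies its mapping fully. */
static void
walk_mappings(struct md_page *md)
{
	struct tte *tp;

	STAILQ_FOREACH(tp, &md->tte_list, tte_link) {
		struct pmap *pm = TTE_GET_PMAP(tp);
		vm_offset_t va = TTE_GET_VA(tp);
		(void)pm; (void)va;	/* operate on the (pm, va) mapping */
	}
}

int
main(void)
{
	struct md_page md;

	STAILQ_INIT(&md.tte_list);
	walk_mappings(&md);		/* empty chain: no iterations */
	return (0);
}
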
@@ -521,8 +521,7 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
 		vm_page_t m;
 
 		m = &vm_page_array[i];
-		TAILQ_INIT(&m->md.pv_list);
-		m->md.pv_list_count = 0;
+		STAILQ_INIT(&m->md.tte_list);
 	}
 
 	for (i = 0; i < translations_size; i++) {
@@ -536,10 +535,6 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
 			panic("pmap_init: vm_map_find");
 	}
 
-	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
-	uma_zone_set_allocf(pvzone, pv_allocf);
-	uma_prealloc(pvzone, vm_page_array_size);
 	pmap_initialized = TRUE;
 }
 
@@ -551,13 +546,6 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
 void
 pmap_init2(void)
 {
-	int shpgperproc;
-
-	shpgperproc = PMAP_SHPGPERPROC;
-	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
-	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
-	pv_entry_high_water = 9 * (pv_entry_max / 10);
-	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
 }
 
 /*
@@ -597,10 +585,7 @@ int
 pmap_cache_enter(vm_page_t m, vm_offset_t va)
 {
 	struct tte *tp;
-	vm_offset_t pa;
-	pv_entry_t pv;
-	int c;
-	int i;
+	int c, i;
 
 	CTR2(KTR_PMAP, "pmap_cache_enter: m=%p va=%#lx", m, va);
 	PMAP_STATS_INC(pmap_ncache_enter);
@@ -619,15 +604,12 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
 		return (0);
 	}
 	CTR0(KTR_PMAP, "pmap_cache_enter: marking uncacheable");
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-		if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) != NULL) {
-			atomic_clear_long(&tp->tte_data, TD_CV);
-			tlb_page_demap(TLB_DTLB | TLB_ITLB, pv->pv_pmap,
-			    pv->pv_va);
-		}
+	STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+		tp->tte_data &= ~TD_CV;
+		tlb_page_demap(TLB_DTLB | TLB_ITLB, TTE_GET_PMAP(tp),
+		    TTE_GET_VA(tp));
 	}
-	pa = VM_PAGE_TO_PHYS(m);
-	dcache_page_inval(pa);
+	dcache_page_inval(VM_PAGE_TO_PHYS(m));
 	return (0);
 }
@@ -700,9 +682,8 @@ pmap_kremove(vm_offset_t va)
 	tp = tsb_kvtotte(va);
 	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
 	    tp->tte_data);
-	atomic_clear_long(&tp->tte_data, TD_V);
-	tp->tte_vpn = 0;
+	tp->tte_data = 0;
+	tp->tte_vpn = 0;
 }
 
 /*
@@ -1202,23 +1183,6 @@ pmap_growkernel(vm_offset_t addr)
 void
 pmap_collect(void)
 {
-	static int warningdone;
-	vm_page_t m;
-	int i;
-
-	if (pmap_pagedaemon_waken == 0)
-		return;
-	if (warningdone++ < 5)
-		printf("pmap_collect: collecting pv entries -- suggest"
-		    "increasing PMAP_SHPGPERPROC\n");
-	for (i = 0; i < vm_page_array_size; i++) {
-		m = &vm_page_array[i];
-		if (m->wire_count || m->hold_count || m->busy ||
-		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
-			continue;
-		pv_remove_all(m);
-	}
-	pmap_pagedaemon_waken = 0;
 }
 
 static int
@@ -1236,12 +1200,10 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
 			vm_page_dirty(m);
 		if ((tp->tte_data & TD_REF) != 0)
 			vm_page_flag_set(m, PG_REFERENCED);
-		pv_remove(pm, m, va);
+		pv_remove(pm, m, tp);
 		pmap_cache_remove(m, va);
 	}
-	atomic_clear_long(&tp->tte_data, TD_V);
-	tp->tte_vpn = 0;
-	tp->tte_data = 0;
+	TTE_ZERO(tp);
 	if (PMAP_REMOVE_DONE(pm))
 		return (0);
 	return (1);
@@ -1411,7 +1373,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		CTR0(KTR_PMAP, "pmap_enter: replace");
 		PMAP_STATS_INC(pmap_enter_nreplace);
 		pmap_remove_tte(pm, NULL, tp, va);
-		tlb_tte_demap(tp, pm);
+		tlb_page_demap(TLB_DTLB | TLB_ITLB, pm, va);
 	} else {
 		CTR0(KTR_PMAP, "pmap_enter: new");
 		PMAP_STATS_INC(pmap_enter_nnew);
@@ -1442,17 +1404,6 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 			data |= TD_W;
 		}
 
-		/*
-		 * Enter on the pv list if part of our managed memory.
-		 */
-		if (pmap_initialized &&
-		    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
-			pv_insert(pm, m, va);
-			data |= TD_PV;
-			if (pmap_cache_enter(m, va) != 0)
-				data |= TD_CV;
-		}
-
 		tsb_tte_enter(pm, m, va, data);
 	}
 }
@@ -1502,14 +1453,6 @@ pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
 		data = tp->tte_data &
 		    ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
 		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
-		if ((tp->tte_data & TD_PV) != 0) {
-			KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
-			    ("pmap_enter: unmanaged pv page"));
-			pv_insert(dst_pmap, m, va);
-			data |= TD_PV;
-			if (pmap_cache_enter(m, va) != 0)
-				data |= TD_CV;
-		}
 		tsb_tte_enter(dst_pmap, m, va, data);
 	}
 	return (1);
@@ -61,105 +61,46 @@
 #include <machine/tlb.h>
 #include <machine/tsb.h>
 
-uma_zone_t pvzone;
-struct vm_object pvzone_obj;
-int pv_entry_count;
-int pv_entry_max;
-int pv_entry_high_water;
-struct pv_entry *pvinit;
-
-pv_entry_t
-pv_alloc(void)
-{
-
-	pv_entry_count++;
-	if (pv_entry_high_water && (pv_entry_count > pv_entry_high_water) &&
-	    (pmap_pagedaemon_waken == 0)) {
-		pmap_pagedaemon_waken = 1;
-		wakeup(&vm_pages_needed);
-	}
-	return (uma_zalloc(pvzone, M_WAITOK));
-}
-
-void *
-pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
-{
-
-	*flags = UMA_SLAB_PRIV;
-	return (void *)kmem_alloc(kernel_map, bytes);
-}
-
-void
-pv_free(pv_entry_t pv)
-{
-
-	pv_entry_count--;
-	uma_zfree(pvzone, pv);
-}
-
 /*
  * Insert a mapped stte at the tail of an address alias chain.
  */
 void
-pv_insert(pmap_t pm, vm_page_t m, vm_offset_t va)
+pv_insert(pmap_t pm, vm_page_t m, struct tte *tp)
 {
-	pv_entry_t pv;
-
-	pv = pv_alloc();
-	pv->pv_va = va;
-	pv->pv_m = m;
-	pv->pv_pmap = pm;
-	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
-	m->md.pv_list_count++;
+	STAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
+	tp->tte_pmap = pm;
 	pm->pm_stats.resident_count++;
 }
 
-pv_entry_t
-pv_lookup(pmap_t pm, vm_page_t m, vm_offset_t va)
-{
-	pv_entry_t pv;
-
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
-		if (pm == pv->pv_pmap && va == pv->pv_va)
-			break;
-	return (pv);
-}
-
 /*
  * Remove a mapped tte from its address alias chain.
  */
 void
-pv_remove(pmap_t pm, vm_page_t m, vm_offset_t va)
+pv_remove(pmap_t pm, vm_page_t m, struct tte *tp)
 {
-	pv_entry_t pv;
-
-	if ((pv = pv_lookup(pm, m, va)) != NULL) {
-		m->md.pv_list_count--;
-		pm->pm_stats.resident_count--;
-		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-		if (TAILQ_EMPTY(&m->md.pv_list))
-			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
-		pv_free(pv);
-	}
+	STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
+	if (STAILQ_EMPTY(&m->md.tte_list))
+		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
+	tp->tte_pmap->pm_stats.resident_count--;
 }
 
 void
 pv_bit_clear(vm_page_t m, u_long bits)
 {
 	struct tte *tp;
-	pv_entry_t pv;
 
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-		KASSERT(pv->pv_pmap != NULL, ("pv_bit_clear: null pmap"));
-		if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) != NULL &&
-		    (tp->tte_data & bits) != 0) {
+	STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+		if ((tp->tte_data & bits) != 0) {
 			if ((bits & TD_SW) != 0 &&
-			    pmap_track_modified(pv->pv_pmap, pv->pv_va)) {
+			    pmap_track_modified(TTE_GET_PMAP(tp),
+			    TTE_GET_VA(tp))) {
 				if (tp->tte_data & TD_W)
					vm_page_dirty(m);
 			}
-			atomic_clear_long(&tp->tte_data, bits);
-			tlb_tte_demap(tp, pv->pv_pmap);
+			tp->tte_data &= ~bits;
+			tlb_tte_demap(tp, TTE_GET_PMAP(tp));
 		}
 	}
 }
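
A design note on the queue choice: STAILQ_REMOVE on a singly-linked tail queue must walk the list to find the predecessor, so pv_remove above is O(n) in the number of aliases of the page; in exchange, each tte spends one pointer less than a doubly-linked TAILQ would cost. A self-contained demonstration of the insert/remove pattern (nothing assumed beyond sys/queue.h):

#include <stdio.h>
#include <sys/queue.h>

struct tte {
	unsigned long tte_vpn;
	STAILQ_ENTRY(tte) tte_link;
};
STAILQ_HEAD(tte_list, tte);

int
main(void)
{
	struct tte_list head = STAILQ_HEAD_INITIALIZER(head);
	struct tte a = { .tte_vpn = 1 }, b = { .tte_vpn = 2 };
	struct tte *tp;

	/* pv_insert: link the tte onto the page's alias chain. */
	STAILQ_INSERT_TAIL(&head, &a, tte_link);
	STAILQ_INSERT_TAIL(&head, &b, tte_link);

	/* pv_remove: unlink; O(n), as the predecessor must be found. */
	STAILQ_REMOVE(&head, &a, tte, tte_link);

	STAILQ_FOREACH(tp, &head, tte_link)
		printf("vpn %lu\n", tp->tte_vpn);	/* prints: vpn 2 */
	return (0);
}
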
@@ -167,30 +108,27 @@ pv_bit_clear(vm_page_t m, u_long bits)
 int
 pv_bit_count(vm_page_t m, u_long bits)
 {
-	pv_entry_t pvf;
-	pv_entry_t pvn;
-	pv_entry_t pv;
+	struct tte *tpf;
+	struct tte *tpn;
+	struct tte *tp;
 	int count;
 
 	count = 0;
-	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
-		pvf = pv;
+	if ((tp = STAILQ_FIRST(&m->md.tte_list)) != NULL) {
+		tpf = tp;
 		do {
-			pvn = TAILQ_NEXT(pv, pv_list);
-			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
-			if (!pmap_track_modified(pv->pv_pmap, pv->pv_va))
+			tpn = STAILQ_NEXT(tp, tte_link);
+			STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
+			STAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
+			if (!pmap_track_modified(TTE_GET_PMAP(tp),
+			    TTE_GET_VA(tp)))
 				continue;
-			tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va);
-			if (tp != NULL) {
-				if ((tp->tte_data & bits) != 0) {
-					atomic_clear_long(&tp->tte_data, bits);
-					if (++count > 4)
-						break;
-				}
+			if ((tp->tte_data & bits) != 0) {
+				tp->tte_data &= ~bits;
+				if (++count > 4)
+					break;
 			}
-		} while ((pv = pvn) != NULL && pv != pvf);
+		} while ((tp = tpn) != NULL && tp != tpf);
 	}
 	return (count);
 }
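
The rewritten pv_bit_count keeps its sampling fair by rotating each visited tte to the tail of the chain before testing it, so successive calls start at different mappings, and it stops after a handful of hits rather than scanning every alias. A self-contained sketch of that rotate-and-sample pattern (plain integers stand in for tte_data):

#include <stdio.h>
#include <sys/queue.h>

struct ent {
	unsigned long data;
	STAILQ_ENTRY(ent) link;
};
STAILQ_HEAD(entlist, ent);

/*
 * Count (and clear) entries with any of `bits` set, rotating each visited
 * entry to the tail so repeated calls sample the chain fairly; stop early
 * after a few hits, as pv_bit_count does.
 */
static int
rotate_and_count(struct entlist *head, unsigned long bits)
{
	struct ent *e, *first, *next;
	int count = 0;

	if ((e = STAILQ_FIRST(head)) == NULL)
		return (0);
	first = e;
	do {
		next = STAILQ_NEXT(e, link);
		STAILQ_REMOVE(head, e, ent, link);
		STAILQ_INSERT_TAIL(head, e, link);
		if ((e->data & bits) != 0) {
			e->data &= ~bits;
			if (++count > 4)
				break;
		}
	} while ((e = next) != NULL && e != first);
	return (count);
}

int
main(void)
{
	struct entlist head = STAILQ_HEAD_INITIALIZER(head);
	struct ent e1 = { .data = 1 }, e2 = { .data = 0 }, e3 = { .data = 3 };

	STAILQ_INSERT_TAIL(&head, &e1, link);
	STAILQ_INSERT_TAIL(&head, &e2, link);
	STAILQ_INSERT_TAIL(&head, &e3, link);
	printf("%d\n", rotate_and_count(&head, 1));	/* prints 2 */
	return (0);
}
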
@@ -199,19 +137,15 @@ int
 pv_bit_test(vm_page_t m, u_long bits)
 {
 	struct tte *tp;
-	pv_entry_t pv;
 
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if (bits & (TD_REF | TD_W)) {
-			if (!pmap_track_modified(pv->pv_pmap, pv->pv_va))
+			if (!pmap_track_modified(TTE_GET_PMAP(tp),
+			    TTE_GET_VA(tp)))
 				continue;
 		}
-		KASSERT(pv->pv_pmap != NULL, ("pv_bit_test: null pmap"));
-		if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) != NULL) {
-			if (atomic_load_long(&tp->tte_data) & bits) {
-				return (TRUE);
-			}
-		}
+		if (tp->tte_data & bits)
+			return (TRUE);
 	}
 	return (FALSE);
 }
@@ -224,15 +158,14 @@ pv_bit_test(vm_page_t m, u_long bits)
 int
 pv_page_exists(pmap_t pm, vm_page_t m)
 {
-	pv_entry_t pv;
-	int loops = 0;
+	struct tte *tp;
+	int loops;
 
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-		if (pv->pv_pmap == pm) {
+	loops = 0;
+	STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+		if (TTE_GET_PMAP(tp) == pm)
 			return (TRUE);
-		}
-		loops++;
-		if (loops >= 16)
+		if (++loops >= 16)
 			break;
 	}
 	return (FALSE);
@@ -241,38 +174,32 @@ pv_page_exists(pmap_t pm, vm_page_t m)
 void
 pv_remove_all(vm_page_t m)
 {
+	struct pmap *pm;
 	struct tte *tp;
-	pv_entry_t pv;
-	u_long data;
+	vm_offset_t va;
 
 	KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
 	    ("pv_remove_all: illegal for unmanaged page %#lx",
 	    VM_PAGE_TO_PHYS(m)));
-	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
-		tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va);
-		KASSERT(tp != NULL, ("pv_remove_all: mapping lost"));
-		data = atomic_load_long(&tp->tte_data);
-		if ((data & TD_WIRED) != 0)
-			pv->pv_pmap->pm_stats.wired_count--;
-		if ((data & TD_REF) != 0)
+	while ((tp = STAILQ_FIRST(&m->md.tte_list)) != NULL) {
+		pm = TTE_GET_PMAP(tp);
+		va = TTE_GET_VA(tp);
+		if ((tp->tte_data & TD_WIRED) != 0)
+			pm->pm_stats.wired_count--;
+		if ((tp->tte_data & TD_REF) != 0)
 			vm_page_flag_set(m, PG_REFERENCED);
-		if ((data & TD_W) != 0) {
-			if (pmap_track_modified(pv->pv_pmap, pv->pv_va))
+		if ((tp->tte_data & TD_W) != 0) {
+			if (pmap_track_modified(pm, va))
 				vm_page_dirty(m);
 		}
-		atomic_clear_long(&tp->tte_data, TD_V);
-		tlb_tte_demap(tp, pv->pv_pmap);
-		tp->tte_vpn = 0;
-		tp->tte_data = 0;
-		pv->pv_pmap->pm_stats.resident_count--;
-		m->md.pv_list_count--;
-		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-		pmap_cache_remove(pv->pv_m, pv->pv_va);
-		pv_free(pv);
+		tp->tte_data &= ~TD_V;
+		tlb_page_demap(TLB_DTLB | TLB_ITLB, pm, va);
+		STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
+		pm->pm_stats.resident_count--;
+		pmap_cache_remove(m, va);
+		TTE_ZERO(tp);
 	}
-	KASSERT(m->md.pv_list_count == 0,
-	    ("pv_remove_all: leaking pv entries 0 != %d", m->md.pv_list_count));
-	KASSERT(TAILQ_EMPTY(&m->md.pv_list),
+	KASSERT(STAILQ_EMPTY(&m->md.tte_list),
 	    ("pv_remove_all: leaking pv entries"));
 	vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
 }
@@ -149,6 +149,12 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long data)
 	if (pm == kernel_pmap) {
 		TSB_STATS_INC(tsb_nenter_k);
 		tp = tsb_kvtotte(va);
+		if ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0) {
+			pv_insert(pm, m, tp);
+			data |= TD_PV;
+		}
+		if (pmap_cache_enter(m, va) != 0)
+			data |= TD_CV;
 		tp->tte_vpn = TV_VPN(va);
 		tp->tte_data = data;
 		return (tp);
@@ -189,11 +195,18 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long data)
 			if ((tp->tte_data & TD_REF) != 0)
 				vm_page_flag_set(om, PG_REFERENCED);
 			pmap_cache_remove(om, ova);
-			pv_remove(pm, om, ova);
+			pv_remove(pm, om, tp);
 		}
 		tlb_tte_demap(tp, pm);
 	}
 
+	if ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0) {
+		pv_insert(pm, m, tp);
+		data |= TD_PV;
+	}
+	if (pmap_cache_enter(m, va) != 0)
+		data |= TD_CV;
+
 	tp->tte_vpn = TV_VPN(va);
 	tp->tte_data = data;
 
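
Taken together: pv bookkeeping now happens at the single point where a tte is written (tsb_tte_enter), and every managed mapping of a page stays reachable from that page without auxiliary allocations. An end-to-end sketch under stand-in types (pv_insert reduced to its list operation; names mirror the diffs above, the rest is illustrative):

#include <stdio.h>
#include <sys/queue.h>

typedef unsigned long u_long;
#define	PAGE_SHIFT	13		/* assumed: 8K pages on sparc64 */

struct pmap { int id; };
struct tte {
	u_long tte_vpn;
	u_long tte_data;
	STAILQ_ENTRY(tte) tte_link;
	struct pmap *tte_pmap;
};
struct md_page { STAILQ_HEAD(, tte) tte_list; };

/* pv_insert as in the new pv.c, minus the pmap statistics. */
static void
pv_insert(struct pmap *pm, struct md_page *md, struct tte *tp)
{
	STAILQ_INSERT_TAIL(&md->tte_list, tp, tte_link);
	tp->tte_pmap = pm;
}

int
main(void)
{
	struct md_page md;
	struct pmap kernel = { 0 }, user = { 1 };
	struct tte k = { .tte_vpn = 0x100 }, u = { .tte_vpn = 0x200 };
	struct tte *tp;

	STAILQ_INIT(&md.tte_list);
	pv_insert(&kernel, &md, &k);	/* kernel mapping of the page */
	pv_insert(&user, &md, &u);	/* user alias of the same page */

	/*
	 * pv_remove_all-style walk: every mapping is reachable from the
	 * page, each with its owning pmap and va.
	 */
	STAILQ_FOREACH(tp, &md.tte_list, tte_link)
		printf("pmap %d va %#lx\n", tp->tte_pmap->id,
		    tp->tte_vpn << PAGE_SHIFT);
	return (0);
}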