Remove the tlb argument to tlb_page_demap (itlb or dtlb), in order to better match the pmap_invalidate API.
This commit is contained in:
jake 2002-07-26 15:54:04 +00:00
parent 11df7e3f6f
commit dc1ed5c34f
9 changed files with 21 additions and 60 deletions

View File

@ -64,7 +64,6 @@ struct ipi_cache_args {
struct ipi_tlb_args {
u_int ita_mask;
u_long ita_tlb;
struct pmap *ita_pmap;
u_long ita_start;
u_long ita_end;
@ -157,7 +156,7 @@ ipi_tlb_context_demap(struct pmap *pm)
}
static __inline void *
ipi_tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
struct ipi_tlb_args *ita;
u_int cpus;
@ -168,7 +167,6 @@ ipi_tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
return (NULL);
ita = &ipi_tlb_args;
ita->ita_mask = cpus | PCPU_GET(cpumask);
ita->ita_tlb = tlb;
ita->ita_pmap = pm;
ita->ita_va = va;
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
@ -229,7 +227,7 @@ ipi_tlb_context_demap(struct pmap *pm)
}
static __inline void *
ipi_tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
return (NULL);
}

View File

@ -62,9 +62,6 @@
#define TLB_CTX_USER_MIN (1)
#define TLB_CTX_USER_MAX (8192)
#define TLB_DTLB (1 << 0)
#define TLB_ITLB (1 << 1)
#define MMU_SFSR_ASI_SHIFT (16)
#define MMU_SFSR_FT_SHIFT (7)
#define MMU_SFSR_E_SHIFT (6)
@ -88,11 +85,8 @@ extern struct tlb_entry *kernel_tlbs;
extern int tlb_slot_count;
void tlb_context_demap(struct pmap *pm);
void tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va);
void tlb_page_demap(struct pmap *pm, vm_offset_t va);
void tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end);
void tlb_dump(void);
#define tlb_tte_demap(tp, pm) \
tlb_page_demap(TTE_GET_TLB(tp), pm, TTE_GET_VA(tp))
#endif /* !_MACHINE_TLB_H_ */

View File

@ -94,8 +94,6 @@
#define TTE_GET_PA(tp) \
((tp)->tte_data & (TD_PA_MASK << TD_PA_SHIFT))
#define TTE_GET_TLB(tp) \
(((tp)->tte_data & TD_EXEC) ? (TLB_DTLB | TLB_ITLB) : TLB_DTLB)
#define TTE_GET_VA(tp) \
((tp)->tte_vpn << PAGE_SHIFT)
#define TTE_GET_PMAP(tp) \

View File

@ -94,9 +94,6 @@ ASSYM(TLB_DEMAP_PRIMARY, TLB_DEMAP_PRIMARY);
ASSYM(TLB_DEMAP_CONTEXT, TLB_DEMAP_CONTEXT);
ASSYM(TLB_DEMAP_PAGE, TLB_DEMAP_PAGE);
ASSYM(TLB_DTLB, TLB_DTLB);
ASSYM(TLB_ITLB, TLB_ITLB);
ASSYM(TSB_BUCKET_ADDRESS_BITS, TSB_BUCKET_ADDRESS_BITS);
ASSYM(TSB_BUCKET_SHIFT, TSB_BUCKET_SHIFT);
ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
@ -197,7 +194,6 @@ ASSYM(IR_PRI, offsetof(struct intr_request, ir_pri));
ASSYM(IR_VEC, offsetof(struct intr_request, ir_vec));
ASSYM(ITA_MASK, offsetof(struct ipi_tlb_args, ita_mask));
ASSYM(ITA_TLB, offsetof(struct ipi_tlb_args, ita_tlb));
ASSYM(ITA_PMAP, offsetof(struct ipi_tlb_args, ita_pmap));
ASSYM(ITA_START, offsetof(struct ipi_tlb_args, ita_start));
ASSYM(ITA_END, offsetof(struct ipi_tlb_args, ita_end));

View File

@ -167,23 +167,14 @@ ENTRY(tl_ipi_tlb_page_demap)
cmp %g1, %g2
movne %xcc, TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE, %g3
ldx [%g5 + ITA_TLB], %g1
ldx [%g5 + ITA_VA], %g2
or %g2, %g3, %g2
andcc %g1, TLB_DTLB, %g0
bz,a,pn %xcc, 1f
nop
stxa %g0, [%g2] ASI_DMMU_DEMAP
membar #Sync
1: andcc %g1, TLB_ITLB, %g0
bz,a,pn %xcc, 2f
nop
stxa %g0, [%g2] ASI_IMMU_DEMAP
membar #Sync
2: IPI_WAIT(%g5, %g1, %g2, %g3)
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_tlb_page_demap)

View File

@ -167,23 +167,14 @@ ENTRY(tl_ipi_tlb_page_demap)
cmp %g1, %g2
movne %xcc, TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE, %g3
ldx [%g5 + ITA_TLB], %g1
ldx [%g5 + ITA_VA], %g2
or %g2, %g3, %g2
andcc %g1, TLB_DTLB, %g0
bz,a,pn %xcc, 1f
nop
stxa %g0, [%g2] ASI_DMMU_DEMAP
membar #Sync
1: andcc %g1, TLB_ITLB, %g0
bz,a,pn %xcc, 2f
nop
stxa %g0, [%g2] ASI_IMMU_DEMAP
membar #Sync
2: IPI_WAIT(%g5, %g1, %g2, %g3)
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_tlb_page_demap)

View File

@ -602,8 +602,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
CTR0(KTR_PMAP, "pmap_cache_enter: marking uncacheable");
STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
tp->tte_data &= ~TD_CV;
tlb_page_demap(TLB_DTLB | TLB_ITLB, TTE_GET_PMAP(tp),
TTE_GET_VA(tp));
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
dcache_page_inval(VM_PAGE_TO_PHYS(m));
m->md.flags |= PG_UNCACHEABLE;
@ -630,8 +629,7 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
return;
STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
tp->tte_data |= TD_CV;
tlb_page_demap(TLB_DTLB | TLB_ITLB, TTE_GET_PMAP(tp),
TTE_GET_VA(tp));
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
m->md.flags &= ~PG_UNCACHEABLE;
}
@ -658,7 +656,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
STAILQ_REMOVE(&om->md.tte_list, tp, tte, tte_link);
pmap_cache_remove(om, ova);
if (va != ova)
tlb_page_demap(TLB_DTLB, kernel_pmap, ova);
tlb_page_demap(kernel_pmap, ova);
}
data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_P | TD_W;
if (pmap_cache_enter(m, va) != 0)
@ -839,7 +837,7 @@ pmap_new_thread(struct thread *td)
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (KSTACK_GUARD_PAGES != 0) {
tlb_page_demap(TLB_DTLB, kernel_pmap, ks);
tlb_page_demap(kernel_pmap, ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
}
td->td_kstack = ks;
@ -1155,7 +1153,7 @@ pmap_remove_all(vm_page_t m)
pmap_track_modified(pm, va))
vm_page_dirty(m);
tp->tte_data &= ~TD_V;
tlb_page_demap(TLB_DTLB | TLB_ITLB, pm, va);
tlb_page_demap(pm, va);
STAILQ_REMOVE(&m->md.tte_list, tp, tte, tte_link);
pm->pm_stats.resident_count--;
pmap_cache_remove(m, va);
@ -1291,7 +1289,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Delete the old mapping.
*/
tlb_tte_demap(tp, pm);
tlb_page_demap(pm, TTE_GET_VA(tp));
} else {
/*
* If there is an existing mapping, but its for a different
@ -1301,7 +1299,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
CTR0(KTR_PMAP, "pmap_enter: replace");
PMAP_STATS_INC(pmap_enter_nreplace);
pmap_remove_tte(pm, NULL, tp, va);
tlb_page_demap(TLB_DTLB | TLB_ITLB, pm, va);
tlb_page_demap(pm, va);
} else {
CTR0(KTR_PMAP, "pmap_enter: new");
PMAP_STATS_INC(pmap_enter_nnew);
@ -1608,7 +1606,7 @@ pmap_clear_modify(vm_page_t m)
continue;
if ((tp->tte_data & TD_W) != 0) {
tp->tte_data &= ~TD_W;
tlb_tte_demap(tp, TTE_GET_PMAP(tp));
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
}
}
@ -1625,7 +1623,7 @@ pmap_clear_reference(vm_page_t m)
continue;
if ((tp->tte_data & TD_REF) != 0) {
tp->tte_data &= ~TD_REF;
tlb_tte_demap(tp, TTE_GET_PMAP(tp));
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
}
}
@ -1646,7 +1644,7 @@ pmap_clear_write(vm_page_t m)
TTE_GET_VA(tp)))
vm_page_dirty(m);
tp->tte_data &= ~(TD_SW | TD_W);
tlb_tte_demap(tp, TTE_GET_PMAP(tp));
tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
}
}
}

View File

@ -81,14 +81,14 @@ tlb_context_demap(struct pmap *pm)
}
void
tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
u_long flags;
void *cookie;
u_long s;
critical_enter();
cookie = ipi_tlb_page_demap(tlb, pm, va);
cookie = ipi_tlb_page_demap(pm, va);
if (pm->pm_active & PCPU_GET(cpumask)) {
KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
("tlb_page_demap: inactive pmap?"));
@ -98,14 +98,9 @@ tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
s = intr_disable();
if (tlb & TLB_DTLB) {
stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
membar(Sync);
}
if (tlb & TLB_ITLB) {
stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
membar(Sync);
}
stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
membar(Sync);
intr_restore(s);
}
ipi_wait(cookie);

View File

@ -163,7 +163,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long data)
TSB_STATS_INC(tsb_nrepl);
ova = TTE_GET_VA(tp);
pmap_remove_tte(pm, NULL, tp, ova);
tlb_page_demap(TLB_DTLB | TLB_ITLB, pm, ova);
tlb_page_demap(pm, ova);
}
enter: