Modify the tte format to not include the tlb context number and to store
the virtual page number in a much more convenient way: all in one piece.
This greatly simplifies the comparison for a matching tte and allows the
fault handlers to be much simpler, since they no longer need to load
weird masks.

Rewrite the tlb fault handlers to account for the new format.  These are
also written to allow faults on the user tsb inside the fault handlers;
the kernel fault handler must be aware of this and must not clobber the
other's registers.  The faults do not yet occur, because they depend on
other support that is still needed (and still under my desk).

Bug fixes from:	tmm
Jake Burkholder, 2002-02-25 04:56:50 +00:00
commit 3c997c536c (parent 13c801713d)
8 changed files with 1265 additions and 575 deletions
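
Note: the heart of the change is easiest to see in the tte.h diff below.
Before, the virtual page number was split across two words: high bits in
tte_tag (TT_VA) and low bits in the soft2 field of tte_data (TD_VA_LOW),
so matching a tte took two masked compares.  Now the whole vpn sits in
tte_vpn and a match is a single equality test.  A minimal userland sketch
of the before/after shape follows; the TD_V bit position and the stdint
types are simplified stand-ins, not the kernel's definitions:

/*
 * Sketch only: distilled from the tte.h hunks below, not kernel code.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT      13                      /* 8K base pages */
#define TD_V            (1UL << 63)             /* assumed valid-bit position */

/* New format: the whole virtual page number in one word. */
struct tte {
        uint64_t        tte_vpn;
        uint64_t        tte_data;
};

/*
 * The old format needed two masked compares, roughly:
 *
 *      ((tag ^ TT_VA(va)) & TT_VA_MASK) == 0 &&
 *      ((data ^ TD_VA_LOW(va)) & TD_VA_LOW_MASK) == 0
 *
 * The new format is one load and one compare:
 */
static inline int
tte_match_vpn(struct tte tte, uint64_t vpn)
{
        return ((tte.tte_data & TD_V) != 0 && tte.tte_vpn == vpn);
}

static inline int
tte_match(struct tte tte, uint64_t va)
{
        return (tte_match_vpn(tte, va >> PAGE_SHIFT));
}

int
main(void)
{
        struct tte t = { 0xdeadbUL, TD_V };

        assert(tte_match(t, 0xdeadbUL << PAGE_SHIFT));
        return (0);
}

This is also why the assembly glue shrinks: genassym.c no longer has to
export TT_VA_MASK, TT_VA_SHIFT, TT_CTX_SHIFT, or the TD_VA_LOW_*
constants to the fault handlers.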


@@ -31,16 +31,17 @@
 #define TLB_SLOT_COUNT 64
 
-#define TLB_SLOT_TSB_KERNEL_MIN 60 /* XXX */
 #define TLB_SLOT_TSB_USER_PRIMARY 61
-#define TLB_SLOT_TSB_USER_SECONDARY 62
+#define TLB_SLOT_TSB_KERNEL_MIN 62 /* XXX */
 #define TLB_SLOT_KERNEL 63
 
 #define TLB_DAR_SLOT_SHIFT (3)
 #define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT)
 
-#define TLB_TAR_VA(va) ((va) & ~PAGE_MASK)
-#define TLB_TAR_CTX(ctx) ((ctx) & PAGE_MASK)
+#define TAR_VPN_SHIFT (13)
+#define TAR_CTX_MASK ((1 << TAR_VPN_SHIFT) - 1)
+#define TLB_TAR_VA(va) ((va) & ~TAR_CTX_MASK)
+#define TLB_TAR_CTX(ctx) ((ctx) & TAR_CTX_MASK)
 
 #define TLB_DEMAP_ID_SHIFT (4)
 #define TLB_DEMAP_ID_PRIMARY (0)

@@ -222,9 +223,9 @@ tlb_range_demap(u_int ctx, vm_offset_t start, vm_offset_t end)
 }
 
 static __inline void
-tlb_tte_demap(struct tte tte, vm_offset_t va)
+tlb_tte_demap(struct tte tte, u_int ctx)
 {
-    tlb_page_demap(TD_GET_TLB(tte.tte_data), TT_GET_CTX(tte.tte_tag), va);
+    tlb_page_demap(TD_GET_TLB(tte.tte_data), ctx, TV_GET_VA(tte.tte_vpn));
 }
 
 static __inline void
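
Note on the TAR_* macros above: they make the hardware tag access
register layout explicit, with the 13-bit context number in the low bits
and the virtual page number above it.  For 8K pages PAGE_MASK covers the
same low 13 bits, so TLB_TAR_VA() and TLB_TAR_CTX() keep their old
values; the new names decouple the register layout from the page size
and let TAR_VPN_SHIFT be exported to assembly (see the genassym.c diff
below).  A worked example with illustrative values:

#include <stdio.h>

#define TAR_VPN_SHIFT   13
#define TAR_CTX_MASK    ((1UL << TAR_VPN_SHIFT) - 1)    /* 0x1fff; widened to UL here */
#define TLB_TAR_VA(va)  ((va) & ~TAR_CTX_MASK)
#define TLB_TAR_CTX(ctx) ((ctx) & TAR_CTX_MASK)

int
main(void)
{
        unsigned long va = 0x40000UL;   /* illustrative, page aligned */
        unsigned long ctx = 5UL;        /* illustrative context number */

        /* Prints "tag access = 0x40005": vpn above, context in low 13 bits. */
        printf("tag access = %#lx\n", TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
        return (0);
}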


@@ -34,20 +34,6 @@
 #define TTE_SHIFT (4)
 
-#define TT_CTX_SHIFT (48)
-#define TT_VA_SHIFT (22)
-#define TT_VPN_SHIFT (9)
-
-#define TT_CTX_SIZE (13)
-#define TT_VA_SIZE (42)
-
-#define TT_CTX_MASK ((1UL << TT_CTX_SIZE) - 1)
-#define TT_VA_MASK ((1UL << TT_VA_SIZE) - 1)
-
-#define TT_G (1UL << 63)
-
-#define TT_CTX(ctx) (((u_long)(ctx) & TT_CTX_MASK) << TT_CTX_SHIFT)
-#define TT_VA(va) ((u_long)(va) >> TT_VA_SHIFT)
-
 #define TD_SIZE_SHIFT (61)
 #define TD_SOFT2_SHIFT (50)
 #define TD_DIAG_SHIFT (41)

@@ -66,9 +52,6 @@
 #define TD_PA_MASK (((1UL << TD_PA_SIZE) - 1) << TD_PA_SHIFT)
 #define TD_SOFT_MASK (((1UL << TD_SOFT_SIZE) - 1) << TD_SOFT_SHIFT)
 
-#define TD_VA_LOW_SHIFT TD_SOFT2_SHIFT
-#define TD_VA_LOW_MASK TD_SOFT2_MASK
-
 #define TS_EXEC (1UL << 4)
 #define TS_REF (1UL << 3)
 #define TS_PV (1UL << 2)

@@ -82,8 +65,6 @@
 #define TD_4M (3UL << TD_SIZE_SHIFT)
 #define TD_NFO (1UL << 60)
 #define TD_IE (1UL << 59)
-#define TD_VPN_LOW(vpn) ((vpn << TD_SOFT2_SHIFT) & TD_SOFT2_MASK)
-#define TD_VA_LOW(va) (TD_VPN_LOW((va) >> PAGE_SHIFT))
 #define TD_PA(pa) ((pa) & TD_PA_MASK)
 #define TD_EXEC (TS_EXEC << TD_SOFT_SHIFT)
 #define TD_REF (TS_REF << TD_SOFT_SHIFT)

@@ -98,35 +79,28 @@
 #define TD_W (1UL << 1)
 #define TD_G (1UL << 0)
 
-#define TT_GET_CTX(tag) (((tag) >> TT_CTX_SHIFT) & TT_CTX_MASK)
+#define TV_VPN(va) ((va) >> PAGE_SHIFT)
+
 #define TD_GET_SIZE(d) (((d) >> TD_SIZE_SHIFT) & 3)
 #define TD_GET_PA(d) ((d) & TD_PA_MASK)
 #define TD_GET_TLB(d) (((d) & TD_EXEC) ? (TLB_DTLB | TLB_ITLB) : TLB_DTLB)
+#define TV_GET_VA(vpn) ((vpn) << PAGE_SHIFT)
 
 struct tte {
-    u_long tte_tag;
+    u_long tte_vpn;
     u_long tte_data;
 };
 
-static __inline vm_offset_t
-tte_get_vpn(struct tte tte)
+static __inline int
+tte_match_vpn(struct tte tte, vm_offset_t vpn)
 {
-    return (((tte.tte_tag & TT_VA_MASK) << TT_VPN_SHIFT) |
-        ((tte.tte_data & TD_VA_LOW_MASK) >> TD_VA_LOW_SHIFT));
-}
-
-static __inline vm_offset_t
-tte_get_va(struct tte tte)
-{
-    return (tte_get_vpn(tte) << PAGE_SHIFT);
+    return ((tte.tte_data & TD_V) != 0 && tte.tte_vpn == vpn);
 }
 
 static __inline int
 tte_match(struct tte tte, vm_offset_t va)
 {
-    return ((tte.tte_data & TD_V) != 0 &&
-        ((tte.tte_tag ^ TT_VA(va)) & TT_VA_MASK) == 0 &&
-        ((tte.tte_data ^ TD_VA_LOW(va)) & TD_VA_LOW_MASK) == 0);
+    return (tte_match_vpn(tte, va >> PAGE_SHIFT));
 }
 
 #endif /* !_MACHINE_TTE_H_ */
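
Note: swapping tte_tag for tte_vpn keeps struct tte at two words, so
TTE_SHIFT (4) still matches the 16-byte entry size that the TSB indexing
code and the constants exported through genassym.c rely on.  A
compile-time restatement of that invariant (userland types; the kernel
uses u_long, which is also 8 bytes on sparc64):

#include <stdint.h>

#define TTE_SHIFT       4       /* log2 of the tte size, as in tte.h */

struct tte {
        uint64_t        tte_vpn;        /* was tte_tag */
        uint64_t        tte_data;
};

/* The TSB code computes entry addresses with shifts by TTE_SHIFT. */
_Static_assert(sizeof(struct tte) == (1 << TTE_SHIFT),
    "struct tte must stay 1 << TTE_SHIFT bytes");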

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -83,7 +83,8 @@ ASSYM(FPRS_FEF, FPRS_FEF);
 ASSYM(LSU_VW, LSU_VW);
 
+ASSYM(TLB_DAR_TSB_USER_PRIMARY, TLB_DAR_SLOT(TLB_SLOT_TSB_USER_PRIMARY));
+ASSYM(TAR_VPN_SHIFT, TAR_VPN_SHIFT);
 ASSYM(TLB_DEMAP_NUCLEUS, TLB_DEMAP_NUCLEUS);
 ASSYM(TLB_DEMAP_SECONDARY, TLB_DEMAP_SECONDARY);
 ASSYM(TLB_DEMAP_CONTEXT, TLB_DEMAP_CONTEXT);

@@ -94,7 +95,7 @@ ASSYM(TLB_ITLB, TLB_ITLB);
 ASSYM(TSB_BUCKET_ADDRESS_BITS, TSB_BUCKET_ADDRESS_BITS);
 ASSYM(TSB_BUCKET_SHIFT, TSB_BUCKET_SHIFT);
-ASSYM(TSB_KERNEL_VA_MASK, TSB_KERNEL_VA_MASK);
+ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
 
 ASSYM(PAGE_SHIFT, PAGE_SHIFT);
 ASSYM(PAGE_MASK, PAGE_MASK);

@@ -127,22 +128,16 @@ ASSYM(KTR_PARM4, offsetof(struct ktr_entry, ktr_parm4));
 ASSYM(KTR_PARM5, offsetof(struct ktr_entry, ktr_parm5));
 ASSYM(KTR_PARM6, offsetof(struct ktr_entry, ktr_parm6));
 
+ASSYM(TTE_VPN, offsetof(struct tte, tte_vpn));
 ASSYM(TTE_DATA, offsetof(struct tte, tte_data));
-ASSYM(TTE_TAG, offsetof(struct tte, tte_tag));
 ASSYM(TTE_SHIFT, TTE_SHIFT);
 
-ASSYM(TD_VA_LOW_MASK, TD_VA_LOW_MASK);
-ASSYM(TD_VA_LOW_SHIFT, TD_VA_LOW_SHIFT);
 ASSYM(TD_EXEC, TD_EXEC);
 ASSYM(TD_REF, TD_REF);
 ASSYM(TD_SW, TD_SW);
 ASSYM(TD_L, TD_L);
 ASSYM(TD_W, TD_W);
 
-ASSYM(TT_VA_MASK, TT_VA_MASK);
-ASSYM(TT_VA_SHIFT, TT_VA_SHIFT);
-ASSYM(TT_CTX_SHIFT, TT_CTX_SHIFT);
 ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
 ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));


@@ -384,7 +384,7 @@ pmap_bootstrap(vm_offset_t ekva)
     off += PAGE_SIZE) {
     va = translations[i].om_start + off;
     tte.tte_data = translations[i].om_tte + off;
-    tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
+    tte.tte_vpn = TV_VPN(va);
     tp = tsb_kvtotte(va);
     CTR4(KTR_PMAP,
         "mapping: va=%#lx tp=%p tte=%#lx pa=%#lx",

@@ -432,9 +432,9 @@ pmap_map_tsb(void)
 for (i = 0; i < KVA_PAGES; i++) {
     va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M;
     pa = tsb_kernel_phys + i * PAGE_SIZE_4M;
-    tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
-    tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) |
-        TD_L | TD_CP | TD_CV | TD_P | TD_W;
+    tte.tte_vpn = TV_VPN(va);
+    tte.tte_data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
+        TD_CV | TD_P | TD_W;
     tlb_store_slot(TLB_DTLB, va, TLB_CTX_KERNEL, tte,
         TLB_SLOT_TSB_KERNEL_MIN + i);
 }

@@ -633,9 +633,9 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
 struct tte tte;
 struct tte *tp;
 
-tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
-tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
-    TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W;
+tte.tte_vpn = TV_VPN(va);
+tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP |
+    TD_CV | TD_P | TD_W;
 tp = tsb_kvtotte(va);
 CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
     va, pa, tp, tp->tte_data);

@@ -657,9 +657,8 @@ pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags)
 struct tte tte;
 struct tte *tp;
 
-tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
-tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
-    TD_REF | TD_P | flags;
+tte.tte_vpn = TV_VPN(va);
+tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
 tp = tsb_kvtotte(va);
 CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
     va, pa, tp, tp->tte_data);

@@ -691,7 +690,7 @@ pmap_kremove(vm_offset_t va)
 CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
     tp->tte_data);
 atomic_clear_long(&tp->tte_data, TD_V);
-tp->tte_tag = 0;
+tp->tte_vpn = 0;
 tp->tte_data = 0;
 tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
 }

@@ -1224,7 +1223,7 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t v
     pmap_cache_remove(m, va);
 }
 atomic_clear_long(&tp->tte_data, TD_V);
-tp->tte_tag = 0;
+tp->tte_vpn = 0;
 tp->tte_data = 0;
 if (PMAP_REMOVE_DONE(pm))
     return (0);

@@ -1339,8 +1338,8 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
     pm->pm_context, m, va, pa, prot, wired);
 
-tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
-tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) | TD_CP;
+tte.tte_vpn = TV_VPN(va);
+tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP;
 
 /*
  * If there is an existing mapping, and the physical address has not

@@ -1385,7 +1384,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
             if (pmap_track_modified(pm, va))
                 vm_page_dirty(m);
         }
-        tlb_tte_demap(otte, va);
+        tlb_tte_demap(otte, pm->pm_context);
     }
 } else {
     CTR0(KTR_PMAP, "pmap_enter: replace");

@@ -1416,7 +1415,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
             if (pmap_cache_enter(m, va) != 0)
                 tte.tte_data |= TD_CV;
         }
-        tlb_tte_demap(otte, va);
+        tlb_tte_demap(otte, pm->pm_context);
     }
 } else {
     CTR0(KTR_PMAP, "pmap_enter: new");

@@ -1511,8 +1510,9 @@ pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
 vm_page_t m;
 
 if (tsb_tte_lookup(dst_pmap, va) == NULL) {
-    tte.tte_data = tp->tte_data & ~(TD_PV | TD_REF | TD_CV | TD_W);
-    tte.tte_tag = TT_CTX(dst_pmap->pm_context) | TT_VA(va);
+    tte.tte_data = tp->tte_data &
+        ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
+    tte.tte_vpn = TV_VPN(va);
     m = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
     if ((tp->tte_data & TD_PV) != 0) {
         KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,

@@ -1634,7 +1634,7 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
     continue;
 atomic_clear_long(&tp->tte_data, TD_V);
-tp->tte_tag = 0;
+tp->tte_vpn = 0;
 tp->tte_data = 0;
 m = pv->pv_m;

@@ -157,7 +157,7 @@ pv_bit_clear(vm_page_t m, u_long bits)
                 vm_page_dirty(m);
             }
             atomic_clear_long(&tp->tte_data, bits);
-            tlb_tte_demap(*tp, pv->pv_va);
+            tlb_tte_demap(*tp, pv->pv_pmap->pm_context);
         }
     }
 }

@@ -250,8 +250,8 @@ pv_remove_all(vm_page_t m)
             vm_page_dirty(m);
         }
         atomic_clear_long(&tp->tte_data, TD_V);
-        tlb_tte_demap(*tp, pv->pv_va);
-        tp->tte_tag = 0;
+        tlb_tte_demap(*tp, pv->pv_pmap->pm_context);
+        tp->tte_vpn = 0;
         tp->tte_data = 0;
         pv->pv_pmap->pm_stats.resident_count--;
         m->md.pv_list_count--;


@@ -178,7 +178,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte)
 tp = rtp;
 if ((tp->tte_data & TD_V) != 0) {
     TSB_STATS_INC(tsb_nrepl);
-    ova = tte_get_va(*tp);
+    ova = TV_GET_VA(tp->tte_vpn);
     if ((tp->tte_data & TD_PV) != 0) {
         om = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
         if ((tp->tte_data & TD_W) != 0 &&

@@ -189,7 +189,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte)
         pmap_cache_remove(om, ova);
         pv_remove(pm, om, ova);
     }
-    tlb_tte_demap(*tp, ova);
+    tlb_tte_demap(*tp, pm->pm_context);
 }
 
 *tp = tte;

@@ -218,7 +218,7 @@ tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
 for (i = 0; i < TSB_SIZE; i++) {
     tp = &pm1->pm_tsb[i];
     if ((tp->tte_data & TD_V) != 0) {
-        va = tte_get_va(*tp);
+        va = TV_GET_VA(tp->tte_vpn);
         if (va >= start && va < end) {
             if (!callback(pm1, pm2, tp, va))
                 break;