Make a macro for the guts of tl0_immu_miss, like dmmu_miss and prot.

Rearrange things slightly so that the contents of the tag access
register are read and restored outside of the macros.  The intention
is to pass the page size to look up as an argument to the macros.
Jake Burkholder 2002-05-08 04:14:16 +00:00
parent 76fdd728b6
commit ea4ad4bb29
2 changed files with 152 additions and 102 deletions
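The last sentence of the commit message hints at the follow-up this rearrangement enables: once the callers read the tag access register into %g2 and restore it themselves, the lookup macros can take the page size as a GNU as macro argument. The sketch below only illustrates that direction and is not part of the diff that follows; the macro name tsb_lookup_user and its ps argument are hypothetical placeholders, while TAR_VPN_SHIFT, ASI_DMMU and AA_DMMU_TAR are the existing definitions already used in the diff.

	/*
	 * Hypothetical sketch only; "tsb_lookup_user" and "ps" are
	 * placeholder names, not part of this commit.
	 */
	.macro	tsb_lookup_user ps
	/*
	 * The caller has already loaded the tag access register into
	 * %g2, so the shift used to extract the virtual page number
	 * can depend on the page size argument.
	 */
	srlx	%g2, TAR_VPN_SHIFT + \ps, %g3
	.endm

	/*
	 * Caller side: read the tag access register once, then expand
	 * the macro for each page size to try (0 shown here).
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	tsb_lookup_user 0

Whether such an argument would be folded into the shift, as sketched here, or used to select a different TSB is left open by the commit message.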

@@ -543,19 +543,7 @@ END(tl0_sfsr_trap)
 	.align	32
 	.endm
 
-	.macro	tl0_immu_miss
-	/*
-	 * Force kernel store order.
-	 */
-	wrpr	%g0, PSTATE_MMU, %pstate
-
-	/*
-	 * Load the virtual page number and context from the tag access
-	 * register.  We ignore the context.
-	 */
-	wr	%g0, ASI_IMMU, %asi
-	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
-
+	.macro	immu_miss_user
 	/*
 	 * Extract the virtual page number from the contents of the tag
 	 * access register.
@@ -623,13 +611,22 @@ END(tl0_sfsr_trap)
 	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
 	bnz,a,pt %xcc, 1b
 	 nop
+	.endm
+
+	.macro	tl0_immu_miss
+	/*
+	 * Force kernel store order.
+	 */
+	wrpr	%g0, PSTATE_MMU, %pstate
 
 	/*
-	 * Put back the contents of the tag access register, in case we
-	 * faulted.
+	 * Load the virtual page number and context from the tag access
+	 * register.  We ignore the context.
 	 */
-	stxa	%g2, [%g0 + AA_IMMU_TAR] %asi
-	membar	#Sync
+	wr	%g0, ASI_IMMU, %asi
+	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
+
+	immu_miss_user
 
 	b,a	%xcc, tl0_immu_miss_trap
 	 nop
@@ -657,6 +654,13 @@ ENTRY(tl0_immu_miss_set_ref)
 END(tl0_immu_miss_set_ref)
 
 ENTRY(tl0_immu_miss_trap)
+	/*
+	 * Put back the contents of the tag access register, in case we
+	 * faulted.
+	 */
+	stxa	%g2, [%g0 + AA_IMMU_TAR] %asi
+	membar	#Sync
+
 	/*
 	 * Switch to alternate globals.
 	 */
@@ -677,13 +681,6 @@ ENTRY(tl0_immu_miss_trap)
 END(tl0_immu_miss_trap)
 
 	.macro	dmmu_miss_user
-	/*
-	 * Load the virtual page number and context from the tag access
-	 * register.  We ignore the context.
-	 */
-	wr	%g0, ASI_DMMU, %asi
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-
 	/*
 	 * Extract the virtual page number from the contents of the tag
 	 * access register.
@@ -748,13 +745,6 @@ END(tl0_immu_miss_trap)
 	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
 	bnz,a,pt %xcc, 1b
 	 nop
-
-	/*
-	 * Put back the contents of the tag access register, in case we
-	 * faulted.
-	 */
-	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
-	membar	#Sync
 	.endm
 
 ENTRY(dmmu_miss_user_set_ref)
@@ -783,6 +773,13 @@ END(dmmu_miss_user_set_ref)
 	 */
 	wrpr	%g0, PSTATE_MMU, %pstate
 
+	/*
+	 * Load the virtual page number and context from the tag access
+	 * register.  We ignore the context.
+	 */
+	wr	%g0, ASI_DMMU, %asi
+	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
+
 	/*
 	 * Try a fast inline lookup of the primary tsb.
 	 */
@@ -797,6 +794,13 @@ END(dmmu_miss_user_set_ref)
 	.endm
 
 ENTRY(tl0_dmmu_miss_trap)
+	/*
+	 * Put back the contents of the tag access register, in case we
+	 * faulted.
+	 */
+	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
+	membar	#Sync
+
 	/*
 	 * Switch to alternate globals.
 	 */
@@ -817,13 +821,6 @@ ENTRY(tl0_dmmu_miss_trap)
 END(tl0_dmmu_miss_trap)
 
 	.macro	dmmu_prot_user
-	/*
-	 * Load the virtual page number and context from the tag access
-	 * register.  We ignore the context.
-	 */
-	wr	%g0, ASI_DMMU, %asi
-	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
-
 	/*
 	 * Extract the virtual page number from the contents of the tag
 	 * access register.
@@ -879,13 +876,6 @@ END(tl0_dmmu_miss_trap)
 	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
 	bnz,a,pt %xcc, 1b
 	 nop
-
-	/*
-	 * Put back the contents of the tag access register, in case we
-	 * faulted.
-	 */
-	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
-	membar	#Sync
 	.endm
 
 	.macro	tl0_dmmu_prot
@@ -894,6 +884,13 @@ END(tl0_dmmu_miss_trap)
 	 */
 	wrpr	%g0, PSTATE_MMU, %pstate
 
+	/*
+	 * Load the virtual page number and context from the tag access
+	 * register.  We ignore the context.
+	 */
+	wr	%g0, ASI_DMMU, %asi
+	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
+
 	/*
	 * Try a fast inline lookup of the tsb.
 	 */
@@ -936,6 +933,13 @@ ENTRY(dmmu_prot_set_w)
 END(dmmu_prot_set_w)
 
 ENTRY(tl0_dmmu_prot_trap)
+	/*
+	 * Put back the contents of the tag access register, in case we
+	 * faulted.
+	 */
+	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
+	membar	#Sync
+
 	/*
 	 * Switch to alternate globals.
 	 */
@@ -1330,13 +1334,14 @@ END(tl1_immu_miss_trap)
 	 * the virtual page number.
 	 */
 	sllx	%g6, 64 - TAR_VPN_SHIFT, %g5
-	brnz,pn %g5, tl1_dmmu_miss_user
-	 srlx	%g6, TAR_VPN_SHIFT, %g6
+	brnz,a,pn %g5, tl1_dmmu_miss_user
+	 mov	%g6, %g2
 
 	/*
 	 * Find the index into the kernel tsb.
 	 */
 	set	TSB_KERNEL_MASK, %g4
+	srlx	%g6, TAR_VPN_SHIFT, %g6
 	and	%g6, %g4, %g3
 
 	/*
@@ -1403,6 +1408,13 @@ ENTRY(tl1_dmmu_miss_user)
 	 */
 	dmmu_miss_user
 
+	/*
+	 * Put back the contents of the tag access register, in case we
+	 * faulted.
+	 */
+	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
+	membar	#Sync
+
 	/*
 	 * Switch to alternate globals.
 	 */
@@ -1438,13 +1450,14 @@ END(tl1_dmmu_miss_user)
 	 * the virtual page number.
 	 */
 	sllx	%g6, 64 - TAR_VPN_SHIFT, %g5
-	brnz,pn %g5, tl1_dmmu_prot_user
-	 srlx	%g6, TAR_VPN_SHIFT, %g6
+	brnz,a,pn %g5, tl1_dmmu_prot_user
+	 mov	%g6, %g2
 
 	/*
 	 * Find the index into the kernel tsb.
 	 */
 	set	TSB_KERNEL_MASK, %g4
+	srlx	%g6, TAR_VPN_SHIFT, %g6
 	and	%g6, %g4, %g5
 
 	/*
@@ -1479,6 +1492,12 @@ END(tl1_dmmu_miss_user)
 	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
 	membar	#Sync
 
+	ba,a	%xcc, tl1_dmmu_prot_cont
+	 nop
+	.align	128
+	.endm
+
+ENTRY(tl1_dmmu_prot_cont)
 	/*
 	 * Set the hardware write bit.
 	 */
@@ -1490,8 +1509,7 @@ END(tl1_dmmu_miss_user)
 	or	%g5, TD_W, %g5
 	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
 	retry
-	.align	128
-	.endm
+END(tl1_dmmu_prot_cont)
 
 ENTRY(tl1_dmmu_prot_user)
 	/*
@@ -1499,6 +1517,13 @@ ENTRY(tl1_dmmu_prot_user)
 	 */
 	dmmu_prot_user
 
+	/*
+	 * Put back the contents of the tag access register, in case we
+	 * faulted.
+	 */
+	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
+	membar	#Sync
+
 	/*
 	 * Switch to alternate globals.
 	 */
