Fix a (new) source of instability:

When interrupting a kernel context, we don't need to switch stacks
(neither memory nor register stack). As such, we were also not
restoring the register stack pointer (ar.bspstore). This, however,
is not valid in one situation: when we interrupt a register stack
switch such as the one done in restorectx(). The problem is that
restorectx() needs ar.bsp == ar.bspstore before it can assign the
new value to ar.bspstore. This is achieved by doing a loadrs prior
to assigning to ar.bspstore. If we take an interrupt between the
loadrs and the assignment and don't make sure we restore
ar.bspstore prior to returning from the interrupt, we switch
stacks with possibly non-zero dirty registers, which means that
the new frame pointer (ar.bsp) will be invalid.
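
As an illustration, a minimal sketch of the stack-switch sequence in
restorectx() described above; it follows the description in this
message, and the use of r16 for the new backing store pointer is
purely illustrative, not taken from the actual source:

	mov	ar.rsc=0		// RSE into enforced lazy mode, loadrs field 0
	;;
	loadrs				// after this, ar.bsp == ar.bspstore
	;;
	// An interrupt taken here must restore ar.bspstore on return;
	// otherwise the assignment below switches backing stores while
	// dirty registers may still be outstanding and the resulting
	// ar.bsp is invalid.
	mov	ar.bspstore=r16		// r16 = new backing store (illustrative)
	;;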

So, instead of jumping over the restoration of the register frame
pointer and related registers, we conditionalize it based on whether
we return to kernel context or user context. A future performance
tweak is possible by only restoring ar.bspstore when returning to
kernel mode *and* when the RSE is in enforced lazy mode. One cannot
assume ar.bsp == ar.bspstore if the RSE is not in enforced lazy mode
anyway.
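
For reference, a minimal sketch (not the committed code) of how
kernel-vs-user predicates like the ones used in the diff below can be
derived from the saved psr; the choice of r23 for the saved psr and of
p14/p15 for kernel/user returns mirrors the hunks below, the
instructions themselves are illustrative:

	extr.u	r16=r23,32,2		// psr.cpl of the interrupted context
	;;
	cmp.eq	p14,p15=0,r16		// p14: return to kernel, p15: return to user
	;;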

While here (well, not quite), don't unconditionally assign to
ar.bspstore in exception_save. Only do that when we actually switch
stacks. It can only harm us to do it unconditionally.

Approved by: re@ (blanket)
Author: Marcel Moolenaar
Date:   2003-05-23 23:55:31 +00:00
Commit: ca125f9c17
Parent: 59a47b31d0

2 changed files with 14 additions and 16 deletions

File 1 of 2:

@@ -166,7 +166,7 @@ exception_save_restart:
 	// r20=bspstore, r22=iip, r23=ipsr
 {	.mmi
 	st8	[r31]=r23,16		// psr
-	mov	ar.bspstore=r20
+(p13)	mov	ar.bspstore=r20
 	nop	0
 	;;
 }
@@ -505,7 +505,7 @@ ENTRY(exception_restore, 0)
 {	.mmb
 	ld8	r26=[r30]		// cfm
 	ld8	r19=[r31]		// ip
-(p14)	br.cond.sptk	1f
+	nop	0
 	;;
 }
 {	.mib
@@ -522,8 +522,8 @@ ENTRY(exception_restore, 0)
 	// the backing store.
 {	.mmi
 	mov	ar.rsc=r31		// setup for loadrs
-	mov	ar.k7=r16
-	mov	r13=r29
+(p15)	mov	ar.k7=r16
+(p15)	mov	r13=r29
 	;;
 }
 exception_restore_restart:
@@ -535,19 +535,18 @@ exception_restore_restart:
 	;;
 }
 {	.mmi
-	mov	r31=ar.bspstore
+(p15)	mov	r31=ar.bspstore
 	;;
 	mov	ar.bspstore=r20
-	dep	r31=0,r31,0,9
+(p15)	dep	r31=0,r31,0,9
 	;;
 }
 {	.mmb
-	mov	ar.k6=r31
+(p15)	mov	ar.k6=r31
 	mov	ar.rnat=r21
 	nop	0
 	;;
 }
-1:
 {	.mmb
 	mov	ar.unat=r17
 	mov	cr.iip=r19

File 2 of 2:

@@ -166,7 +166,7 @@ exception_save_restart:
 	// r20=bspstore, r22=iip, r23=ipsr
 {	.mmi
 	st8	[r31]=r23,16		// psr
-	mov	ar.bspstore=r20
+(p13)	mov	ar.bspstore=r20
 	nop	0
 	;;
 }
@@ -505,7 +505,7 @@ ENTRY(exception_restore, 0)
 {	.mmb
 	ld8	r26=[r30]		// cfm
 	ld8	r19=[r31]		// ip
-(p14)	br.cond.sptk	1f
+	nop	0
 	;;
 }
 {	.mib
@@ -522,8 +522,8 @@ ENTRY(exception_restore, 0)
 	// the backing store.
 {	.mmi
 	mov	ar.rsc=r31		// setup for loadrs
-	mov	ar.k7=r16
-	mov	r13=r29
+(p15)	mov	ar.k7=r16
+(p15)	mov	r13=r29
 	;;
 }
 exception_restore_restart:
@@ -535,19 +535,18 @@ exception_restore_restart:
 	;;
 }
 {	.mmi
-	mov	r31=ar.bspstore
+(p15)	mov	r31=ar.bspstore
 	;;
 	mov	ar.bspstore=r20
-	dep	r31=0,r31,0,9
+(p15)	dep	r31=0,r31,0,9
 	;;
 }
 {	.mmb
-	mov	ar.k6=r31
+(p15)	mov	ar.k6=r31
 	mov	ar.rnat=r21
 	nop	0
 	;;
 }
-1:
 {	.mmb
 	mov	ar.unat=r17
 	mov	cr.iip=r19