Load the new sp_el0 with interrupts disabled in fork_trampoline. If an
interrupt arrives in fork_trampoline after sp_el0 was written, we may then
switch to a new thread, enter userland, and thus change this stack pointer,
then return to this code with the wrong value. Fix this case by moving
the load of sp_el0 until after interrupts have been disabled.

Reported by:	Mark Millard (markmi@dsl-only.net)
Sponsored by:	ABT Systems Ltd
Differential Revision:	https://reviews.freebsd.org/D9593
This commit is contained in:
Andrew Turner 2017-02-15 14:56:47 +00:00
parent f33f887e94
commit 783b367605
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=313772

View File

@ -241,11 +241,6 @@ ENTRY(fork_trampoline)
mov fp, #0 /* Stack traceback stops here. */
bl _C_LABEL(fork_exit)
/* Restore sp and lr */
ldp x0, x1, [sp]
msr sp_el0, x0
mov lr, x1
/* Restore the registers other than x0 and x1 */
ldp x2, x3, [sp, #TF_X + 2 * 8]
ldp x4, x5, [sp, #TF_X + 4 * 8]
@ -261,14 +256,18 @@ ENTRY(fork_trampoline)
ldp x24, x25, [sp, #TF_X + 24 * 8]
ldp x26, x27, [sp, #TF_X + 26 * 8]
ldp x28, x29, [sp, #TF_X + 28 * 8]
/* Skip x30 as it was restored above as lr */
/*
* Disable interrupts to avoid
* overwriting spsr_el1 by an IRQ exception.
* overwriting spsr_el1 and sp_el0 by an IRQ exception.
*/
msr daifset, #2
/* Restore sp and lr */
ldp x0, x1, [sp]
msr sp_el0, x0
mov lr, x1
/* Restore elr and spsr */
ldp x0, x1, [sp, #16]
msr elr_el1, x0