/* $NetBSD: frame.h,v 1.6 2003/10/05 19:44:58 matt Exp $ */

/*-
 * Copyright (c) 1994-1997 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ASMACROS_H_
#define _MACHINE_ASMACROS_H_

#ifdef _KERNEL

#ifdef LOCORE

/*
 * ASM macros for pushing and pulling trapframes from the stack
 *
 * These macros are used to handle the irqframe and trapframe structures
 * defined in frame.h.
 */

/*
 * PUSHFRAME - macro to push a trap frame on the stack in the current mode
 * Since the current mode is used, the SVC lr field is not defined.
 *
 * The macro also resets the user-mode RAS (restartable atomic sequence)
 * registration, so that a preempted or malicious process cannot carry
 * stale ARM_RAS_START/ARM_RAS_END values across a kernel entry.
 *
 * NOTE: r13 and r14 are stored separately as a workaround for the
 * SA110 rev 2 STM^ bug.
 */

#define PUSHFRAME							   \
	str	lr, [sp, #-4]!;		/* Push the return address */	   \
	sub	sp, sp, #(4*17);	/* Adjust the stack pointer */	   \
	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	   \
	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	mrs	r0, spsr_all;		/* Put the SPSR on the stack */	   \
	str	r0, [sp, #-4]!;						   \
	ldr	r0, =ARM_RAS_START;	/* Clear the RAS start address */  \
	mov	r1, #0;							   \
	str	r1, [r0];						   \
	ldr	r0, =ARM_RAS_END;	/* Reset the RAS end address */	   \
	mov	r1, #0xffffffff;					   \
	str	r1, [r0];

/*
 * PULLFRAME - macro to pull a trap frame from the stack in the current mode
 * Since the current mode is used, the SVC lr field is ignored.
 */

#define PULLFRAME							   \
	ldr	r0, [sp], #0x0004;	/* Get the SPSR from stack */	   \
	msr	spsr_all, r0;						   \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(4*17);	/* Adjust the stack pointer */	   \
	ldr	lr, [sp], #0x0004;	/* Pull the return address */
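
/*
 * Usage sketch: an exception that arrives in the mode whose frame is
 * wanted, such as a SWI, can bracket its C handler with PUSHFRAME and
 * PULLFRAME.  The entry point and handler names below are illustrative
 * assumptions, loosely modelled on exception.S.
 *
 *	ASENTRY_NP(swi_entry)
 *		PUSHFRAME			@ build the trap frame
 *		mov	r0, sp			@ pass the frame to the handler
 *		bl	_C_LABEL(swi_handler)	@ assumed C handler
 *		DO_AST				@ defined later in this file
 *		PULLFRAME			@ unwind the trap frame
 *		movs	pc, lr			@ return, restoring the PSR
 */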

/*
 * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
 * This should only be used if the processor is not currently in SVC32
 * mode.  The processor mode is switched to SVC mode and the trap frame is
 * stored.  The SVC lr field is used to store the previous value of
 * lr in SVC mode.
 *
 * If the exception was taken inside a user-mode restartable atomic
 * sequence (RAS), the saved pc is rewound to the start of the sequence
 * and the ARM_RAS_START/ARM_RAS_END registration is reset.
 *
 * NOTE: r13 and r14 are stored separately as a workaround for the
 * SA110 rev 2 STM^ bug.
 */

#define PUSHFRAMEINSVC							   \
	stmdb	sp, {r0-r3};		/* Save 4 registers */		   \
	mov	r0, lr;			/* Save xxx32 r14 */		   \
	mov	r1, sp;			/* Save xxx32 sp */		   \
	mrs	r3, spsr;		/* Save xxx32 spsr */		   \
	mrs	r2, cpsr;		/* Get the CPSR */		   \
	bic	r2, r2, #(PSR_MODE);	/* Fix for SVC mode */		   \
	orr	r2, r2, #(PSR_SVC32_MODE);				   \
	msr	cpsr_c, r2;		/* Punch into SVC mode */	   \
	mov	r2, sp;			/* Save SVC sp */		   \
	str	r0, [sp, #-4]!;		/* Push return address */	   \
	str	lr, [sp, #-4]!;		/* Push SVC lr */		   \
	str	r2, [sp, #-4]!;		/* Push SVC sp */		   \
	msr	spsr_all, r3;		/* Restore correct spsr */	   \
	ldmdb	r1, {r0-r3};		/* Restore 4 regs from xxx mode */ \
	sub	sp, sp, #(4*15);	/* Adjust the stack pointer */	   \
	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	   \
	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	ldr	r5, =ARM_RAS_START;	/* Check if there's any RAS */	   \
	ldr	r3, [r5];		/* Get the RAS start address */	   \
	cmp	r3, #0;			/* Is the update needed ? */	   \
	ldrgt	lr, [r0, #16];		/* Get the saved pc */		   \
	ldrgt	r1, =ARM_RAS_END;					   \
	ldrgt	r4, [r1];		/* Get the end of the RAS */	   \
	movgt	r2, #0;			/* Reset the magic addresses */	   \
	strgt	r2, [r5];						   \
	movgt	r2, #0xffffffff;					   \
	strgt	r2, [r1];						   \
	cmpgt	lr, r3;			/* Were we in the RAS ? */	   \
	cmpgt	r4, lr;							   \
	strgt	r3, [r0, #16];		/* Yes, update the pc */	   \
	mrs	r0, spsr_all;		/* Put the SPSR on the stack */	   \
	str	r0, [sp, #-4]!

/*
 * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
 * in SVC32 mode and restore the saved processor mode and PC.
 * This should be used when the SVC lr register needs to be restored on
 * exit.
 */

#define PULLFRAMEFROMSVCANDEXIT						   \
	ldr	r0, [sp], #0x0004;	/* Get the SPSR from stack */	   \
	msr	spsr_all, r0;		/* Restore SPSR */		   \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(4*15);	/* Adjust the stack pointer */	   \
	ldmia	sp, {sp, lr, pc}^	/* Restore lr and exit */
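
/*
 * Usage sketch: an exception that arrives in a non-SVC32 mode, such as
 * an IRQ, adjusts lr for the exception return offset, builds its frame
 * in SVC mode with PUSHFRAMEINSVC and leaves through
 * PULLFRAMEFROMSVCANDEXIT.  The entry point and handler names are
 * illustrative assumptions.
 *
 *	ASENTRY_NP(irq_entry)
 *		sub	lr, lr, #4		@ adjust for the IRQ return offset
 *		PUSHFRAMEINSVC			@ switch to SVC32, build the frame
 *		mov	r0, sp			@ pass the frame to the handler
 *		bl	_C_LABEL(irq_handler)	@ assumed C handler
 *		DO_AST				@ defined below
 *		PULLFRAMEFROMSVCANDEXIT		@ restore mode, registers and pc
 */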

#define DATA(name)							   \
	.data ;								   \
	_ALIGN_DATA ;							   \
	.globl	name ;							   \
	.type	name, %object ;						   \
name:
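
/*
 * For example, a sketch with a made-up symbol name:
 *
 *	DATA(sample_data)
 *	.word	0
 *
 * emits a global, word-sized data object in the .data section that C
 * code can reference as an extern.
 */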

#define EMPTY

#define DO_AST								  \
	ldr	r0, [sp]		/* Get the SPSR from stack */	 ;\
	mrs	r4, cpsr		/* save CPSR */			 ;\
	orr	r1, r4, #(I32_bit|F32_bit)				 ;\
	msr	cpsr_c, r1		/* Disable interrupts */	 ;\
	and	r0, r0, #(PSR_MODE)	/* Returning to USR mode? */	 ;\
	teq	r0, #(PSR_USR32_MODE)					 ;\
	bne	2f			/* Nope, get out now */		 ;\
	bic	r4, r4, #(I32_bit|F32_bit)				 ;\
1:	ldr	r5, .Lcurthread						 ;\
	ldr	r5, [r5]						 ;\
	ldr	r1, [r5, #(TD_FLAGS)]					 ;\
	and	r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED)		 ;\
	teq	r1, #0x00000000						 ;\
	beq	2f			/* Nope. Just bail */		 ;\
	msr	cpsr_c, r4		/* Restore interrupts */	 ;\
	mov	r0, sp							 ;\
	bl	_C_LABEL(ast)		/* ast(frame) */		 ;\
	orr	r0, r4, #(I32_bit|F32_bit)				 ;\
	msr	cpsr_c, r0						 ;\
	b	1b							 ;\
2:

#define AST_LOCALS							 ;\
.Lcurthread:								 ;\
	.word	_C_LABEL(__pcpu) + PC_CURTHREAD
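
/*
 * AST_LOCALS provides the .Lcurthread literal that DO_AST loads, so any
 * file that expands DO_AST must also expand AST_LOCALS once, typically
 * after its handlers.  A minimal sketch, with an illustrative entry
 * point name:
 *
 *	ASENTRY_NP(some_entry)
 *		PUSHFRAMEINSVC
 *		...
 *		DO_AST
 *		PULLFRAMEFROMSVCANDEXIT
 *
 *	AST_LOCALS
 */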

#endif /* LOCORE */

#endif /* _KERNEL */

#endif /* !_MACHINE_ASMACROS_H_ */