/* $NetBSD: frame.h,v 1.6 2003/10/05 19:44:58 matt Exp $ */

/*-
 * Copyright (c) 1994-1997 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ASMACROS_H_
#define _MACHINE_ASMACROS_H_

#include <machine/asm.h>

#ifdef _KERNEL

#ifdef LOCORE
#include "opt_global.h"

/*
 * ASM macros for pushing and pulling trapframes from the stack
 *
 * These macros are used to handle the irqframe and trapframe structures
 * defined in <machine/frame.h>.
 */

/*
 * PUSHFRAME - macro to push a trap frame on the stack in the current mode
 * Since the current mode is used, the SVC lr field is not defined.
 *
 * NOTE: r13 and r14 are stored separately as a workaround for the
 * SA110 rev 2 STM^ bug
 */
#ifdef ARM_TP_ADDRESS
#define PUSHFRAME							   \
	sub	sp, sp, #4;		/* Align the stack */		   \
	str	lr, [sp, #-4]!;		/* Push the return address */	   \
	sub	sp, sp, #(4*17);	/* Adjust the stack pointer */	   \
	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	   \
	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	mrs	r0, spsr_all;		/* Put the SPSR on the stack */	   \
	str	r0, [sp, #-4]!;						   \
	ldr	r0, =ARM_RAS_START;	/* Reset the RAS region so that */ \
	mov	r1, #0;			/* values a process set before	*/ \
	str	r1, [r0];		/* entering the kernel cannot	*/ \
	mov	r1, #0xffffffff;	/* be replayed: start = 0,	*/ \
	str	r1, [r0, #4];		/* end = 0xffffffff. */
#else
#define PUSHFRAME							   \
	sub	sp, sp, #4;		/* Align the stack */		   \
	str	lr, [sp, #-4]!;		/* Push the return address */	   \
	sub	sp, sp, #(4*17);	/* Adjust the stack pointer */	   \
	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	   \
	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	mrs	r0, spsr_all;		/* Put the SPSR on the stack */	   \
	str	r0, [sp, #-4]!;
#endif

/*
 * PULLFRAME - macro to pull a trap frame from the stack in the current mode
 * Since the current mode is used, the SVC lr field is ignored.
 */

#ifdef ARM_TP_ADDRESS
#define PULLFRAME							   \
	ldr	r0, [sp], #0x0004;	/* Get the SPSR from stack */	   \
	msr	spsr_all, r0;						   \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(4*17);	/* Adjust the stack pointer */	   \
	ldr	lr, [sp], #0x0004;	/* Pull the return address */	   \
	add	sp, sp, #4		/* Align the stack */
#else
#define PULLFRAME							   \
	ldr	r0, [sp], #0x0004;	/* Get the SPSR from stack */	   \
	msr	spsr_all, r0;						   \
	clrex;				/* Clear exclusive monitor state */ \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(4*17);	/* Adjust the stack pointer */	   \
	ldr	lr, [sp], #0x0004;	/* Pull the return address */	   \
	add	sp, sp, #4		/* Align the stack */
#endif
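
/*
 * Usage sketch (illustrative only, not part of this header): a
 * current-mode trap entry, such as a syscall (SWI) handler, might be
 * written roughly as follows.  The entry label and the C handler name
 * are placeholders, not declarations made by this file.
 *
 *	example_swi_entry:
 *		PUSHFRAME			@ build trapframe at sp
 *		mov	r0, sp			@ trapframe argument
 *		bl	_C_LABEL(swi_handler)	@ placeholder C handler
 *		DO_AST				@ deliver any pending AST
 *		PULLFRAME			@ unwind the trapframe
 *		movs	pc, lr			@ return, restoring SPSR
 *	AST_LOCALS				@ literal word for pre-ARMv6
 */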

/*
 * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
 * This should only be used if the processor is not currently in SVC32
 * mode. The processor mode is switched to SVC mode and the trap frame is
 * stored. The SVC lr field is used to store the previous value of
 * lr in SVC mode.
 *
 * NOTE: r13 and r14 are stored separately as a workaround for the
 * SA110 rev 2 STM^ bug
 */
#ifdef ARM_TP_ADDRESS
#define PUSHFRAMEINSVC							   \
	stmdb	sp, {r0-r3};		/* Save 4 registers */		   \
	mov	r0, lr;			/* Save xxx32 r14 */		   \
	mov	r1, sp;			/* Save xxx32 sp */		   \
	mrs	r3, spsr;		/* Save xxx32 spsr */		   \
	mrs	r2, cpsr;		/* Get the CPSR */		   \
	bic	r2, r2, #(PSR_MODE);	/* Fix for SVC mode */		   \
	orr	r2, r2, #(PSR_SVC32_MODE);				   \
	msr	cpsr_c, r2;		/* Punch into SVC mode */	   \
	mov	r2, sp;			/* Save SVC sp */		   \
	bic	sp, sp, #7;		/* Align sp to an 8-byte address */ \
	sub	sp, sp, #4;		/* Pad trapframe to keep alignment */ \
	str	r0, [sp, #-4]!;		/* Push return address */	   \
	str	lr, [sp, #-4]!;		/* Push SVC lr */		   \
	str	r2, [sp, #-4]!;		/* Push SVC sp */		   \
	msr	spsr_all, r3;		/* Restore correct spsr */	   \
	ldmdb	r1, {r0-r3};		/* Restore 4 regs from xxx mode */ \
	sub	sp, sp, #(4*15);	/* Adjust the stack pointer */	   \
	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	   \
	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	ldr	r5, =ARM_RAS_START;	/* Check if there's any RAS */	   \
	ldr	r4, [r5, #4];		/* reset it to point at the */	   \
	cmp	r4, #0xffffffff;	/* end of memory if necessary; */  \
	movne	r1, #0xffffffff;	/* leave value in r4 for later */  \
	strne	r1, [r5, #4];		/* comparison against PC. */	   \
	ldr	r3, [r5];		/* Retrieve global RAS_START */	   \
	cmp	r3, #0;			/* and reset it if non-zero. */	   \
	movne	r1, #0;			/* If non-zero RAS_START and */	   \
	strne	r1, [r5];		/* PC was lower than RAS_END, */   \
	ldrne	r1, [r0, #16];		/* adjust the saved PC so that */  \
	cmpne	r4, r1;			/* execution later resumes at */   \
	strhi	r3, [r0, #16];		/* the RAS_START location. */	   \
	mrs	r0, spsr_all;						   \
	str	r0, [sp, #-4]!
#else
#define PUSHFRAMEINSVC							   \
	stmdb	sp, {r0-r3};		/* Save 4 registers */		   \
	mov	r0, lr;			/* Save xxx32 r14 */		   \
	mov	r1, sp;			/* Save xxx32 sp */		   \
	mrs	r3, spsr;		/* Save xxx32 spsr */		   \
	mrs	r2, cpsr;		/* Get the CPSR */		   \
	bic	r2, r2, #(PSR_MODE);	/* Fix for SVC mode */		   \
	orr	r2, r2, #(PSR_SVC32_MODE);				   \
	msr	cpsr_c, r2;		/* Punch into SVC mode */	   \
	mov	r2, sp;			/* Save SVC sp */		   \
	bic	sp, sp, #7;		/* Align sp to an 8-byte address */ \
	sub	sp, sp, #4;		/* Pad trapframe to keep alignment */ \
	str	r0, [sp, #-4]!;		/* Push return address */	   \
	str	lr, [sp, #-4]!;		/* Push SVC lr */		   \
	str	r2, [sp, #-4]!;		/* Push SVC sp */		   \
	msr	spsr_all, r3;		/* Restore correct spsr */	   \
	ldmdb	r1, {r0-r3};		/* Restore 4 regs from xxx mode */ \
	sub	sp, sp, #(4*15);	/* Adjust the stack pointer */	   \
	stmia	sp, {r0-r12};		/* Push the user mode registers */ \
	add	r0, sp, #(4*13);	/* Adjust the stack pointer */	   \
	stmia	r0, {r13-r14}^;		/* Push the user mode registers */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	mrs	r0, spsr_all;		/* Put the SPSR on the stack */	   \
	str	r0, [sp, #-4]!
#endif

/*
 * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
 * in SVC32 mode and restore the saved processor mode and PC.
 * This should be used when the SVC lr register needs to be restored on
 * exit.
 */

#ifdef ARM_TP_ADDRESS
#define PULLFRAMEFROMSVCANDEXIT						   \
	ldr	r0, [sp], #0x0004;	/* Get the SPSR from stack */	   \
	msr	spsr_all, r0;		/* restore SPSR */		   \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(4*15);	/* Adjust the stack pointer */	   \
	ldmia	sp, {sp, lr, pc}^	/* Restore lr and exit */
#else
#define PULLFRAMEFROMSVCANDEXIT						   \
	ldr	r0, [sp], #0x0004;	/* Get the SPSR from stack */	   \
	msr	spsr_all, r0;		/* restore SPSR */		   \
	clrex;				/* Clear exclusive monitor state */ \
	ldmia	sp, {r0-r14}^;		/* Restore registers (usr mode) */ \
	mov	r0, r0;			/* NOP for previous instruction */ \
	add	sp, sp, #(4*15);	/* Adjust the stack pointer */	   \
	ldmia	sp, {sp, lr, pc}^	/* Restore lr and exit */
#endif

#if defined(__ARM_EABI__)
#define UNWINDSVCFRAME							   \
	.pad #(4);			/* Skip stack alignment */	   \
	.save {r13-r15};		/* Restore sp, lr, pc */	   \
	.pad #(2*4);			/* Skip user sp and lr */	   \
	.save {r0-r12};			/* Restore r0-r12 */		   \
	.pad #(4)			/* Skip spsr */
#else
#define UNWINDSVCFRAME
#endif
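
/*
 * Usage sketch (illustrative only): an exception taken in a non-SVC
 * mode, such as an IRQ, would typically switch to SVC32 with
 * PUSHFRAMEINSVC and leave through PULLFRAMEFROMSVCANDEXIT.  The entry
 * label and the C handler name below are placeholders.
 *
 *	example_irq_entry:
 *		PUSHFRAMEINSVC			@ enter SVC32, build frame
 *		UNWINDSVCFRAME			@ EABI unwind annotations
 *		mov	r0, sp			@ trapframe argument
 *		bl	_C_LABEL(arm_irq_handler) @ placeholder C handler
 *		DO_AST				@ deliver any pending AST
 *		PULLFRAMEFROMSVCANDEXIT		@ restore mode and return
 */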

#define DATA(name) \
	.data ; \
	_ALIGN_DATA ; \
	.globl	name ; \
	.type	name, %object ; \
name:
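
/*
 * Example (illustrative): DATA() emits an aligned, global data object
 * in the .data section; the symbol name below is hypothetical.
 *
 *	DATA(example_word)
 *		.word	0
 */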

#ifdef _ARM_ARCH_6
#define AST_LOCALS
#define GET_CURTHREAD_PTR(tmp) \
	mrc	p15, 0, tmp, c13, c0, 4; /* per-CPU pointer (TPIDRPRW) */ \
	add	tmp, tmp, #(PC_CURTHREAD) /* tmp = &pcpu->pc_curthread */
#else
#define AST_LOCALS ;\
.Lcurthread: ;\
	.word	_C_LABEL(__pcpu) + PC_CURTHREAD

#define GET_CURTHREAD_PTR(tmp) \
	ldr	tmp, .Lcurthread
#endif
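
/*
 * Illustrative use: GET_CURTHREAD_PTR() leaves the address of the
 * per-CPU curthread slot in the named register, so the current thread
 * pointer itself takes one more load (the register choice is
 * arbitrary):
 *
 *	GET_CURTHREAD_PTR(r4)		@ r4 = &pcpu->pc_curthread
 *	ldr	r4, [r4]		@ r4 = curthread
 */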

#define DO_AST \
	ldr	r0, [sp]		/* Get the SPSR from stack */	;\
	mrs	r4, cpsr		/* save CPSR */			;\
	orr	r1, r4, #(I32_bit|F32_bit)				;\
	msr	cpsr_c, r1		/* Disable interrupts */	;\
	and	r0, r0, #(PSR_MODE)	/* Returning to USR mode? */	;\
	teq	r0, #(PSR_USR32_MODE)					;\
	bne	2f			/* Nope, get out now */		;\
	bic	r4, r4, #(I32_bit|F32_bit)				;\
1:	GET_CURTHREAD_PTR(r5)						;\
	ldr	r5, [r5]						;\
	ldr	r1, [r5, #(TD_FLAGS)]					;\
	and	r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED)		;\
	teq	r1, #0x00000000						;\
	beq	2f			/* Nope. Just bail */		;\
	msr	cpsr_c, r4		/* Restore interrupts */	;\
	mov	r0, sp							;\
	bl	_C_LABEL(ast)		/* ast(frame) */		;\
	orr	r0, r4, #(I32_bit|F32_bit)				;\
	msr	cpsr_c, r0						;\
	b	1b							;\
2:
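
/*
 * Note: DO_AST expects the trapframe to be at sp (it reloads the saved
 * SPSR from [sp]) and is meant to run after the C handler returns and
 * before PULLFRAME or PULLFRAMEFROMSVCANDEXIT, as in the sketches
 * above.  It uses r0, r1, r4, r5 and lr as scratch registers, and on
 * pre-ARMv6 kernels AST_LOCALS must be instantiated nearby so the
 * .Lcurthread literal is within load range.
 */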

#endif /* LOCORE */

#endif /* _KERNEL */

#endif /* !_MACHINE_ASMACROS_H_ */