Import a basic implementation of restartable atomic sequences (RAS) to
provide atomic operations to userland. This is sufficient for UP only,
but SMP support is still far away.
This commit is contained in:
cognet 2005-04-07 22:03:04 +00:00
parent 0eff5864a9
commit 64c6be3ab3
2 changed files with 155 additions and 45 deletions

View File

@ -113,6 +113,21 @@
add r0, sp, #(4*13); /* Adjust the stack pointer */ \
stmia r0, {r13-r14}^; /* Push the user mode registers */ \
mov r0, r0; /* NOP for previous instruction */ \
ldr r5, =0xe0000004; /* Check if there's any RAS */ \
ldr r3, [r5]; \
cmp r3, #0; /* Is the update needed ? */ \
beq 1f; \
ldr lr, [r0, #16]; \
ldr r1, =0xe0000008; \
ldr r4, [r1]; /* Get the end of the RAS */ \
mov r2, #0; /* Reset the magic addresses */ \
str r2, [r5]; \
str r2, [r1]; \
cmp lr, r3; /* Were we in the RAS ? */ \
blt 1f; \
cmp lr, r4; \
strlt r3, [r0, #16]; /* Yes, update the pc */ \
1: \
mrs r0, spsr_all; /* Put the SPSR on the stack */ \
str r0, [sp, #-4]!
@ -168,7 +183,6 @@
#define AST_LOCALS ;\
.Lcurthread: ;\
.word _C_LABEL(__pcpu) + PC_CURTHREAD
#endif /* LOCORE */

View File

@ -71,6 +71,9 @@
: "cc" ); \
} while(0)
/*
 * Magic addresses used by the restartable atomic sequence (RAS) support:
 * userland publishes the start/end address of its atomic sequence at these
 * locations, and the kernel rewinds the saved pc to the start if the
 * sequence was interrupted.  NOTE(review): these values must match the
 * constants checked in the locore exception-entry code -- confirm there.
 */
#define ARM_RAS_START 0xe0000004
#define ARM_RAS_END 0xe0000008
static __inline uint32_t
__swp(uint32_t val, volatile uint32_t *ptr)
{
@ -80,39 +83,162 @@ __swp(uint32_t val, volatile uint32_t *ptr)
}
/*
 * atomic_op(v, op, p): perform "*p = *p op v" atomically (UP only) by
 * emulating compare-and-swap with the swp-based __swp(): compute the new
 * value from an expected old value, swap it in, and retry if the value
 * swapped out is not the one the computation was based on.  Evaluates to
 * the old value of *p.
 * NOTE(review): between a failed swp and the retry, other observers can
 * briefly see a value computed from a stale snapshot.
 */
#define atomic_op(v, op, p) ({ \
	uint32_t e, r, s; /* e = expected, r = swapped-out, s = new value */ \
	for (e = *(volatile uint32_t *)p;; e = r) { \
		s = e op v; \
		r = __swp(s, p); /* swap s in, get back the previous value */ \
		if (r == e) /* nobody raced us: op was applied atomically */ \
			break; \
		} \
	e; \
	})
#ifdef _KERNEL
static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{

	/*
	 * Set the bits in setmask.  On UP, masking interrupts around the
	 * read-modify-write is sufficient for atomicity.
	 * (The superseded swp-based "atomic_op(setmask, |, address);" line
	 * left over from the old implementation is removed: it performed
	 * the OR a second time.)
	 */
	__with_interrupts_disabled(*address |= setmask);
}
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{

	/*
	 * Clear the bits in clearmask.  On UP, masking interrupts around
	 * the read-modify-write is sufficient for atomicity.
	 * (The superseded swp-based "atomic_op(clearmask, &~, address);"
	 * leftover line is removed: it performed the clear a second time.)
	 */
	__with_interrupts_disabled(*address &= ~clearmask);
}
static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
	int ret = 0;

	/*
	 * Compare-and-set: store newval into *p iff *p equals cmpval, and
	 * report whether the store happened.  Interrupts are masked across
	 * the whole test-and-store, which makes it atomic on UP.
	 */
	__with_interrupts_disabled(
	{
		if (*p == cmpval) {
			*p = newval;
			ret = 1;
		}
	});
	return (ret);
}
static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{

	/* Add val to *p; interrupts masked, so atomic on UP. */
	__with_interrupts_disabled(*p = *p + val);
}
static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{

	/* Subtract val from *p; interrupts masked, so atomic on UP. */
	__with_interrupts_disabled(*p = *p - val);
}
#else /* !_KERNEL */
static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
	register int done, ras_start;

	/*
	 * Userland compare-and-set as a restartable atomic sequence (RAS):
	 * publish the end (2f) and then the start (1b) of the sequence at
	 * the kernel-known magic addresses, so that if the sequence is
	 * interrupted the kernel restarts it from 1: instead of resuming
	 * in the middle.  The streq to *p is the committing store.
	 * NOTE(review): the literals 0xe0000004/0xe0000008 duplicate
	 * ARM_RAS_START/ARM_RAS_END -- keep them in sync.
	 */
	__asm __volatile("1:\n"
	    "mov	%0, #0xe0000008\n"	/* ARM_RAS_END */
	    "adr	%1, 2f\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 1b\n"
	    "mov	%0, #0xe0000004\n"	/* ARM_RAS_START */
	    "str	%1, [%0]\n"
	    "ldr	%1, [%2]\n"
	    "cmp	%1, %3\n"
	    "streq	%4, [%2]\n"
	    "2:\n"
	    "moveq	%1, #1\n"
	    "movne	%1, #0\n"
	    : "=r" (ras_start), "=r" (done),
	      "+r" (p), "+r" (cmpval), "+r" (newval)
	    :
	    /*
	     * "memory": the asm loads and stores *p and the RAS words, so
	     * the compiler must not cache memory values across it.
	     * "cc": cmp alters the condition flags.
	     */
	    : "memory", "cc");
	return (done);
}
static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
	int ras_start, start;

	/*
	 * Userland atomic add as a restartable atomic sequence: publish
	 * the end (2f) then the start (1b) of the sequence at the magic
	 * RAS addresses, then do the load/add/store; the kernel restarts
	 * an interrupted sequence from 1:.  The str to *p commits.
	 */
	__asm __volatile("1:\n"
	    "mov	%0, #0xe0000008\n"	/* ARM_RAS_END */
	    "adr	%1, 2f\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 1b\n"
	    "mov	%0, #0xe0000004\n"	/* ARM_RAS_START */
	    "str	%1, [%0]\n"
	    "ldr	%1, [%2]\n"
	    "add	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    : "=r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
	    :
	    /* "memory": the asm reads and writes *p and the RAS words. */
	    : "memory");
}
static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
	int ras_start, start;

	/*
	 * Userland atomic subtract as a restartable atomic sequence; see
	 * atomic_add_32() for the RAS bookkeeping.  The str to *p commits.
	 */
	__asm __volatile("1:\n"
	    "mov	%0, #0xe0000008\n"	/* ARM_RAS_END */
	    "adr	%1, 2f\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 1b\n"
	    "mov	%0, #0xe0000004\n"	/* ARM_RAS_START */
	    "str	%1, [%0]\n"
	    "ldr	%1, [%2]\n"
	    "sub	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    : "=r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
	    :
	    /* "memory": the asm reads and writes *p and the RAS words. */
	    : "memory");
}
static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
	int ras_start, start;

	/*
	 * Userland atomic bit-set (orr) as a restartable atomic sequence;
	 * see atomic_add_32() for the RAS bookkeeping.
	 */
	__asm __volatile("1:\n"
	    "mov	%0, #0xe0000008\n"	/* ARM_RAS_END */
	    "adr	%1, 2f\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 1b\n"
	    "mov	%0, #0xe0000004\n"	/* ARM_RAS_START */
	    "str	%1, [%0]\n"
	    "ldr	%1, [%2]\n"
	    "orr	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    : "=r" (ras_start), "=r" (start), "+r" (address), "+r" (setmask)
	    :
	    /* "memory": the asm reads and writes *address and the RAS words. */
	    : "memory");
}
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
	int ras_start, start;

	/*
	 * Userland atomic bit-clear (bic) as a restartable atomic
	 * sequence; see atomic_add_32() for the RAS bookkeeping.
	 */
	__asm __volatile("1:\n"
	    "mov	%0, #0xe0000008\n"	/* ARM_RAS_END */
	    "adr	%1, 2f\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 1b\n"
	    "mov	%0, #0xe0000004\n"	/* ARM_RAS_START */
	    "str	%1, [%0]\n"
	    "ldr	%1, [%2]\n"
	    "bic	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    : "=r" (ras_start), "=r" (start), "+r" (address), "+r" (clearmask)
	    :
	    /* "memory": the asm reads and writes *address and the RAS words. */
	    : "memory");
}
#endif /* _KERNEL */
static __inline int
atomic_load_32(volatile uint32_t *v)
{

	/*
	 * A single aligned 32-bit load needs no RAS or interrupt masking.
	 * (The stale "return (atomic_op(0, +, v));" line from the old
	 * swp-based implementation is removed: it made the second return
	 * unreachable and did a needless read-modify-write.)
	 * NOTE(review): return type is int although a uint32_t is loaded.
	 */
	return (*v);
}
static __inline void
atomic_store_32(volatile uint32_t *dst, uint32_t src)
{

	/*
	 * A single aligned 32-bit store needs no RAS or interrupt masking.
	 * (The stale "__swp(src, dst);" line from the old implementation
	 * is removed: it stored src a second time via swp.)
	 */
	*dst = src;
}
static __inline uint32_t
@ -122,36 +248,6 @@ atomic_readandclear_32(volatile u_int32_t *p)
return (__swp(0, p));
}
#ifdef _KERNEL
/*
 * Compare-and-set: store newval into *p iff *p equals cmpval; return 1 if
 * the store was done, 0 otherwise.  Atomic on UP because interrupts are
 * masked around the whole test-and-store.
 */
static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
{
	int done;
	__with_interrupts_disabled(
	{
		if (*p == cmpval) {
			*p = newval;
			done = 1; /* value matched, newval stored */
		} else
			done = 0; /* *p did not match cmpval */
	});
	return (done);
}
#endif
/* Atomically add val to *p via the swp-based atomic_op() retry loop. */
static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
	atomic_op(val, +, p);
}
/* Atomically subtract val from *p via the swp-based atomic_op() retry loop. */
static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
	atomic_op(val, -, p);
}
#undef __with_interrupts_disabled
#endif /* _LOCORE */