From 3dcb4f7556091c887eff153f10f88f891c6efcf6 Mon Sep 17 00:00:00 2001
From: John Birrell
Date: Tue, 9 Jun 1998 08:21:55 +0000
Subject: [PATCH] Atomic lock asm code for the alpha version of libc_r.

---
 lib/libc_r/arch/alpha/_atomic_lock.S          | 57 +++++++++++++++++++
 .../arch/alpha/alpha/_atomic_lock.S           | 57 +++++++++++++++++++
 2 files changed, 114 insertions(+)
 create mode 100644 lib/libc_r/arch/alpha/_atomic_lock.S
 create mode 100644 lib/libpthread/arch/alpha/alpha/_atomic_lock.S

diff --git a/lib/libc_r/arch/alpha/_atomic_lock.S b/lib/libc_r/arch/alpha/_atomic_lock.S
new file mode 100644
index 000000000000..7ca46f782347
--- /dev/null
+++ b/lib/libc_r/arch/alpha/_atomic_lock.S
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 1998 John Birrell .
+ * All rights reserved.
+ * copyright Douglas Santry 1996
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the above copyright is retained
+ * in the source form.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Douglas Santry AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Douglas Santry OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: _atomic_lock.S,v 1.1 1998/04/29 09:36:03 jb Exp $
+ *
+ */
+
+#include "SYS.h"
+
+/*
+ * Atomically lock a location with an identifier, provided the location
+ * is not currently locked.
+ *
+ * long _atomic_lock(long *);
+ * v0 will contain the return value (zero if lock obtained).
+ */
+LEAF(_atomic_lock,0)
+	LDGP(pv)
+
+	/* Get the existing lock value and lock memory: */
+	ldq_l	v0, 0(a0)
+
+	/* Branch if already locked: */
+	bne	v0, already_locked
+
+	/* Not locked, so store 1: */
+	mov	1, t0
+	stq_c	t0, 0(a0)
+
+	/* Obtained the lock: */
+	br	done
+
+already_locked:
+	/* Already locked so put the value back and unlock memory: */
+	stq_c	v0, 0(a0)
+
+done:
+	RET
+END(_atomic_lock)
diff --git a/lib/libpthread/arch/alpha/alpha/_atomic_lock.S b/lib/libpthread/arch/alpha/alpha/_atomic_lock.S
new file mode 100644
index 000000000000..7ca46f782347
--- /dev/null
+++ b/lib/libpthread/arch/alpha/alpha/_atomic_lock.S
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 1998 John Birrell .
+ * All rights reserved.
+ * copyright Douglas Santry 1996
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the above copyright is retained
+ * in the source form.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Douglas Santry AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Douglas Santry OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: _atomic_lock.S,v 1.1 1998/04/29 09:36:03 jb Exp $
+ *
+ */
+
+#include "SYS.h"
+
+/*
+ * Atomically lock a location with an identifier, provided the location
+ * is not currently locked.
+ *
+ * long _atomic_lock(long *);
+ * v0 will contain the return value (zero if lock obtained).
+ */
+LEAF(_atomic_lock,0)
+	LDGP(pv)
+
+	/* Get the existing lock value and lock memory: */
+	ldq_l	v0, 0(a0)
+
+	/* Branch if already locked: */
+	bne	v0, already_locked
+
+	/* Not locked, so store 1: */
+	mov	1, t0
+	stq_c	t0, 0(a0)
+
+	/* Obtained the lock: */
+	br	done
+
+already_locked:
+	/* Already locked so put the value back and unlock memory: */
+	stq_c	v0, 0(a0)
+
+done:
+	RET
+END(_atomic_lock)
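
For readers less familiar with Alpha's load-locked/store-conditional instructions, the ldq_l/stq_c sequence above amounts to an atomic test-and-set on a long. The following C sketch is not part of the patch: the *_sketch names are hypothetical, and the GCC/Clang __atomic builtins stand in for the hand-written assembly to show roughly equivalent semantics, together with the spin loop a caller would typically wrap around _atomic_lock.

/*
 * Illustrative sketch only, assuming a compiler with the GCC/Clang
 * __atomic builtins.  Names ending in _sketch are hypothetical.
 */

/* Return the previous value of *lck: zero means the lock was obtained. */
long
_atomic_lock_sketch(volatile long *lck)
{
	/* Atomically store 1 and hand back whatever was there before. */
	return (__atomic_exchange_n(lck, 1L, __ATOMIC_ACQUIRE));
}

/* A caller that must block would typically spin until the lock is free. */
void
_spinlock_sketch(volatile long *lck)
{
	while (_atomic_lock_sketch(lck) != 0)
		;	/* busy-wait; the holder releases by storing 0 */
}

The __ATOMIC_ACQUIRE ordering in the sketch is an assumption on my part; the assembly above relies on the ldq_l/stq_c pair itself for atomicity and issues no separate memory barrier.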