freebsd-skq/contrib/llvm/patches/patch-r262261-llvm-r198286-sparc.diff

Pull in r198286 from upstream llvm trunk (by Venkatraman Govindaraju):

  [Sparc] Handle atomic loads/stores in sparc backend.

Introduced here: http://svnweb.freebsd.org/changeset/base/262261
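
For orientation (an editorial sketch, not part of the patch): monotonic atomic
loads and stores now select to plain ld/st via the patterns below, stronger
orderings get explicit fences through setInsertFencesForAtomic, atomicrmw xchg
maps to SWAP, and cmpxchg maps to CAS/CASX on V9. A minimal LLVM IR example in
the old typed-pointer syntax also used by the test file at the bottom; the
function and value names are made up:

  define i32 @xchg_then_load(i32* %p) {
  entry:
    ; atomicrmw xchg -> swap [%o0], ... (SWAPrr/SWAPri patterns)
    %old = atomicrmw xchg i32* %p, i32 1 monotonic
    ; acquire load -> ld [%o0], ... followed by a membar on sparcv9,
    ; matching the CHECK lines in the test below
    %v = load atomic i32* %p acquire, align 4
    %r = add i32 %old, %v
    ret i32 %r
  }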
Index: lib/Target/Sparc/SparcInstrInfo.td
===================================================================
--- lib/Target/Sparc/SparcInstrInfo.td
+++ lib/Target/Sparc/SparcInstrInfo.td
@@ -975,6 +975,33 @@ let rs1 = 0 in
def : Pat<(ctpop i32:$src),
(POPCrr (SRLri $src, 0))>;
+// Memory barriers, atomic swap, and compare-and-swap.
+let hasSideEffects = 1, rd = 0, rs1 = 0b01111, rs2 = 0 in
+ def STBAR : F3_1<2, 0b101000, (outs), (ins), "stbar", []>;
+
+let Predicates = [HasV9], hasSideEffects = 1, rd = 0, rs1 = 0b01111 in
+ def MEMBARi : F3_2<2, 0b101000, (outs), (ins i32imm:$simm13),
+ "membar $simm13", []>;
+
+let Constraints = "$val = $rd" in {
+ def SWAPrr : F3_1<3, 0b001111,
+ (outs IntRegs:$rd), (ins IntRegs:$val, MEMrr:$addr),
+ "swap [$addr], $rd",
+ [(set i32:$rd, (atomic_swap_32 ADDRrr:$addr, i32:$val))]>;
+ def SWAPri : F3_2<3, 0b001111,
+ (outs IntRegs:$rd), (ins IntRegs:$val, MEMri:$addr),
+ "swap [$addr], $rd",
+ [(set i32:$rd, (atomic_swap_32 ADDRri:$addr, i32:$val))]>;
+}
+
+let Predicates = [HasV9], Constraints = "$swap = $rd" in
+ def CASrr: F3_1<3, 0b111100,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
+ IntRegs:$swap),
+ "cas [$rs1], $rs2, $rd",
+ [(set i32:$rd,
+ (atomic_cmp_swap iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
@@ -1036,4 +1063,17 @@ def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri
def : Pat<(store (i32 0), ADDRrr:$dst), (STrr ADDRrr:$dst, (i32 G0))>;
def : Pat<(store (i32 0), ADDRri:$dst), (STri ADDRri:$dst, (i32 G0))>;
+// Use the store barrier (stbar) for every atomic_fence on V8.
+let Predicates = [HasNoV9] in
+ def : Pat<(atomic_fence imm, imm), (STBAR)>;
+
+// atomic_load_32 addr -> load addr
+def : Pat<(i32 (atomic_load ADDRrr:$src)), (LDrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load ADDRri:$src)), (LDri ADDRri:$src)>;
+
+// atomic_store_32 val, addr -> store val, addr
+def : Pat<(atomic_store ADDRrr:$dst, i32:$val), (STrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>;
+
include "SparcInstr64Bit.td"
Index: lib/Target/Sparc/SparcISelLowering.cpp
===================================================================
--- lib/Target/Sparc/SparcISelLowering.cpp
+++ lib/Target/Sparc/SparcISelLowering.cpp
@@ -1472,10 +1472,30 @@ SparcTargetLowering::SparcTargetLowering(TargetMac
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
}
- // FIXME: There are instructions available for ATOMIC_FENCE
- // on SparcV8 and later.
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
+ // ATOMICs.
+  // FIXME: We insert fences around each atomic operation and generate
+  // sub-optimal code for PSO/TSO. Also, implement other atomicrmw operations.
+ setInsertFencesForAtomic(true);
+
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32,
+                     (Subtarget->isV9() ? Legal : Expand));
+
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
+
+ // Custom Lower Atomic LOAD/STORE
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
+
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
+ }
+
if (!Subtarget->isV9()) {
// SparcV8 does not have FNEGD and FABSD.
setOperationAction(ISD::FNEG, MVT::f64, Custom);
@@ -2723,6 +2743,16 @@ static SDValue LowerUMULO_SMULO(SDValue Op, Select
return DAG.getMergeValues(Ops, 2, dl);
}
+static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
+ // Monotonic load/stores are legal.
+ if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
+ return Op;
+
+ // Otherwise, expand with a fence.
+ return SDValue();
+}
+
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
@@ -2778,6 +2808,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) cons
case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
case ISD::UMULO:
case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
+ case ISD::ATOMIC_LOAD:
+ case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
}
}
Index: lib/Target/Sparc/SparcInstr64Bit.td
===================================================================
--- lib/Target/Sparc/SparcInstr64Bit.td
+++ lib/Target/Sparc/SparcInstr64Bit.td
@@ -415,6 +415,32 @@ def SETHIXi : F2_1<0b100,
"sethi $imm22, $rd",
[(set i64:$rd, SETHIimm:$imm22)]>;
}
+
+// ATOMICS.
+let Predicates = [Is64Bit], Constraints = "$swap = $rd" in {
+ def CASXrr: F3_1<3, 0b111110,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2,
+ I64Regs:$swap),
+ "casx [$rs1], $rs2, $rd",
+ [(set i64:$rd,
+ (atomic_cmp_swap i64:$rs1, i64:$rs2, i64:$swap))]>;
+
+} // Predicates = [Is64Bit], Constraints = ...
+
+let Predicates = [Is64Bit] in {
+
+def : Pat<(atomic_fence imm, imm), (MEMBARi 0xf)>;
+
+// atomic_load_64 addr -> load addr
+def : Pat<(i64 (atomic_load ADDRrr:$src)), (LDXrr ADDRrr:$src)>;
+def : Pat<(i64 (atomic_load ADDRri:$src)), (LDXri ADDRri:$src)>;
+
+// atomic_store_64 val, addr -> store val, addr
+def : Pat<(atomic_store ADDRrr:$dst, i64:$val), (STXrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store ADDRri:$dst, i64:$val), (STXri ADDRri:$dst, $val)>;
+
+} // Predicates = [Is64Bit]
+
// Global addresses, constant pool entries
let Predicates = [Is64Bit] in {
Index: test/CodeGen/SPARC/atomics.ll
===================================================================
--- test/CodeGen/SPARC/atomics.ll
+++ test/CodeGen/SPARC/atomics.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -march=sparcv9 | FileCheck %s
+
+; CHECK-LABEL: test_atomic_i32
+; CHECK: ld [%o0]
+; CHECK: membar
+; CHECK: ld [%o1]
+; CHECK: membar
+; CHECK: membar
+; CHECK: st {{.+}}, [%o2]
+define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
+entry:
+ %0 = load atomic i32* %ptr1 acquire, align 8
+ %1 = load atomic i32* %ptr2 acquire, align 8
+ %2 = add i32 %0, %1
+ store atomic i32 %2, i32* %ptr3 release, align 8
+ ret i32 %2
+}
+
+; CHECK-LABEL: test_atomic_i64
+; CHECK: ldx [%o0]
+; CHECK: membar
+; CHECK: ldx [%o1]
+; CHECK: membar
+; CHECK: membar
+; CHECK: stx {{.+}}, [%o2]
+define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
+entry:
+ %0 = load atomic i64* %ptr1 acquire, align 8
+ %1 = load atomic i64* %ptr2 acquire, align 8
+ %2 = add i64 %0, %1
+ store atomic i64 %2, i64* %ptr3 release, align 8
+ ret i64 %2
+}
+
+; CHECK-LABEL: test_cmpxchg_i32
+; CHECK: or %g0, 123, [[R:%[gilo][0-7]]]
+; CHECK: cas [%o1], %o0, [[R]]
+
+define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
+entry:
+ %b = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic
+ ret i32 %b
+}
+
+; CHECK-LABEL: test_cmpxchg_i64
+; CHECK: or %g0, 123, [[R:%[gilo][0-7]]]
+; CHECK: casx [%o1], %o0, [[R]]
+
+define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
+entry:
+ %b = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic
+ ret i64 %b
+}
+
+; CHECK-LABEL: test_swap_i32
+; CHECK: or %g0, 42, [[R:%[gilo][0-7]]]
+; CHECK: swap [%o1], [[R]]
+
+define i32 @test_swap_i32(i32 %a, i32* %ptr) {
+entry:
+ %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
+ ret i32 %b
+}
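
(Editorial note, not part of the patch: the RUN line above only exercises the
V9 path. A hypothetical way to eyeball the V8 side would be
"llc < atomics.ll -march=sparc", which should emit stbar for the inserted
fences via the HasNoV9 pattern, with cmpxchg expanded rather than selected to
cas, since ATOMIC_CMP_SWAP is only Legal when Subtarget->isV9().)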