8ba34e6179
patch for r263619, and unify all the URLs to point to svnweb.
Pull in r200453 from upstream llvm trunk (by Jakob Stoklund Olesen):

  Implement SPARCv9 atomic_swap_64 with a pseudo.

  The SWAP instruction only exists in a 32-bit variant, but the 64-bit
  atomic swap can be implemented in terms of CASX, like the other atomic
  rmw primitives.

Introduced here: http://svnweb.freebsd.org/changeset/base/262264
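In other words, the 64-bit exchange becomes a compare-and-swap retry loop.
A minimal C++ sketch of that shape, using the GCC/Clang
__sync_val_compare_and_swap builtin as a stand-in for CASX (the names here
are illustrative, not from the patch):

  #include <cstdint>

  // 64-bit atomic swap built from 64-bit compare-and-swap, which is
  // the operation CASX provides in hardware.
  uint64_t swap64_via_cas(uint64_t *addr, uint64_t newval) {
    uint64_t old = *addr;  // initial guess at the current contents
    for (;;) {
      // Atomically: if *addr == old, store newval; either way, return
      // the value that was in *addr just before the operation.
      uint64_t prev = __sync_val_compare_and_swap(addr, old, newval);
      if (prev == old)
        return prev;       // the swap took effect
      old = prev;          // lost a race; retry with the fresh value
    }
  }

Note that the stored value never depends on the loaded one, which is what
lets the expansion below reuse the generic atomic-rmw loop with the update
step elided.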
Index: lib/Target/Sparc/SparcInstr64Bit.td
===================================================================
--- lib/Target/Sparc/SparcInstr64Bit.td
+++ lib/Target/Sparc/SparcInstr64Bit.td
@@ -463,6 +463,14 @@ defm ATOMIC_LOAD_MAX : AtomicRMW<atomic_load_max_
 defm ATOMIC_LOAD_UMIN : AtomicRMW<atomic_load_umin_32, atomic_load_umin_64>;
 defm ATOMIC_LOAD_UMAX : AtomicRMW<atomic_load_umax_32, atomic_load_umax_64>;
 
+// There is no 64-bit variant of SWAP, so use a pseudo.
+let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
+    Defs = [ICC], Predicates = [Is64Bit] in
+def ATOMIC_SWAP_64 : Pseudo<(outs I64Regs:$rd),
+                            (ins ptr_rc:$addr, I64Regs:$rs2), "",
+                            [(set i64:$rd,
+                                  (atomic_swap_64 iPTR:$addr, i64:$rs2))]>;
+
 // Global addresses, constant pool entries
 let Predicates = [Is64Bit] in {
 
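Because the pseudo sets usesCustomInserter, instruction selection keeps
ATOMIC_SWAP_64 as-is and the real expansion happens after selection in
EmitInstrWithCustomInserter, which the next hunk extends.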
Index: lib/Target/Sparc/SparcISelLowering.cpp
===================================================================
--- lib/Target/Sparc/SparcISelLowering.cpp
+++ lib/Target/Sparc/SparcISelLowering.cpp
@@ -1498,7 +1498,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMac
 
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
-    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
   }
@@ -2885,6 +2885,9 @@ SparcTargetLowering::EmitInstrWithCustomInserter(M
   case SP::ATOMIC_LOAD_NAND_64:
     return expandAtomicRMW(MI, BB, SP::ANDXrr);
 
+  case SP::ATOMIC_SWAP_64:
+    return expandAtomicRMW(MI, BB, 0);
+
   case SP::ATOMIC_LOAD_MAX_32:
     return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
   case SP::ATOMIC_LOAD_MAX_64:
@@ -3023,7 +3026,8 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr
 
   // Build the loop block.
   unsigned ValReg = MRI.createVirtualRegister(ValueRC);
-  unsigned UpdReg = MRI.createVirtualRegister(ValueRC);
+  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
+  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
 
   BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
     .addReg(Val0Reg).addMBB(MBB)
@@ -3035,7 +3039,7 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr
     BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
-  } else {
+  } else if (Opcode) {
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg);
   }
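With Opcode == 0, both update branches are skipped, so the CASX inside the
loop stores Rs2Reg itself on every retry: the degenerate loop from the
sketch above, in which the stored value never depends on the loaded one.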
Index: test/CodeGen/SPARC/atomics.ll
===================================================================
--- test/CodeGen/SPARC/atomics.ll
+++ test/CodeGen/SPARC/atomics.ll
@@ -62,6 +62,15 @@ entry:
   ret i32 %b
 }
 
+; CHECK-LABEL: test_swap_i64
+; CHECK: casx [%o1],
+
+define i64 @test_swap_i64(i64 %a, i64* %ptr) {
+entry:
+  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
+  ret i64 %b
+}
+
 ; CHECK-LABEL: test_load_add_32
 ; CHECK: membar
 ; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
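Assuming an llc built from this tree, the new check can be exercised along
the lines of the test's usual pattern, e.g.:

  llc -march=sparcv9 < test/CodeGen/SPARC/atomics.ll | FileCheck test/CodeGen/SPARC/atomics.ll

which should show the i64 xchg compiling down to a casx-based loop.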