Pull in r227087 from upstream llvm trunk (by Vasileios Kalintiris):

[mips] Add tests for bitwise binary and integer arithmetic operators.

Reviewers: dsanders

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D7125

Pull in r227089 from upstream llvm trunk (by Vasileios Kalintiris):

[mips] Enable arithmetic and binary operations for the i128 data type.

Summary:
This patch adds support for some operations that were missing from
128-bit integer types (add/sub/mul/sdiv/udiv... etc.). With these
changes we can support the __int128_t and __uint128_t data types
from C/C++.

Depends on D7125

Reviewers: dsanders

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D7143

This fixes "error in backend" messages, when compiling parts of
compiler-rt using 128-bit integer types for mips64.

Reported by: sbruno
PR: 197259

Introduced here: http://svnweb.freebsd.org/changeset/base/278367

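For context, a minimal sketch of the kind of C code this merge enables on
mips64 (hypothetical example, not part of the changeset; the file name and
target triple are assumptions). Before this change, lowering 128-bit
operations like the ones below could abort with "error in backend"; with
it, the multiply is selected as dmult/dmultu plus daddu sequences (dmul/
dmuhu on MIPS64r6) and the unsigned division is expanded to a __udivti3
libcall, as the new tests under test/CodeGen/Mips/llvm-ir/ check:

    /* int128-example.c (hypothetical), built e.g. with:
     *   clang -target mips64-unknown-freebsd -O2 -c int128-example.c
     */
    __int128_t mul_add(__int128_t a, __int128_t b, __int128_t c)
    {
        /* 128-bit multiply/add, now selectable by the MIPS64 backend. */
        return a * b + c;
    }

    __uint128_t udiv128(__uint128_t n, __uint128_t d)
    {
        /* 128-bit unsigned division becomes a call to __udivti3. */
        return n / d;
    }
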
Index: lib/Target/Mips/Mips64InstrInfo.td
===================================================================
--- lib/Target/Mips/Mips64InstrInfo.td
+++ lib/Target/Mips/Mips64InstrInfo.td
@@ -440,6 +440,16 @@ def : MipsPat<(i64 (sext_inreg GPR64:$src, i32)),
// bswap MipsPattern
def : MipsPat<(bswap GPR64:$rt), (DSHD (DSBH GPR64:$rt))>;

+// Carry pattern
+def : MipsPat<(subc GPR64:$lhs, GPR64:$rhs),
+ (DSUBu GPR64:$lhs, GPR64:$rhs)>;
+let AdditionalPredicates = [NotDSP] in {
+ def : MipsPat<(addc GPR64:$lhs, GPR64:$rhs),
+ (DADDu GPR64:$lhs, GPR64:$rhs)>;
+ def : MipsPat<(addc GPR64:$lhs, immSExt16:$imm),
+ (DADDiu GPR64:$lhs, imm:$imm)>;
+}
+
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
Index: lib/Target/Mips/MipsISelLowering.cpp
===================================================================
--- lib/Target/Mips/MipsISelLowering.cpp
+++ lib/Target/Mips/MipsISelLowering.cpp
@@ -261,6 +261,9 @@ MipsTargetLowering::MipsTargetLowering(const MipsT
setOperationAction(ISD::LOAD, MVT::i64, Custom);
setOperationAction(ISD::STORE, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
}

if (!Subtarget.isGP64bit()) {
@@ -2017,10 +2020,11 @@ SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDVa
SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
+ MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
+
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
-
- // if shamt < 32:
+ // if shamt < (VT.bits):
// lo = (shl lo, shamt)
// hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
// else:
@@ -2028,18 +2032,17 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SD
// hi = (shl lo, shamt[4:0])
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
DAG.getConstant(-1, MVT::i32));
- SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo,
- DAG.getConstant(1, MVT::i32));
- SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, ShiftRight1Lo,
- Not);
- SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, Shamt);
- SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
- SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, MVT::i32, Lo, Shamt);
+ SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
+ DAG.getConstant(1, VT));
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
DAG.getConstant(0x20, MVT::i32));
- Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
- DAG.getConstant(0, MVT::i32), ShiftLeftLo);
- Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or);
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
+ DAG.getConstant(0, VT), ShiftLeftLo);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);

SDValue Ops[2] = {Lo, Hi};
return DAG.getMergeValues(Ops, DL);
@@ -2050,8 +2053,9 @@ SDValue MipsTargetLowering::lowerShiftRightParts(S
SDLoc DL(Op);
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
+ MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;

- // if shamt < 32:
+ // if shamt < (VT.bits):
// lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
// if isSRA:
// hi = (sra hi, shamt)
@@ -2066,21 +2070,19 @@ SDValue MipsTargetLowering::lowerShiftRightParts(S
// hi = 0
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
DAG.getConstant(-1, MVT::i32));
- SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
- DAG.getConstant(1, MVT::i32));
- SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, ShiftLeft1Hi, Not);
- SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, Shamt);
- SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
- SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, DL, MVT::i32,
- Hi, Shamt);
+ SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
+ DAG.getConstant(1, VT));
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
+ SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
+ DL, VT, Hi, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
DAG.getConstant(0x20, MVT::i32));
- SDValue Shift31 = DAG.getNode(ISD::SRA, DL, MVT::i32, Hi,
- DAG.getConstant(31, MVT::i32));
- Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftRightHi, Or);
- Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
- IsSRA ? Shift31 : DAG.getConstant(0, MVT::i32),
- ShiftRightHi);
+ SDValue Shift31 = DAG.getNode(ISD::SRA, DL, VT, Hi, DAG.getConstant(31, VT));
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
+ IsSRA ? Shift31 : DAG.getConstant(0, VT), ShiftRightHi);

SDValue Ops[2] = {Lo, Hi};
return DAG.getMergeValues(Ops, DL);
Index: lib/Target/Mips/MipsSEISelDAGToDAG.cpp
===================================================================
--- lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -236,13 +236,31 @@ SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigne
(Opc == ISD::SUBC || Opc == ISD::SUBE)) &&
"(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn");

+ unsigned SLTuOp = Mips::SLTu, ADDuOp = Mips::ADDu;
+ if (Subtarget->isGP64bit()) {
+ SLTuOp = Mips::SLTu64;
+ ADDuOp = Mips::DADDu;
+ }
+
SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) };
SDValue LHS = Node->getOperand(0), RHS = Node->getOperand(1);
EVT VT = LHS.getValueType();

- SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, DL, VT, Ops);
- SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, DL, VT,
+ SDNode *Carry = CurDAG->getMachineNode(SLTuOp, DL, VT, Ops);
+
+ if (Subtarget->isGP64bit()) {
+ // On 64-bit targets, sltu produces an i64 but our backend currently says
+ // that SLTu64 produces an i32. We need to fix this in the long run but for
+ // now, just make the DAG type-correct by asserting the upper bits are zero.
+ Carry = CurDAG->getMachineNode(Mips::SUBREG_TO_REG, DL, VT,
+ CurDAG->getTargetConstant(0, VT),
+ SDValue(Carry, 0),
+ CurDAG->getTargetConstant(Mips::sub_32, VT));
+ }
+
+ SDNode *AddCarry = CurDAG->getMachineNode(ADDuOp, DL, VT,
SDValue(Carry, 0), RHS);
+
return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, LHS,
SDValue(AddCarry, 0));
}
@@ -641,7 +659,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selec

case ISD::SUBE: {
SDValue InFlag = Node->getOperand(2);
- Result = selectAddESubE(Mips::SUBu, InFlag, InFlag.getOperand(0), DL, Node);
+ unsigned Opc = Subtarget->isGP64bit() ? Mips::DSUBu : Mips::SUBu;
+ Result = selectAddESubE(Opc, InFlag, InFlag.getOperand(0), DL, Node);
return std::make_pair(true, Result);
}

@@ -649,7 +668,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selec
if (Subtarget->hasDSP()) // Select DSP instructions, ADDSC and ADDWC.
break;
SDValue InFlag = Node->getOperand(2);
- Result = selectAddESubE(Mips::ADDu, InFlag, InFlag.getValue(0), DL, Node);
+ unsigned Opc = Subtarget->isGP64bit() ? Mips::DADDu : Mips::ADDu;
+ Result = selectAddESubE(Opc, InFlag, InFlag.getValue(0), DL, Node);
return std::make_pair(true, Result);
}

Index: lib/Target/Mips/MipsSEISelLowering.cpp
===================================================================
--- lib/Target/Mips/MipsSEISelLowering.cpp
+++ lib/Target/Mips/MipsSEISelLowering.cpp
@@ -122,6 +122,8 @@ MipsSETargetLowering::MipsSETargetLowering(const M
setOperationAction(ISD::MUL, MVT::i64, Custom);

if (Subtarget.isGP64bit()) {
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Custom);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);
setOperationAction(ISD::MULHS, MVT::i64, Custom);
setOperationAction(ISD::MULHU, MVT::i64, Custom);
setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
@@ -200,6 +202,8 @@ MipsSETargetLowering::MipsSETargetLowering(const M
if (Subtarget.hasMips64r6()) {
// MIPS64r6 replaces the accumulator-based multiplies with a three register
// instruction
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::MUL, MVT::i64, Legal);
setOperationAction(ISD::MULHS, MVT::i64, Legal);
setOperationAction(ISD::MULHU, MVT::i64, Legal);
Index: test/CodeGen/Mips/llvm-ir/add.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/add.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/add.ll
|
|
@@ -0,0 +1,115 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
|
|
+
|
|
+define signext i1 @add_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: add_i1:
|
|
+
|
|
+ ; ALL: addu $[[T0:[0-9]+]], $4, $5
|
|
+ ; ALL: sll $[[T0]], $[[T0]], 31
|
|
+ ; ALL: sra $2, $[[T0]], 31
|
|
+
|
|
+ %r = add i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define signext i8 @add_i8(i8 signext %a, i8 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: add_i8:
|
|
+
|
|
+ ; NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5
|
|
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 24
|
|
+ ; NOT-R2-R6: sra $2, $[[T0]], 24
|
|
+
|
|
+ ; R2-R6: addu $[[T0:[0-9]+]], $4, $5
|
|
+ ; R2-R6: seb $2, $[[T0:[0-9]+]]
|
|
+
|
|
+ %r = add i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define signext i16 @add_i16(i16 signext %a, i16 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: add_i16:
|
|
+
|
|
+ ; NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5
|
|
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 16
|
|
+ ; NOT-R2-R6: sra $2, $[[T0]], 16
|
|
+
|
|
+ ; R2-R6: addu $[[T0:[0-9]+]], $4, $5
|
|
+ ; R2-R6: seh $2, $[[T0:[0-9]+]]
|
|
+
|
|
+ %r = add i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @add_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: add_i32:
|
|
+
|
|
+ ; ALL: addu $2, $4, $5
|
|
+
|
|
+ %r = add i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @add_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: add_i64:
|
|
+
|
|
+ ; GP32: addu $3, $5, $7
|
|
+ ; GP32: sltu $[[T0:[0-9]+]], $3, $7
|
|
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
|
|
+ ; GP32: addu $2, $4, $[[T1]]
|
|
+
|
|
+ ; GP64: daddu $2, $4, $5
|
|
+
|
|
+ %r = add i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @add_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: add_i128:
|
|
+
|
|
+ ; GP32: lw $[[T0:[0-9]+]], 28($sp)
|
|
+ ; GP32: addu $[[T1:[0-9]+]], $7, $[[T0]]
|
|
+ ; GP32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
|
|
+ ; GP32: lw $[[T3:[0-9]+]], 24($sp)
|
|
+ ; GP32: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
|
|
+ ; GP32: addu $[[T5:[0-9]+]], $6, $[[T4]]
|
|
+ ; GP32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
|
|
+ ; GP32: lw $[[T7:[0-9]+]], 20($sp)
|
|
+ ; GP32: addu $[[T8:[0-9]+]], $[[T6]], $[[T7]]
|
|
+ ; GP32: lw $[[T9:[0-9]+]], 16($sp)
|
|
+ ; GP32: addu $3, $5, $[[T8]]
|
|
+ ; GP32: sltu $[[T10:[0-9]+]], $3, $[[T7]]
|
|
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T9]]
|
|
+ ; GP32: addu $2, $4, $[[T11]]
|
|
+ ; GP32: move $4, $[[T5]]
|
|
+ ; GP32: move $5, $[[T1]]
|
|
+
|
|
+ ; GP64: daddu $3, $5, $7
|
|
+ ; GP64: sltu $[[T0:[0-9]+]], $3, $7
|
|
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
|
|
+ ; GP64: daddu $2, $4, $[[T1]]
|
|
+
|
|
+ %r = add i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/and.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/and.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/and.ll
|
|
@@ -0,0 +1,94 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+
|
|
+define signext i1 @and_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: and_i1:
|
|
+
|
|
+ ; ALL: and $2, $4, $5
|
|
+
|
|
+ %r = and i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define signext i8 @and_i8(i8 signext %a, i8 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: and_i8:
|
|
+
|
|
+ ; ALL: and $2, $4, $5
|
|
+
|
|
+ %r = and i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define signext i16 @and_i16(i16 signext %a, i16 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: and_i16:
|
|
+
|
|
+ ; ALL: and $2, $4, $5
|
|
+
|
|
+ %r = and i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @and_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: and_i32:
|
|
+
|
|
+ ; GP32: and $2, $4, $5
|
|
+
|
|
+ ; GP64: and $[[T0:[0-9]+]], $4, $5
|
|
+ ; GP64: sll $2, $[[T0]], 0
|
|
+
|
|
+ %r = and i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @and_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: and_i64:
|
|
+
|
|
+ ; GP32: and $2, $4, $6
|
|
+ ; GP32: and $3, $5, $7
|
|
+
|
|
+ ; GP64: and $2, $4, $5
|
|
+
|
|
+ %r = and i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @and_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: and_i128:
|
|
+
|
|
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
|
|
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
|
|
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
|
|
+ ; GP32: and $2, $4, $[[T2]]
|
|
+ ; GP32: and $3, $5, $[[T1]]
|
|
+ ; GP32: and $4, $6, $[[T0]]
|
|
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
|
|
+ ; GP32: and $5, $7, $[[T3]]
|
|
+
|
|
+ ; GP64: and $2, $4, $6
|
|
+ ; GP64: and $3, $5, $7
|
|
+
|
|
+ %r = and i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/ashr.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/ashr.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/ashr.ll
|
|
@@ -0,0 +1,188 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=M2 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 -check-prefix=NOT-R2-R6 \
|
|
+; RUN: -check-prefix=32R1-R2
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=32R1-R2 -check-prefix=R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=32R6 -check-prefix=R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=M3 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=64R6 -check-prefix=R2-R6
|
|
+
|
|
+define signext i1 @ashr_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: ashr_i1:
|
|
+
|
|
+ ; ALL: move $2, $4
|
|
+
|
|
+ %r = ashr i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define signext i8 @ashr_i8(i8 signext %a, i8 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: ashr_i8:
|
|
+
|
|
+ ; FIXME: The andi instruction is redundant.
|
|
+ ; ALL: andi $[[T0:[0-9]+]], $5, 255
|
|
+ ; ALL: srav $2, $4, $[[T0]]
|
|
+
|
|
+ %r = ashr i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define signext i16 @ashr_i16(i16 signext %a, i16 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: ashr_i16:
|
|
+
|
|
+ ; FIXME: The andi instruction is redundant.
|
|
+ ; ALL: andi $[[T0:[0-9]+]], $5, 65535
|
|
+ ; ALL: srav $2, $4, $[[T0]]
|
|
+
|
|
+ %r = ashr i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @ashr_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: ashr_i32:
|
|
+
|
|
+ ; ALL: srav $2, $4, $5
|
|
+
|
|
+ %r = ashr i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @ashr_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: ashr_i64:
|
|
+
|
|
+ ; M2: srav $[[T0:[0-9]+]], $4, $7
|
|
+ ; M2: andi $[[T1:[0-9]+]], $7, 32
|
|
+ ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
|
|
+ ; M2: move $3, $[[T0]]
|
|
+ ; M2: srlv $[[T2:[0-9]+]], $5, $7
|
|
+ ; M2: not $[[T3:[0-9]+]], $7
|
|
+ ; M2: sll $[[T4:[0-9]+]], $4, 1
|
|
+ ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
|
|
+ ; M2: or $3, $[[T3]], $[[T2]]
|
|
+ ; M2: $[[BB0]]:
|
|
+ ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]]
|
|
+ ; M2: nop
|
|
+ ; M2: sra $2, $4, 31
|
|
+ ; M2: $[[BB1]]:
|
|
+ ; M2: jr $ra
|
|
+ ; M2: nop
|
|
+
|
|
+ ; 32R1-R2: srlv $[[T0:[0-9]+]], $5, $7
|
|
+ ; 32R1-R2: not $[[T1:[0-9]+]], $7
|
|
+ ; 32R1-R2: sll $[[T2:[0-9]+]], $4, 1
|
|
+ ; 32R1-R2: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
|
|
+ ; 32R1-R2: or $3, $[[T3]], $[[T0]]
|
|
+ ; 32R1-R2: srav $[[T4:[0-9]+]], $4, $7
|
|
+ ; 32R1-R2: andi $[[T5:[0-9]+]], $7, 32
|
|
+ ; 32R1-R2: movn $3, $[[T4]], $[[T5]]
|
|
+ ; 32R1-R2: sra $4, $4, 31
|
|
+ ; 32R1-R2: jr $ra
|
|
+ ; 32R1-R2: movn $2, $4, $[[T5]]
|
|
+
|
|
+ ; 32R6: srav $[[T0:[0-9]+]], $4, $7
|
|
+ ; 32R6: andi $[[T1:[0-9]+]], $7, 32
|
|
+ ; 32R6: seleqz $[[T2:[0-9]+]], $[[T0]], $[[T1]]
|
|
+ ; 32R6: sra $[[T3:[0-9]+]], $4, 31
|
|
+ ; 32R6: selnez $[[T4:[0-9]+]], $[[T3]], $[[T1]]
|
|
+ ; 32R6: or $[[T5:[0-9]+]], $[[T4]], $[[T2]]
|
|
+ ; 32R6: srlv $[[T6:[0-9]+]], $5, $7
|
|
+ ; 32R6: not $[[T7:[0-9]+]], $7
|
|
+ ; 32R6: sll $[[T8:[0-9]+]], $4, 1
|
|
+ ; 32R6: sllv $[[T9:[0-9]+]], $[[T8]], $[[T7]]
|
|
+ ; 32R6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]]
|
|
+ ; 32R6: seleqz $[[T11:[0-9]+]], $[[T10]], $[[T1]]
|
|
+ ; 32R6: selnez $[[T12:[0-9]+]], $[[T0]], $[[T1]]
|
|
+ ; 32R6: jr $ra
|
|
+ ; 32R6: or $3, $[[T0]], $[[T11]]
|
|
+
|
|
+ ; FIXME: The sll instruction below is redundant.
|
|
+ ; GP64: sll $[[T0:[0-9]+]], $5, 0
|
|
+ ; GP64: dsrav $2, $4, $[[T0]]
|
|
+
|
|
+ %r = ashr i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: ashr_i128:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__ashrti3)($gp)
|
|
+
|
|
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; M3: dsrav $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
|
|
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
|
|
+ ; M3: move $3, $[[T1]]
|
|
+ ; M3: dsrlv $[[T4:[0-9]+]], $5, $[[T0]]
|
|
+ ; M3: dsll $[[T5:[0-9]+]], $4, 1
|
|
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
|
|
+ ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
|
|
+ ; M3: or $3, $[[T7]], $[[T4]]
|
|
+ ; M3: $[[BB0]]:
|
|
+ ; M3: beqz $[[T3]], $[[BB1:BB[0-9_]+]]
|
|
+ ; M3: nop
|
|
+ ; M3: dsra $2, $4, 31
|
|
+ ; M3: $[[BB1]]:
|
|
+ ; M3: jr $ra
|
|
+ ; M3: nop
|
|
+
|
|
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; GP64-NOT-R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
|
|
+ ; GP64-NOT-R6: dsll $[[T2:[0-9]+]], $4, 1
|
|
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
|
|
+ ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
|
|
+ ; GP64-NOT-R6: or $3, $[[T4]], $[[T1]]
|
|
+ ; GP64-NOT-R6: dsrav $2, $4, $[[T0]]
|
|
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 32
|
|
+
|
|
+ ; GP64-NOT-R6: movn $3, $2, $[[T5]]
|
|
+ ; GP64-NOT-R6: dsra $[[T6:[0-9]+]], $4, 31
|
|
+ ; GP64-NOT-R6: jr $ra
|
|
+ ; GP64-NOT-R6: movn $2, $[[T6]], $[[T5]]
|
|
+
|
|
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; 64R6: dsrav $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; 64R6: andi $[[T2:[0-9]+]], $[[T0]], 32
|
|
+ ; 64R6: sll $[[T3:[0-9]+]], $[[T2]], 0
|
|
+ ; 64R6: seleqz $[[T4:[0-9]+]], $[[T1]], $[[T3]]
|
|
+ ; 64R6: dsra $[[T5:[0-9]+]], $4, 31
|
|
+ ; 64R6: selnez $[[T6:[0-9]+]], $[[T5]], $[[T3]]
|
|
+ ; 64R6: or $2, $[[T6]], $[[T4]]
|
|
+ ; 64R6: dsrlv $[[T7:[0-9]+]], $5, $[[T0]]
|
|
+ ; 64R6: dsll $[[T8:[0-9]+]], $4, 1
|
|
+ ; 64R6: not $[[T9:[0-9]+]], $[[T0]]
|
|
+ ; 64R6: dsllv $[[T10:[0-9]+]], $[[T8]], $[[T9]]
|
|
+ ; 64R6: or $[[T11:[0-9]+]], $[[T10]], $[[T7]]
|
|
+ ; 64R6: seleqz $[[T12:[0-9]+]], $[[T11]], $[[T3]]
|
|
+ ; 64R6: selnez $[[T13:[0-9]+]], $[[T1]], $[[T3]]
|
|
+ ; 64R6: jr $ra
|
|
+ ; 64R6: or $3, $[[T13]], $[[T12]]
|
|
+
|
|
+ %r = ashr i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/lshr.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/lshr.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/lshr.ll
|
|
@@ -0,0 +1,176 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=M2 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 -check-prefix=NOT-R2-R6 \
|
|
+; RUN: -check-prefix=32R1-R2
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=32R1-R2 -check-prefix=R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=32R6 -check-prefix=R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=M3 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=64R6 -check-prefix=R2-R6
|
|
+
|
|
+define signext i1 @lshr_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: lshr_i1:
|
|
+
|
|
+ ; ALL: move $2, $4
|
|
+
|
|
+ %r = lshr i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define zeroext i8 @lshr_i8(i8 zeroext %a, i8 zeroext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: lshr_i8:
|
|
+
|
|
+ ; ALL: srlv $[[T0:[0-9]+]], $4, $5
|
|
+ ; ALL: andi $2, $[[T0]], 255
|
|
+
|
|
+ %r = lshr i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define zeroext i16 @lshr_i16(i16 zeroext %a, i16 zeroext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: lshr_i16:
|
|
+
|
|
+ ; ALL: srlv $[[T0:[0-9]+]], $4, $5
|
|
+ ; ALL: andi $2, $[[T0]], 65535
|
|
+
|
|
+ %r = lshr i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @lshr_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: lshr_i32:
|
|
+
|
|
+ ; ALL: srlv $2, $4, $5
|
|
+
|
|
+ %r = lshr i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @lshr_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: lshr_i64:
|
|
+
|
|
+ ; M2: srlv $[[T0:[0-9]+]], $4, $7
|
|
+ ; M2: andi $[[T1:[0-9]+]], $7, 32
|
|
+ ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
|
|
+ ; M2: move $3, $[[T0]]
|
|
+ ; M2: srlv $[[T2:[0-9]+]], $5, $7
|
|
+ ; M2: not $[[T3:[0-9]+]], $7
|
|
+ ; M2: sll $[[T4:[0-9]+]], $4, 1
|
|
+ ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
|
|
+ ; M2: or $3, $[[T3]], $[[T2]]
|
|
+ ; M2: $[[BB0]]:
|
|
+ ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
|
|
+ ; M2: addiu $2, $zero, 0
|
|
+ ; M2: move $2, $[[T0]]
|
|
+ ; M2: $[[BB1]]:
|
|
+ ; M2: jr $ra
|
|
+ ; M2: nop
|
|
+
|
|
+ ; 32R1-R2: srlv $[[T0:[0-9]+]], $5, $7
|
|
+ ; 32R1-R2: not $[[T1:[0-9]+]], $7
|
|
+ ; 32R1-R2: sll $[[T2:[0-9]+]], $4, 1
|
|
+ ; 32R1-R2: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
|
|
+ ; 32R1-R2: or $3, $[[T3]], $[[T0]]
|
|
+ ; 32R1-R2: srlv $[[T4:[0-9]+]], $4, $7
|
|
+ ; 32R1-R2: andi $[[T5:[0-9]+]], $7, 32
|
|
+ ; 32R1-R2: movn $3, $[[T4]], $[[T5]]
|
|
+ ; 32R1-R2: jr $ra
|
|
+ ; 32R1-R2: movn $2, $zero, $[[T5]]
|
|
+
|
|
+ ; 32R6: srlv $[[T0:[0-9]+]], $5, $7
|
|
+ ; 32R6: not $[[T1:[0-9]+]], $7
|
|
+ ; 32R6: sll $[[T2:[0-9]+]], $4, 1
|
|
+ ; 32R6: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
|
|
+ ; 32R6: or $[[T4:[0-9]+]], $[[T3]], $[[T0]]
|
|
+ ; 32R6: andi $[[T5:[0-9]+]], $7, 32
|
|
+ ; 32R6: seleqz $[[T6:[0-9]+]], $[[T4]], $[[T3]]
|
|
+ ; 32R6: srlv $[[T7:[0-9]+]], $4, $7
|
|
+ ; 32R6: selnez $[[T8:[0-9]+]], $[[T7]], $[[T5]]
|
|
+ ; 32R6: or $3, $[[T8]], $[[T6]]
|
|
+ ; 32R6: jr $ra
|
|
+ ; 32R6: seleqz $2, $[[T7]], $[[T5]]
|
|
+
|
|
+ ; GP64: sll $[[T0:[0-9]+]], $5, 0
|
|
+ ; GP64: dsrlv $2, $4, $[[T0]]
|
|
+
|
|
+ %r = lshr i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: lshr_i128:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__lshrti3)($gp)
|
|
+
|
|
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; M3: dsrlv $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
|
|
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
|
|
+ ; M3: move $3, $[[T1]]
|
|
+ ; M3: dsrlv $[[T4:[0-9]+]], $5, $[[T0]]
|
|
+ ; M3: dsll $[[T5:[0-9]+]], $4, 1
|
|
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
|
|
+ ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
|
|
+ ; M3: or $3, $[[T7]], $[[T4]]
|
|
+ ; M3: $[[BB0]]:
|
|
+ ; M3: bnez $[[T3]], $[[BB1:BB[0-9_]+]]
|
|
+ ; M3: daddiu $2, $zero, 0
|
|
+ ; M3: move $2, $[[T1]]
|
|
+ ; M3: $[[BB1]]:
|
|
+ ; M3: jr $ra
|
|
+ ; M3: nop
|
|
+
|
|
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; GP64-NOT-R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
|
|
+ ; GP64-NOT-R6: dsll $[[T2:[0-9]+]], $4, 1
|
|
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
|
|
+ ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
|
|
+ ; GP64-NOT-R6: or $3, $[[T4]], $[[T1]]
|
|
+ ; GP64-NOT-R6: dsrlv $2, $4, $[[T0]]
|
|
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 32
|
|
+ ; GP64-NOT-R6: movn $3, $2, $[[T5]]
|
|
+ ; GP64-NOT-R6: jr $ra
|
|
+ ; GP64-NOT-R6: movn $2, $zero, $1
|
|
+
|
|
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; 64R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
|
|
+ ; 64R6: dsll $[[T2:[0-9]+]], $4, 1
|
|
+ ; 64R6: not $[[T3:[0-9]+]], $[[T0]]
|
|
+ ; 64R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
|
|
+ ; 64R6: or $[[T5:[0-9]+]], $[[T4]], $[[T1]]
|
|
+ ; 64R6: andi $[[T6:[0-9]+]], $[[T0]], 32
|
|
+ ; 64R6: sll $[[T7:[0-9]+]], $[[T6]], 0
|
|
+ ; 64R6: seleqz $[[T8:[0-9]+]], $[[T5]], $[[T7]]
|
|
+ ; 64R6: dsrlv $[[T9:[0-9]+]], $4, $[[T0]]
|
|
+ ; 64R6: selnez $[[T10:[0-9]+]], $[[T9]], $[[T7]]
|
|
+ ; 64R6: or $3, $[[T10]], $[[T8]]
|
|
+ ; 64R6: jr $ra
|
|
+ ; 64R6: seleqz $2, $[[T0]], $[[T7]]
|
|
+
|
|
+ %r = lshr i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/mul.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/mul.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/mul.ll
|
|
@@ -1,19 +1,19 @@
|
|
-; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=M2
|
|
-; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=32R1-R2 -check-prefix=32R1
|
|
-; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=32R1-R2 -check-prefix=32R2
|
|
-; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=32R6
|
|
-; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=M4
|
|
-; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=64R1-R2
|
|
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=64R1-R2
|
|
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
-; RUN: -check-prefix=ALL -check-prefix=64R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=M2 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=32R1-R2 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=32R1-R2 -check-prefix=32R2 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=32R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=M4 -check-prefix=GP64-NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=64R1-R2 -check-prefix=GP64-NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=64R1-R2 -check-prefix=GP64 -check-prefix=GP64-NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL \
|
|
+; RUN: -check-prefix=64R6
|
|
|
|
define signext i1 @mul_i1(i1 signext %a, i1 signext %b) {
|
|
entry:
|
|
@@ -179,3 +179,30 @@ entry:
|
|
%r = mul i64 %a, %b
|
|
ret i64 %r
|
|
}
|
|
+
|
|
+define signext i128 @mul_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: mul_i128:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__multi3)($gp)
|
|
+
|
|
+ ; GP64-NOT-R6: dmult $4, $7
|
|
+ ; GP64-NOT-R6: mflo $[[T0:[0-9]+]]
|
|
+ ; GP64-NOT-R6: dmult $5, $6
|
|
+ ; GP64-NOT-R6: mflo $[[T1:[0-9]+]]
|
|
+ ; GP64-NOT-R6: dmultu $5, $7
|
|
+ ; GP64-NOT-R6: mflo $3
|
|
+ ; GP64-NOT-R6: mfhi $[[T2:[0-9]+]]
|
|
+ ; GP64-NOT-R6: daddu $[[T3:[0-9]+]], $[[T2]], $[[T1]]
|
|
+ ; GP64-NOT-R6: daddu $2, $[[T3:[0-9]+]], $[[T0]]
|
|
+
|
|
+ ; 64R6: dmul $[[T0:[0-9]+]], $5, $6
|
|
+ ; 64R6: dmuhu $[[T1:[0-9]+]], $5, $7
|
|
+ ; 64R6: daddu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
|
|
+ ; 64R6: dmul $[[T3:[0-9]+]], $4, $7
|
|
+ ; 64R6: daddu $2, $[[T2]], $[[T3]]
|
|
+ ; 64R6: dmul $3, $5, $7
|
|
+
|
|
+ %r = mul i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/or.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/or.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/or.ll
|
|
@@ -0,0 +1,95 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64
|
|
+
|
|
+define signext i1 @or_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: or_i1:
|
|
+
|
|
+ ; ALL: or $2, $4, $5
|
|
+
|
|
+ %r = or i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define signext i8 @or_i8(i8 signext %a, i8 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: or_i8:
|
|
+
|
|
+ ; ALL: or $2, $4, $5
|
|
+
|
|
+ %r = or i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define signext i16 @or_i16(i16 signext %a, i16 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: or_i16:
|
|
+
|
|
+ ; ALL: or $2, $4, $5
|
|
+
|
|
+ %r = or i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @or_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: or_i32:
|
|
+
|
|
+ ; GP32: or $2, $4, $5
|
|
+
|
|
+ ; GP64: or $[[T0:[0-9]+]], $4, $5
|
|
+ ; FIXME: The sll instruction below is redundant.
|
|
+ ; GP64: sll $2, $[[T0]], 0
|
|
+
|
|
+ %r = or i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @or_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: or_i64:
|
|
+
|
|
+ ; GP32: or $2, $4, $6
|
|
+ ; GP32: or $3, $5, $7
|
|
+
|
|
+ ; GP64: or $2, $4, $5
|
|
+
|
|
+ %r = or i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @or_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: or_i128:
|
|
+
|
|
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
|
|
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
|
|
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
|
|
+ ; GP32: or $2, $4, $[[T2]]
|
|
+ ; GP32: or $3, $5, $[[T1]]
|
|
+ ; GP32: or $4, $6, $[[T0]]
|
|
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
|
|
+ ; GP32: or $5, $7, $[[T3]]
|
|
+
|
|
+ ; GP64: or $2, $4, $6
|
|
+ ; GP64: or $3, $5, $7
|
|
+
|
|
+ %r = or i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/sdiv.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/sdiv.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/sdiv.ll
|
|
@@ -0,0 +1,136 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=NOT-R6 -check-prefix=R2 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=R6 -check-prefix=GP32
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP64-NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP64-NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP64-NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=NOT-R6 -check-prefix=R2 -check-prefix=GP64-NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=R6 -check-prefix=64R6
|
|
+
|
|
+define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: sdiv_i1:
|
|
+
|
|
+ ; NOT-R6: div $zero, $4, $5
|
|
+ ; NOT-R6: teq $5, $zero, 7
|
|
+ ; NOT-R6: mflo $[[T0:[0-9]+]]
|
|
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
|
|
+ ; NOT-R6: sll $[[T1:[0-9]+]], $[[T0]], 31
|
|
+ ; NOT-R6: sra $2, $[[T1]], 31
|
|
+
|
|
+ ; R6: div $[[T0:[0-9]+]], $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
|
|
+ ; R6: sll $[[T1:[0-9]+]], $[[T0]], 31
|
|
+ ; R6: sra $2, $[[T1]], 31
|
|
+
|
|
+ %r = sdiv i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: sdiv_i8:
|
|
+
|
|
+ ; NOT-R2-R6: div $zero, $4, $5
|
|
+ ; NOT-R2-R6: teq $5, $zero, 7
|
|
+ ; NOT-R2-R6: mflo $[[T0:[0-9]+]]
|
|
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
|
|
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 24
|
|
+ ; NOT-R2-R6: sra $2, $[[T1]], 24
|
|
+
|
|
+ ; R2: div $zero, $4, $5
|
|
+ ; R2: teq $5, $zero, 7
|
|
+ ; R2: mflo $[[T0:[0-9]+]]
|
|
+ ; FIXME: This instruction is redundant.
|
|
+ ; R2: seb $2, $[[T0]]
|
|
+
|
|
+ ; R6: div $[[T0:[0-9]+]], $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+ ; FIXME: This instruction is redundant.
|
|
+ ; R6: seb $2, $[[T0]]
|
|
+
|
|
+ %r = sdiv i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: sdiv_i16:
|
|
+
|
|
+ ; NOT-R2-R6: div $zero, $4, $5
|
|
+ ; NOT-R2-R6: teq $5, $zero, 7
|
|
+ ; NOT-R2-R6: mflo $[[T0:[0-9]+]]
|
|
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
|
|
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 16
|
|
+ ; NOT-R2-R6: sra $2, $[[T1]], 16
|
|
+
|
|
+ ; R2: div $zero, $4, $5
|
|
+ ; R2: teq $5, $zero, 7
|
|
+ ; R2: mflo $[[T0:[0-9]+]]
|
|
+ ; FIXME: This instruction is redundant since div is signed.
|
|
+ ; R2: seh $2, $[[T0]]
|
|
+
|
|
+ ; R6: div $[[T0:[0-9]+]], $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+ ; FIXME: This instruction is redundant since div is signed.
|
|
+ ; R6: seh $2, $[[T0]]
|
|
+
|
|
+ %r = sdiv i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: sdiv_i32:
|
|
+
|
|
+ ; NOT-R6: div $zero, $4, $5
|
|
+ ; NOT-R6: teq $5, $zero, 7
|
|
+ ; NOT-R6: mflo $2
|
|
+
|
|
+ ; R6: div $2, $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+
|
|
+ %r = sdiv i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: sdiv_i64:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__divdi3)($gp)
|
|
+
|
|
+ ; GP64-NOT-R6: ddiv $zero, $4, $5
|
|
+ ; GP64-NOT-R6: teq $5, $zero, 7
|
|
+ ; GP64-NOT-R6: mflo $2
|
|
+
|
|
+ ; 64R6: ddiv $2, $4, $5
|
|
+ ; 64R6: teq $5, $zero, 7
|
|
+
|
|
+ %r = sdiv i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+ ; ALL-LABEL: sdiv_i128:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__divti3)($gp)
|
|
+
|
|
+ ; GP64-NOT-R6: ld $25, %call16(__divti3)($gp)
|
|
+ ; 64R6: ld $25, %call16(__divti3)($gp)
|
|
+
|
|
+ %r = sdiv i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/shl.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/shl.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/shl.ll
|
|
@@ -0,0 +1,188 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=M2 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 -check-prefix=NOT-R2-R6 \
|
|
+; RUN: -check-prefix=32R1-R2
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=32R1-R2 -check-prefix=R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=32R6 -check-prefix=R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=M3 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
|
|
+; RUN: -check-prefix=64R6 -check-prefix=R2-R6
|
|
+
|
|
+define signext i1 @shl_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: shl_i1:
|
|
+
|
|
+ ; ALL: move $2, $4
|
|
+
|
|
+ %r = shl i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define signext i8 @shl_i8(i8 signext %a, i8 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: shl_i8:
|
|
+
|
|
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 255
|
|
+ ; NOT-R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 24
|
|
+ ; NOT-R2-R6: sra $2, $[[T2]], 24
|
|
+
|
|
+ ; R2-R6: andi $[[T0:[0-9]+]], $5, 255
|
|
+ ; R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; R2-R6: seb $2, $[[T1]]
|
|
+
|
|
+ %r = shl i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define signext i16 @shl_i16(i16 signext %a, i16 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: shl_i16:
|
|
+
|
|
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 65535
|
|
+ ; NOT-R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 16
|
|
+ ; NOT-R2-R6: sra $2, $[[T2]], 16
|
|
+
|
|
+ ; R2-R6: andi $[[T0:[0-9]+]], $5, 65535
|
|
+ ; R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; R2-R6: seh $2, $[[T1]]
|
|
+
|
|
+ %r = shl i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @shl_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: shl_i32:
|
|
+
|
|
+ ; ALL: sllv $2, $4, $5
|
|
+
|
|
+ %r = shl i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @shl_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: shl_i64:
|
|
+
|
|
+ ; M2: sllv $[[T0:[0-9]+]], $5, $7
|
|
+ ; M2: andi $[[T1:[0-9]+]], $7, 32
|
|
+ ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
|
|
+ ; M2: move $2, $[[T0]]
|
|
+ ; M2: sllv $[[T2:[0-9]+]], $4, $7
|
|
+ ; M2: not $[[T3:[0-9]+]], $7
|
|
+ ; M2: srl $[[T4:[0-9]+]], $5, 1
|
|
+ ; M2: srlv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
|
|
+ ; M2: or $2, $[[T2]], $[[T3]]
|
|
+ ; M2: $[[BB0]]:
|
|
+ ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
|
|
+ ; M2: addiu $3, $zero, 0
|
|
+ ; M2: move $3, $[[T0]]
|
|
+ ; M2: $[[BB1]]:
|
|
+ ; M2: jr $ra
|
|
+ ; M2: nop
|
|
+
|
|
+ ; 32R1-R2: sllv $[[T0:[0-9]+]], $4, $7
|
|
+ ; 32R1-R2: not $[[T1:[0-9]+]], $7
|
|
+ ; 32R1-R2: srl $[[T2:[0-9]+]], $5, 1
|
|
+ ; 32R1-R2: srlv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
|
|
+ ; 32R1-R2: or $2, $[[T0]], $[[T3]]
|
|
+ ; 32R1-R2: sllv $[[T4:[0-9]+]], $5, $7
|
|
+ ; 32R1-R2: andi $[[T5:[0-9]+]], $7, 32
|
|
+ ; 32R1-R2: movn $2, $[[T4]], $[[T5]]
|
|
+ ; 32R1-R2: jr $ra
|
|
+ ; 32R1-R2: movn $3, $zero, $[[T5]]
|
|
+
|
|
+ ; 32R6: sllv $[[T0:[0-9]+]], $4, $7
|
|
+ ; 32R6: not $[[T1:[0-9]+]], $7
|
|
+ ; 32R6: srl $[[T2:[0-9]+]], $5, 1
|
|
+ ; 32R6: srlv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
|
|
+ ; 32R6: or $[[T4:[0-9]+]], $[[T0]], $[[T3]]
|
|
+ ; 32R6: andi $[[T5:[0-9]+]], $7, 32
|
|
+ ; 32R6: seleqz $[[T6:[0-9]+]], $[[T4]], $[[T2]]
|
|
+ ; 32R6: sllv $[[T7:[0-9]+]], $5, $7
|
|
+ ; 32R6: selnez $[[T8:[0-9]+]], $[[T7]], $[[T5]]
|
|
+ ; 32R6: or $2, $[[T8]], $[[T6]]
|
|
+ ; 32R6: jr $ra
|
|
+ ; 32R6: seleqz $3, $[[T7]], $[[T5]]
|
|
+
|
|
+ ; GP64: sll $[[T0:[0-9]+]], $5, 0
|
|
+ ; GP64: dsllv $2, $4, $1
|
|
+
|
|
+ %r = shl i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: shl_i128:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__ashlti3)($gp)
|
|
+
|
|
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; M3: dsllv $[[T1:[0-9]+]], $5, $[[T0]]
|
|
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
|
|
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
|
|
+ ; M3: move $2, $[[T1]]
|
|
+ ; M3: dsllv $[[T4:[0-9]+]], $4, $[[T0]]
|
|
+ ; M3: dsrl $[[T5:[0-9]+]], $5, 1
|
|
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
|
|
+ ; M3: dsrlv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
|
|
+ ; M3: or $2, $[[T4]], $[[T7]]
|
|
+ ; M3: $[[BB0]]:
|
|
+ ; M3: bnez $[[T3]], $[[BB1:BB[0-9_]+]]
|
|
+ ; M3: daddiu $3, $zero, 0
|
|
+ ; M3: move $3, $[[T1]]
|
|
+ ; M3: $[[BB1]]:
|
|
+ ; M3: jr $ra
|
|
+ ; M3: nop
|
|
+
|
|
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; GP64-NOT-R6: dsllv $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; GP64-NOT-R6: dsrl $[[T2:[0-9]+]], $5, 1
|
|
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
|
|
+ ; GP64-NOT-R6: dsrlv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
|
|
+ ; GP64-NOT-R6: or $2, $[[T1]], $[[T4]]
|
|
+ ; GP64-NOT-R6: dsllv $3, $5, $[[T0]]
|
|
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 32
|
|
+ ; GP64-NOT-R6: movn $2, $3, $[[T5]]
|
|
+ ; GP64-NOT-R6: jr $ra
|
|
+ ; GP64-NOT-R6: movn $3, $zero, $1
|
|
+
|
|
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
|
|
+ ; 64R6: dsllv $[[T1:[0-9]+]], $4, $[[T0]]
|
|
+ ; 64R6: dsrl $[[T2:[0-9]+]], $5, 1
|
|
+ ; 64R6: not $[[T3:[0-9]+]], $[[T0]]
|
|
+ ; 64R6: dsrlv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
|
|
+ ; 64R6: or $[[T5:[0-9]+]], $[[T1]], $[[T4]]
|
|
+ ; 64R6: andi $[[T6:[0-9]+]], $[[T0]], 32
|
|
+ ; 64R6: sll $[[T7:[0-9]+]], $[[T6]], 0
|
|
+ ; 64R6: seleqz $[[T8:[0-9]+]], $[[T5]], $[[T7]]
|
|
+ ; 64R6: dsllv $[[T9:[0-9]+]], $5, $[[T0]]
|
|
+ ; 64R6: selnez $[[T10:[0-9]+]], $[[T9]], $[[T7]]
|
|
+ ; 64R6: or $2, $[[T10]], $[[T8]]
|
|
+ ; 64R6: jr $ra
|
|
+ ; 64R6: seleqz $3, $[[T0]], $[[T7]]
|
|
+
|
|
+ %r = shl i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/srem.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/srem.ll
|
|
+++ test/CodeGen/Mips/llvm-ir/srem.ll
|
|
@@ -0,0 +1,129 @@
|
|
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
|
|
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
|
|
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=GP32 \
|
|
+; RUN: -check-prefix=R2 -check-prefix=R2-R6 -check-prefix=NOT-R6
|
|
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=GP32 -check-prefix=R6 -check-prefix=R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
|
|
+; RUN: -check-prefix=R2 -check-prefix=R2-R6 \
|
|
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
|
|
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
|
|
+; RUN: -check-prefix=64R6 -check-prefix=R6 -check-prefix=R2-R6
|
|
+
|
|
+define signext i1 @srem_i1(i1 signext %a, i1 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: srem_i1:
|
|
+
|
|
+ ; NOT-R6: div $zero, $4, $5
|
|
+ ; NOT-R6: teq $5, $zero, 7
|
|
+ ; NOT-R6: mfhi $[[T0:[0-9]+]]
|
|
+ ; NOT-R6: sll $[[T1:[0-9]+]], $[[T0]], 31
|
|
+ ; NOT-R6: sra $2, $[[T1]], 31
|
|
+
|
|
+ ; R6: mod $[[T0:[0-9]+]], $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+ ; R6: sll $[[T3:[0-9]+]], $[[T0]], 31
|
|
+ ; R6: sra $2, $[[T3]], 31
|
|
+
|
|
+ %r = srem i1 %a, %b
|
|
+ ret i1 %r
|
|
+}
|
|
+
|
|
+define signext i8 @srem_i8(i8 signext %a, i8 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: srem_i8:
|
|
+
|
|
+ ; NOT-R2-R6: div $zero, $4, $5
|
|
+ ; NOT-R2-R6: teq $5, $zero, 7
|
|
+ ; NOT-R2-R6: mfhi $[[T0:[0-9]+]]
|
|
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 24
|
|
+ ; NOT-R2-R6: sra $2, $[[T1]], 24
|
|
+
|
|
+ ; R2: div $zero, $4, $5
|
|
+ ; R2: teq $5, $zero, 7
|
|
+ ; R2: mfhi $[[T0:[0-9]+]]
|
|
+ ; R2: seb $2, $[[T0]]
|
|
+
|
|
+ ; R6: mod $[[T0:[0-9]+]], $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+ ; R6: seb $2, $[[T0]]
|
|
+
|
|
+ %r = srem i8 %a, %b
|
|
+ ret i8 %r
|
|
+}
|
|
+
|
|
+define signext i16 @srem_i16(i16 signext %a, i16 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: srem_i16:
|
|
+
|
|
+ ; NOT-R2-R6: div $zero, $4, $5
|
|
+ ; NOT-R2-R6: teq $5, $zero, 7
|
|
+ ; NOT-R2-R6: mfhi $[[T0:[0-9]+]]
|
|
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 16
|
|
+ ; NOT-R2-R6: sra $2, $[[T1]], 16
|
|
+
|
|
+ ; R2: div $zero, $4, $5
|
|
+ ; R2: teq $5, $zero, 7
|
|
+ ; R2: mfhi $[[T0:[0-9]+]]
|
|
+ ; R2: seh $2, $[[T1]]
|
|
+
|
|
+ ; R6: mod $[[T0:[0-9]+]], $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+ ; R6: seh $2, $[[T0]]
|
|
+
|
|
+ %r = srem i16 %a, %b
|
|
+ ret i16 %r
|
|
+}
|
|
+
|
|
+define signext i32 @srem_i32(i32 signext %a, i32 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: srem_i32:
|
|
+
|
|
+ ; NOT-R6: div $zero, $4, $5
|
|
+ ; NOT-R6: teq $5, $zero, 7
|
|
+ ; NOT-R6: mfhi $2
|
|
+
|
|
+ ; R6: mod $2, $4, $5
|
|
+ ; R6: teq $5, $zero, 7
|
|
+
|
|
+ %r = srem i32 %a, %b
|
|
+ ret i32 %r
|
|
+}
|
|
+
|
|
+define signext i64 @srem_i64(i64 signext %a, i64 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: srem_i64:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__moddi3)($gp)
|
|
+
|
|
+ ; GP64-NOT-R6: ddiv $zero, $4, $5
|
|
+ ; GP64-NOT-R6: teq $5, $zero, 7
|
|
+ ; GP64-NOT-R6: mfhi $2
|
|
+
|
|
+ ; 64R6: dmod $2, $4, $5
|
|
+ ; 64R6: teq $5, $zero, 7
|
|
+
|
|
+ %r = srem i64 %a, %b
|
|
+ ret i64 %r
|
|
+}
|
|
+
|
|
+define signext i128 @srem_i128(i128 signext %a, i128 signext %b) {
|
|
+entry:
|
|
+; ALL-LABEL: srem_i128:
|
|
+
|
|
+ ; GP32: lw $25, %call16(__modti3)($gp)
|
|
+
|
|
+ ; GP64-NOT-R6: ld $25, %call16(__modti3)($gp)
|
|
+ ; 64-R6: ld $25, %call16(__modti3)($gp)
|
|
+
|
|
+ %r = srem i128 %a, %b
|
|
+ ret i128 %r
|
|
+}
|
|
Index: test/CodeGen/Mips/llvm-ir/sub.ll
|
|
===================================================================
|
|
--- test/CodeGen/Mips/llvm-ir/sub.ll
|
+++ test/CodeGen/Mips/llvm-ir/sub.ll
@@ -0,0 +1,114 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+
+define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: sub_i1:
+
+ ; ALL: subu $[[T0:[0-9]+]], $4, $5
+ ; ALL: sll $[[T0]], $[[T0]], 31
+ ; ALL: sra $2, $[[T0]], 31
+
+ %r = sub i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @sub_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: sub_i8:
+
+ ; NOT-R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 24
+ ; NOT-R2-R6: sra $2, $[[T0]], 24
+
+ ; R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; R2-R6: seb $2, $[[T0:[0-9]+]]
+
+ %r = sub i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @sub_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: sub_i16:
+
+ ; NOT-R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 16
+ ; NOT-R2-R6: sra $2, $[[T0]], 16
+
+ ; R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; R2-R6: seh $2, $[[T0:[0-9]+]]
+
+ %r = sub i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @sub_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: sub_i32:
+
+ ; ALL: subu $2, $4, $5
+
+ %r = sub i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @sub_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: sub_i64:
+
+ ; GP32: subu $3, $5, $7
+ ; GP32: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP32: subu $2, $4, $[[T1]]
+
+ ; GP64: dsubu $2, $4, $5
+
+ %r = sub i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: sub_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 20($sp)
+ ; GP32: sltu $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: addu $[[T3:[0-9]+]], $[[T1]], $[[T2]]
+ ; GP32: lw $[[T4:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T5:[0-9]+]], 28($sp)
+ ; GP32: subu $[[T6:[0-9]+]], $7, $[[T5]]
+ ; GP32: subu $2, $4, $[[T3]]
+ ; GP32: sltu $[[T8:[0-9]+]], $6, $[[T4]]
+ ; GP32: addu $[[T9:[0-9]+]], $[[T8]], $[[T0]]
+ ; GP32: subu $3, $5, $[[T9]]
+ ; GP32: sltu $[[T10:[0-9]+]], $7, $[[T5]]
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T4]]
+ ; GP32: subu $4, $6, $[[T11]]
+ ; GP32: move $5, $[[T6]]
+
+ ; GP64: dsubu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: dsubu $2, $4, $[[T1]]
+
+ %r = sub i128 %a, %b
+ ret i128 %r
+}
Index: test/CodeGen/Mips/llvm-ir/udiv.ll
===================================================================
--- test/CodeGen/Mips/llvm-ir/udiv.ll
+++ test/CodeGen/Mips/llvm-ir/udiv.ll
@@ -0,0 +1,108 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=R6 -check-prefix=64R6
+
+define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
+entry:
+; ALL-LABEL: udiv_i1:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i1 %a, %b
+ ret i1 %r
+}
+
+define zeroext i8 @udiv_i8(i8 zeroext %a, i8 zeroext %b) {
+entry:
+; ALL-LABEL: udiv_i8:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i8 %a, %b
+ ret i8 %r
+}
+
+define zeroext i16 @udiv_i16(i16 zeroext %a, i16 zeroext %b) {
+entry:
+; ALL-LABEL: udiv_i16:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @udiv_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: udiv_i32:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @udiv_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: udiv_i64:
+
+ ; GP32: lw $25, %call16(__udivdi3)($gp)
+
+ ; GP64-NOT-R6: ddivu $zero, $4, $5
+ ; GP64-NOT-R6: teq $5, $zero, 7
+ ; GP64-NOT-R6: mflo $2
+
+ ; 64R6: ddivu $2, $4, $5
+ ; 64R6: teq $5, $zero, 7
+
+ %r = udiv i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @udiv_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: udiv_i128:
+
+ ; GP32: lw $25, %call16(__udivti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__udivti3)($gp)
+ ; 64-R6: ld $25, %call16(__udivti3)($gp)
+
+ %r = udiv i128 %a, %b
+ ret i128 %r
+}
Index: test/CodeGen/Mips/llvm-ir/urem.ll
===================================================================
--- test/CodeGen/Mips/llvm-ir/urem.ll
+++ test/CodeGen/Mips/llvm-ir/urem.ll
@@ -0,0 +1,145 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=GP32 \
+; RUN: -check-prefix=R2 -check-prefix=R2-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=R2 -check-prefix=R2-R6 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=64R6 -check-prefix=R6 -check-prefix=R2-R6
+
+define signext i1 @urem_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: urem_i1:
+
+ ; NOT-R6: andi $[[T0:[0-9]+]], $5, 1
+ ; NOT-R6: andi $[[T1:[0-9]+]], $4, 1
+ ; NOT-R6: divu $zero, $[[T1]], $[[T0]]
+ ; NOT-R6: teq $[[T0]], $zero, 7
+ ; NOT-R6: mfhi $[[T2:[0-9]+]]
+ ; NOT-R6: sll $[[T3:[0-9]+]], $[[T2]], 31
+ ; NOT-R6: sra $2, $[[T3]], 31
+
+ ; R6: andi $[[T0:[0-9]+]], $5, 1
+ ; R6: andi $[[T1:[0-9]+]], $4, 1
+ ; R6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; R6: teq $[[T0]], $zero, 7
+ ; R6: sll $[[T3:[0-9]+]], $[[T2]], 31
+ ; R6: sra $2, $[[T3]], 31
+
+ %r = urem i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @urem_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: urem_i8:
+
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 255
+ ; NOT-R2-R6: andi $[[T1:[0-9]+]], $4, 255
+ ; NOT-R2-R6: divu $zero, $[[T1]], $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mfhi $[[T2:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T3:[0-9]+]], $[[T2]], 24
+ ; NOT-R2-R6: sra $2, $[[T3]], 24
+
+ ; R2: andi $[[T0:[0-9]+]], $5, 255
+ ; R2: andi $[[T1:[0-9]+]], $4, 255
+ ; R2: divu $zero, $[[T1]], $[[T0]]
+ ; R2: teq $[[T0]], $zero, 7
+ ; R2: mfhi $[[T2:[0-9]+]]
+ ; R2: seb $2, $[[T2]]
+
+ ; R6: andi $[[T0:[0-9]+]], $5, 255
+ ; R6: andi $[[T1:[0-9]+]], $4, 255
+ ; R6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; R6: teq $[[T0]], $zero, 7
+ ; R6: seb $2, $[[T2]]
+
+ %r = urem i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @urem_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: urem_i16:
+
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 65535
+ ; NOT-R2-R6: andi $[[T1:[0-9]+]], $4, 65535
+ ; NOT-R2-R6: divu $zero, $[[T1]], $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mfhi $[[T2:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T3:[0-9]+]], $[[T2]], 16
+ ; NOT-R2-R6: sra $2, $[[T3]], 16
+
+ ; R2: andi $[[T0:[0-9]+]], $5, 65535
+ ; R2: andi $[[T1:[0-9]+]], $4, 65535
+ ; R2: divu $zero, $[[T1]], $[[T0]]
+ ; R2: teq $[[T0]], $zero, 7
+ ; R2: mfhi $[[T3:[0-9]+]]
+ ; R2: seh $2, $[[T2]]
+
+ ; R6: andi $[[T0:[0-9]+]], $5, 65535
+ ; R6: andi $[[T1:[0-9]+]], $4, 65535
+ ; R6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; R6: teq $[[T0]], $zero, 7
+ ; R6: seh $2, $[[T2]]
+
+ %r = urem i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @urem_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: urem_i32:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mfhi $2
+
+ ; R6: modu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = urem i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @urem_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: urem_i64:
+
+ ; GP32: lw $25, %call16(__umoddi3)($gp)
+
+ ; GP64-NOT-R6: ddivu $zero, $4, $5
+ ; GP64-NOT-R6: teq $5, $zero, 7
+ ; GP64-NOT-R6: mfhi $2
+
+ ; 64R6: dmodu $2, $4, $5
+ ; 64R6: teq $5, $zero, 7
+
+ %r = urem i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @urem_i128(i128 signext %a, i128 signext %b) {
+entry:
+ ; ALL-LABEL: urem_i128:
+
+ ; GP32: lw $25, %call16(__umodti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__umodti3)($gp)
+ ; 64-R6: ld $25, %call16(__umodti3)($gp)
+
+ %r = urem i128 %a, %b
+ ret i128 %r
+}
Index: test/CodeGen/Mips/llvm-ir/xor.ll
===================================================================
--- test/CodeGen/Mips/llvm-ir/xor.ll
+++ test/CodeGen/Mips/llvm-ir/xor.ll
@@ -0,0 +1,94 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+
+define signext i1 @xor_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: xor_i1:
+
+ ; ALL: xor $2, $4, $5
+
+ %r = xor i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @xor_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: xor_i8:
+
+ ; ALL: xor $2, $4, $5
+
+ %r = xor i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @xor_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: xor_i16:
+
+ ; ALL: xor $2, $4, $5
+
+ %r = xor i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @xor_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: xor_i32:
+
+ ; GP32: xor $2, $4, $5
+
+ ; GP64: xor $[[T0:[0-9]+]], $4, $5
+ ; GP64: sll $2, $[[T0]], 0
+
+ %r = xor i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @xor_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: xor_i64:
+
+ ; GP32: xor $2, $4, $6
+ ; GP32: xor $3, $5, $7
+
+ ; GP64: xor $2, $4, $5
+
+ %r = xor i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @xor_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: xor_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: xor $2, $4, $[[T2]]
+ ; GP32: xor $3, $5, $[[T1]]
+ ; GP32: xor $4, $6, $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
+ ; GP32: xor $5, $7, $[[T3]]
+
+ ; GP64: xor $2, $4, $6
+ ; GP64: xor $3, $5, $7
+
+ %r = xor i128 %a, %b
+ ret i128 %r
+}