Update llvm, clang and lldb to release_38 branch r260756.

This commit is contained in:
Dimitry Andric 2016-02-13 15:58:51 +00:00
commit a8bcc4d878
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang380-import/; revision=295600
47 changed files with 1867 additions and 1111 deletions

View File

@ -484,7 +484,7 @@ let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
def int_ppc_altivec_vpkswss : GCCBuiltin<"__builtin_altivec_vpkswss">,
Intrinsic<[llvm_v16i8_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
def int_ppc_altivec_vpkswus : GCCBuiltin<"__builtin_altivec_vpkswus">,
Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],

View File

@ -280,11 +280,7 @@ class Value {
// when using them since you might not get all uses.
// The methods that don't start with materialized_ assert that modules is
// fully materialized.
#ifdef NDEBUG
void assertModuleIsMaterialized() const {}
#else
void assertModuleIsMaterialized() const;
#endif
bool use_empty() const {
assertModuleIsMaterialized();

View File

@ -242,13 +242,6 @@ void DemandedBits::determineLiveOperandBits(
if (OperandNo != 0)
AB = AOut;
break;
case Instruction::ICmp:
// Count the number of leading zeroes in each operand.
ComputeKnownBits(BitWidth, UserI->getOperand(0), UserI->getOperand(1));
auto NumLeadingZeroes = std::min(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
AB = ~APInt::getHighBitsSet(BitWidth, NumLeadingZeroes);
break;
}
}

View File

@ -555,6 +555,11 @@ bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
return true;
O << -MO.getImm();
return false;
case 's': // The GCC deprecated s modifier
if (MO.getType() != MachineOperand::MO_Immediate)
return true;
O << ((32 - MO.getImm()) & 31);
return false;
}
}
return true;

View File

@ -793,16 +793,27 @@ static DebugLocEntry::Value getDebugLocValue(const MachineInstr *MI) {
llvm_unreachable("Unexpected 4-operand DBG_VALUE instruction!");
}
/// Determine whether two variable pieces overlap.
static bool piecesOverlap(const DIExpression *P1, const DIExpression *P2) {
if (!P1->isBitPiece() || !P2->isBitPiece())
return true;
// Determine the relative position of the pieces described by P1 and P2.
// Returns -1 if P1 is entirely before P2, 0 if P1 and P2 overlap,
// 1 if P1 is entirely after P2.
static int pieceCmp(const DIExpression *P1, const DIExpression *P2) {
unsigned l1 = P1->getBitPieceOffset();
unsigned l2 = P2->getBitPieceOffset();
unsigned r1 = l1 + P1->getBitPieceSize();
unsigned r2 = l2 + P2->getBitPieceSize();
// True where [l1,r1[ and [r1,r2[ overlap.
return (l1 < r2) && (l2 < r1);
if (r1 <= l2)
return -1;
else if (r2 <= l1)
return 1;
else
return 0;
}
/// Determine whether two variable pieces overlap.
static bool piecesOverlap(const DIExpression *P1, const DIExpression *P2) {
if (!P1->isBitPiece() || !P2->isBitPiece())
return true;
return pieceCmp(P1, P2) == 0;
}
/// \brief If this and Next are describing different pieces of the same
@ -811,14 +822,32 @@ static bool piecesOverlap(const DIExpression *P1, const DIExpression *P2) {
/// Return true if the merge was successful.
bool DebugLocEntry::MergeValues(const DebugLocEntry &Next) {
if (Begin == Next.Begin) {
auto *Expr = cast_or_null<DIExpression>(Values[0].Expression);
auto *NextExpr = cast_or_null<DIExpression>(Next.Values[0].Expression);
if (Expr->isBitPiece() && NextExpr->isBitPiece() &&
!piecesOverlap(Expr, NextExpr)) {
addValues(Next.Values);
End = Next.End;
return true;
auto *FirstExpr = cast<DIExpression>(Values[0].Expression);
auto *FirstNextExpr = cast<DIExpression>(Next.Values[0].Expression);
if (!FirstExpr->isBitPiece() || !FirstNextExpr->isBitPiece())
return false;
// We can only merge entries if none of the pieces overlap any others.
// In doing so, we can take advantage of the fact that both lists are
// sorted.
for (unsigned i = 0, j = 0; i < Values.size(); ++i) {
for (; j < Next.Values.size(); ++j) {
int res = pieceCmp(cast<DIExpression>(Values[i].Expression),
cast<DIExpression>(Next.Values[j].Expression));
if (res == 0) // The two expressions overlap, we can't merge.
return false;
// Values[i] is entirely before Next.Values[j],
// so go back to the next entry of Values.
else if (res == -1)
break;
// Next.Values[j] is entirely before Values[i], so go on to the
// next entry of Next.Values.
}
}
addValues(Next.Values);
End = Next.End;
return true;
}
return false;
}

View File

@ -313,8 +313,8 @@ void Value::takeName(Value *V) {
ST->reinsertValue(this);
}
#ifndef NDEBUG
void Value::assertModuleIsMaterialized() const {
#ifndef NDEBUG
const GlobalValue *GV = dyn_cast<GlobalValue>(this);
if (!GV)
return;
@ -322,8 +322,10 @@ void Value::assertModuleIsMaterialized() const {
if (!M)
return;
assert(M->isMaterialized());
#endif
}
#ifndef NDEBUG
static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
Constant *C) {
if (!Cache.insert(Expr).second)

View File

@ -90,6 +90,7 @@ def AArch64InstrInfo : InstrInfo;
include "AArch64SchedA53.td"
include "AArch64SchedA57.td"
include "AArch64SchedCyclone.td"
include "AArch64SchedM1.td"
def ProcA35 : SubtargetFeature<"a35", "ARMProcFamily", "CortexA35",
"Cortex-A35 ARM processors",
@ -144,8 +145,7 @@ def : ProcessorModel<"cortex-a57", CortexA57Model, [ProcA57]>;
// FIXME: Cortex-A72 is currently modelled as an Cortex-A57.
def : ProcessorModel<"cortex-a72", CortexA57Model, [ProcA57]>;
def : ProcessorModel<"cyclone", CycloneModel, [ProcCyclone]>;
// FIXME: Exynos-M1 is currently modelled without a specific SchedModel.
def : ProcessorModel<"exynos-m1", NoSchedModel, [ProcExynosM1]>;
def : ProcessorModel<"exynos-m1", ExynosM1Model, [ProcExynosM1]>;
//===----------------------------------------------------------------------===//
// Assembly parser

View File

@ -6689,6 +6689,9 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
}
if (LHS.getValueType().getVectorElementType() == MVT::f16)
return SDValue();
assert(LHS.getValueType().getVectorElementType() == MVT::f32 ||
LHS.getValueType().getVectorElementType() == MVT::f64);

View File

@ -0,0 +1,359 @@
//=- AArch64SchedM1.td - Samsung Exynos-M1 Scheduling Defs ---*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the machine model for Samsung Exynos-M1 to support
// instruction scheduling and other instruction cost heuristics.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// The Exynos-M1 is a traditional superscalar microprocessor with a
// 4-wide in-order stage for decode and dispatch and a wider issue stage.
// The execution units and loads and stores are out-of-order.
def ExynosM1Model : SchedMachineModel {
let IssueWidth = 4; // Up to 4 uops per cycle.
let MinLatency = 0; // OoO.
let MicroOpBufferSize = 96; // ROB size.
let LoopMicroOpBufferSize = 32; // Instruction queue size.
let LoadLatency = 4; // Optimistic load cases.
let MispredictPenalty = 14; // Minimum branch misprediction penalty.
let CompleteModel = 0; // Use the default model otherwise.
}
//===----------------------------------------------------------------------===//
// Define each kind of processor resource and number available on the Exynos-M1,
// which has 9 pipelines, each with its own queue with out-of-order dispatch.
def M1UnitA : ProcResource<2>; // Simple integer
def M1UnitC : ProcResource<1>; // Simple and complex integer
def M1UnitB : ProcResource<2>; // Branch
def M1UnitL : ProcResource<1>; // Load
def M1UnitS : ProcResource<1>; // Store
def M1PipeF0 : ProcResource<1>; // FP #0
def M1PipeF1 : ProcResource<1>; // FP #1
// Sub-units sharing FP pipe #0.
let Super = M1PipeF0 in {
def M1UnitFMAC : ProcResource<1>; // FP multiplication
def M1UnitFCVT : ProcResource<1>; // FP conversion
def M1UnitNAL0 : ProcResource<1>; // Simple vector.
def M1UnitNMISC : ProcResource<1>; // Miscellanea
def M1UnitNCRYPT : ProcResource<1>; // Cryptographic
}
// Sub-units sharing FP pipe #1.
let Super = M1PipeF1 in {
def M1UnitFADD : ProcResource<1>; // Simple FP
let BufferSize = 1 in
def M1UnitFVAR : ProcResource<1>; // FP division & square root (serialized)
def M1UnitNAL1 : ProcResource<1>; // Simple vector.
def M1UnitFST : ProcResource<1>; // FP store
}
// Groups: a write may use either simple-integer unit (A or C) or either
// simple-vector unit (NAL0 or NAL1).
let SchedModel = ExynosM1Model in {
def M1UnitALU : ProcResGroup<[M1UnitA,
M1UnitC]>; // All simple integer.
def M1UnitNALU : ProcResGroup<[M1UnitNAL0,
M1UnitNAL1]>; // All simple vector.
}
let SchedModel = ExynosM1Model in {
//===----------------------------------------------------------------------===//
// Coarse scheduling model for the Exynos-M1.
// Branch instructions.
// TODO: Non-conditional direct branches take zero cycles and units.
def : WriteRes<WriteBr, [M1UnitB]> { let Latency = 1; }
def : WriteRes<WriteBrReg, [M1UnitC]> { let Latency = 1; }
// TODO: Branch and link is much different.
// Arithmetic and logical integer instructions.
def : WriteRes<WriteI, [M1UnitALU]> { let Latency = 1; }
// TODO: Shift over 3 and some extensions take 2 cycles.
def : WriteRes<WriteISReg, [M1UnitALU]> { let Latency = 1; }
def : WriteRes<WriteIEReg, [M1UnitALU]> { let Latency = 1; }
def : WriteRes<WriteIS, [M1UnitALU]> { let Latency = 1; }
// Move instructions.
def : WriteRes<WriteImm, [M1UnitALU]> { let Latency = 1; }
// Divide and multiply instructions.
// TODO: Division blocks the divider inside C.
def : WriteRes<WriteID32, [M1UnitC]> { let Latency = 13; }
def : WriteRes<WriteID64, [M1UnitC]> { let Latency = 21; }
// TODO: Long multiplication take 5 cycles and also the ALU.
// TODO: Multiplication with accumulation can be advanced.
def : WriteRes<WriteIM32, [M1UnitC]> { let Latency = 3; }
// TODO: 64-bit multiplication has a throughput of 1/2.
def : WriteRes<WriteIM64, [M1UnitC]> { let Latency = 4; }
// Miscellaneous instructions.
def : WriteRes<WriteExtr, [M1UnitALU,
M1UnitALU]> { let Latency = 2; }
// TODO: The latency for the post or pre register is 1 cycle.
def : WriteRes<WriteAdr, []> { let Latency = 0; }
// Load instructions.
def : WriteRes<WriteLD, [M1UnitL]> { let Latency = 4; }
// TODO: Extended address requires also the ALU.
def : WriteRes<WriteLDIdx, [M1UnitL]> { let Latency = 5; }
def : WriteRes<WriteLDHi, [M1UnitALU]> { let Latency = 4; }
// Store instructions.
def : WriteRes<WriteST, [M1UnitS]> { let Latency = 1; }
// TODO: Extended address requires also the ALU.
def : WriteRes<WriteSTIdx, [M1UnitS]> { let Latency = 1; }
def : WriteRes<WriteSTP, [M1UnitS]> { let Latency = 1; }
def : WriteRes<WriteSTX, [M1UnitS]> { let Latency = 1; }
// FP data instructions.
def : WriteRes<WriteF, [M1UnitFADD]> { let Latency = 3; }
// TODO: FCCMP is much different.
def : WriteRes<WriteFCmp, [M1UnitNMISC]> { let Latency = 4; }
// TODO: DP takes longer.
def : WriteRes<WriteFDiv, [M1UnitFVAR]> { let Latency = 15; }
// TODO: MACC takes longer.
def : WriteRes<WriteFMul, [M1UnitFMAC]> { let Latency = 4; }
// FP miscellaneous instructions.
// TODO: Conversion between register files is much different.
def : WriteRes<WriteFCvt, [M1UnitFCVT]> { let Latency = 3; }
def : WriteRes<WriteFImm, [M1UnitNALU]> { let Latency = 1; }
// TODO: Copy from FPR to GPR is much different.
def : WriteRes<WriteFCopy, [M1UnitS]> { let Latency = 4; }
// FP load instructions.
// TODO: ASIMD loads are much different.
def : WriteRes<WriteVLD, [M1UnitL]> { let Latency = 5; }
// FP store instructions.
// TODO: ASIMD stores are much different.
def : WriteRes<WriteVST, [M1UnitS, M1UnitFST]> { let Latency = 1; }
// ASIMD FP instructions.
// TODO: Other operations are much different.
def : WriteRes<WriteV, [M1UnitFADD]> { let Latency = 3; }
// Other miscellaneous instructions.
def : WriteRes<WriteSys, []> { let Latency = 1; }
def : WriteRes<WriteBarrier, []> { let Latency = 1; }
def : WriteRes<WriteHint, []> { let Latency = 1; }
//===----------------------------------------------------------------------===//
// Fast forwarding.
// TODO: Add FP register forwarding rules.
def : ReadAdvance<ReadI, 0>;
def : ReadAdvance<ReadISReg, 0>;
def : ReadAdvance<ReadIEReg, 0>;
def : ReadAdvance<ReadIM, 0>;
// Integer multiply-accumulate.
// TODO: The forwarding for WriteIM64 saves actually 3 cycles.
def : ReadAdvance<ReadIMA, 2, [WriteIM32, WriteIM64]>;
def : ReadAdvance<ReadID, 0>;
def : ReadAdvance<ReadExtrHi, 0>;
def : ReadAdvance<ReadAdrBase, 0>;
def : ReadAdvance<ReadVLD, 0>;
//===----------------------------------------------------------------------===//
// Finer scheduling model for the Exynos-M1.
// Custom SchedWriteRes definitions used by the InstRW overrides below.
def M1WriteNEONA : SchedWriteRes<[M1UnitNALU,
M1UnitNALU,
M1UnitFADD]> { let Latency = 9; }
def M1WriteNEONB : SchedWriteRes<[M1UnitNALU,
M1UnitFST]> { let Latency = 5; }
def M1WriteNEONC : SchedWriteRes<[M1UnitNALU,
M1UnitFST]> { let Latency = 6; }
def M1WriteNEOND : SchedWriteRes<[M1UnitNALU,
M1UnitFST,
M1UnitL]> { let Latency = 10; }
def M1WriteNEONE : SchedWriteRes<[M1UnitFCVT,
M1UnitFST]> { let Latency = 8; }
def M1WriteNEONF : SchedWriteRes<[M1UnitFCVT,
M1UnitFST,
M1UnitL]> { let Latency = 13; }
def M1WriteNEONG : SchedWriteRes<[M1UnitNMISC,
M1UnitFST]> { let Latency = 6; }
def M1WriteNEONH : SchedWriteRes<[M1UnitNALU,
M1UnitFST]> { let Latency = 3; }
def M1WriteNEONI : SchedWriteRes<[M1UnitFST,
M1UnitL]> { let Latency = 9; }
def M1WriteALU1 : SchedWriteRes<[M1UnitALU]> { let Latency = 1; }
def M1WriteB : SchedWriteRes<[M1UnitB]> { let Latency = 1; }
// FIXME: This is the worst case, conditional branch and link.
def M1WriteBL : SchedWriteRes<[M1UnitB,
M1UnitALU]> { let Latency = 1; }
// FIXME: This is the worst case, when using LR.
def M1WriteBLR : SchedWriteRes<[M1UnitB,
M1UnitALU,
M1UnitALU]> { let Latency = 2; }
def M1WriteC1 : SchedWriteRes<[M1UnitC]> { let Latency = 1; }
def M1WriteC2 : SchedWriteRes<[M1UnitC]> { let Latency = 2; }
def M1WriteFADD3 : SchedWriteRes<[M1UnitFADD]> { let Latency = 3; }
def M1WriteFCVT3 : SchedWriteRes<[M1UnitFCVT]> { let Latency = 3; }
def M1WriteFCVT4 : SchedWriteRes<[M1UnitFCVT]> { let Latency = 4; }
def M1WriteFMAC4 : SchedWriteRes<[M1UnitFMAC]> { let Latency = 4; }
def M1WriteFMAC5 : SchedWriteRes<[M1UnitFMAC]> { let Latency = 5; }
def M1WriteFVAR15 : SchedWriteRes<[M1UnitFVAR]> { let Latency = 15; }
def M1WriteFVAR23 : SchedWriteRes<[M1UnitFVAR]> { let Latency = 23; }
def M1WriteNALU1 : SchedWriteRes<[M1UnitNALU]> { let Latency = 1; }
def M1WriteNALU2 : SchedWriteRes<[M1UnitNALU]> { let Latency = 2; }
def M1WriteNAL11 : SchedWriteRes<[M1UnitNAL1]> { let Latency = 1; }
def M1WriteNAL12 : SchedWriteRes<[M1UnitNAL1]> { let Latency = 2; }
def M1WriteNAL13 : SchedWriteRes<[M1UnitNAL1]> { let Latency = 3; }
def M1WriteNCRYPT1 : SchedWriteRes<[M1UnitNCRYPT]> { let Latency = 1; }
def M1WriteNCRYPT5 : SchedWriteRes<[M1UnitNCRYPT]> { let Latency = 5; }
def M1WriteNMISC1 : SchedWriteRes<[M1UnitNMISC]> { let Latency = 1; }
def M1WriteNMISC2 : SchedWriteRes<[M1UnitNMISC]> { let Latency = 2; }
def M1WriteNMISC3 : SchedWriteRes<[M1UnitNMISC]> { let Latency = 3; }
def M1WriteNMISC4 : SchedWriteRes<[M1UnitNMISC]> { let Latency = 4; }
def M1WriteS4 : SchedWriteRes<[M1UnitS]> { let Latency = 4; }
def M1WriteTB : SchedWriteRes<[M1UnitC,
M1UnitALU]> { let Latency = 2; }
// Branch instructions
def : InstRW<[M1WriteB ], (instrs Bcc)>;
def : InstRW<[M1WriteBL], (instrs BL)>;
def : InstRW<[M1WriteBLR], (instrs BLR)>;
def : InstRW<[M1WriteC1], (instregex "^CBN?Z[WX]")>;
def : InstRW<[M1WriteTB], (instregex "^TBN?Z[WX]")>;
// Arithmetic and logical integer instructions.
def : InstRW<[M1WriteALU1], (instrs COPY)>;
// Divide and multiply instructions.
// Miscellaneous instructions.
// Load instructions.
// Store instructions.
// FP data instructions.
def : InstRW<[M1WriteNALU1], (instregex "^F(ABS|NEG)[DS]r")>;
def : InstRW<[M1WriteFADD3], (instregex "^F(ADD|SUB)[DS]rr")>;
def : InstRW<[M1WriteNEONG], (instregex "^FCCMPE?[DS]rr")>;
def : InstRW<[M1WriteNMISC4], (instregex "^FCMPE?[DS]r")>;
def : InstRW<[M1WriteFVAR15], (instrs FDIVSrr)>;
def : InstRW<[M1WriteFVAR23], (instrs FDIVDrr)>;
def : InstRW<[M1WriteNMISC2], (instregex "^F(MAX|MIN).+rr")>;
def : InstRW<[M1WriteFMAC4], (instregex "^FN?MUL[DS]rr")>;
def : InstRW<[M1WriteFMAC5], (instregex "^FN?M(ADD|SUB)[DS]rrr")>;
def : InstRW<[M1WriteFCVT3], (instregex "^FRINT.+r")>;
def : InstRW<[M1WriteNEONH], (instregex "^FCSEL[DS]rrr")>;
def : InstRW<[M1WriteFVAR15], (instrs FSQRTSr)>;
def : InstRW<[M1WriteFVAR23], (instrs FSQRTDr)>;
// FP miscellaneous instructions.
def : InstRW<[M1WriteFCVT3], (instregex "^FCVT[DS][DS]r")>;
def : InstRW<[M1WriteNEONF], (instregex "^[FSU]CVT[AMNPZ][SU](_Int)?[SU]?[XW]?[DS]?[rds]i?")>;
def : InstRW<[M1WriteNEONE], (instregex "^[SU]CVTF[SU]")>;
def : InstRW<[M1WriteNALU1], (instregex "^FMOV[DS][ir]")>;
def : InstRW<[M1WriteS4], (instregex "^FMOV[WX][DS](High)?r")>;
def : InstRW<[M1WriteNEONI], (instregex "^FMOV[DS][WX](High)?r")>;
// FP load instructions.
// FP store instructions.
// ASIMD instructions.
def : InstRW<[M1WriteNMISC3], (instregex "^[SU]ABAL?v")>;
def : InstRW<[M1WriteNMISC1], (instregex "^[SU]ABDL?v")>;
def : InstRW<[M1WriteNMISC1], (instregex "^(SQ)?ABSv")>;
def : InstRW<[M1WriteNMISC1], (instregex "^SQNEGv")>;
def : InstRW<[M1WriteNALU1], (instregex "^(ADD|NEG|SUB)v")>;
def : InstRW<[M1WriteNMISC3], (instregex "^[SU]?H(ADD|SUB)v")>;
def : InstRW<[M1WriteNMISC3], (instregex "^[SU]?AD[AD](L|LP|P|W)V?2?v")>;
def : InstRW<[M1WriteNMISC3], (instregex "^[SU]?SUB[LW]2?v")>;
def : InstRW<[M1WriteNMISC3], (instregex "^R?(ADD|SUB)HN?2?v")>;
def : InstRW<[M1WriteNMISC3], (instregex "^[SU]+Q(ADD|SUB)v")>;
def : InstRW<[M1WriteNMISC3], (instregex "^[SU]RHADDv")>;
def : InstRW<[M1WriteNMISC1], (instregex "^CM(EQ|GE|GT|HI|HS|LE|LT)v")>;
def : InstRW<[M1WriteNALU1], (instregex "^CMTSTv")>;
def : InstRW<[M1WriteNALU1], (instregex "^(AND|BIC|EOR|MVNI|NOT|ORN|ORR)v")>;
def : InstRW<[M1WriteNMISC1], (instregex "^[SU](MIN|MAX)v")>;
def : InstRW<[M1WriteNMISC2], (instregex "^[SU](MIN|MAX)Pv")>;
def : InstRW<[M1WriteNMISC3], (instregex "^[SU](MIN|MAX)Vv")>;
def : InstRW<[M1WriteNMISC4], (instregex "^(MUL|SQR?DMULH)v")>;
def : InstRW<[M1WriteNMISC4], (instregex "^ML[AS]v")>;
def : InstRW<[M1WriteNMISC4], (instregex "^(S|U|SQD|SQRD)ML[AS][HL]v")>;
def : InstRW<[M1WriteNMISC4], (instregex "^(S|U|SQD)MULLv")>;
def : InstRW<[M1WriteNAL13], (instregex "^(S|SR|U|UR)SRAv")>;
def : InstRW<[M1WriteNALU1], (instregex "^[SU]?SH(L|LL|R)2?v")>;
def : InstRW<[M1WriteNALU1], (instregex "^S[LR]Iv")>;
def : InstRW<[M1WriteNAL13], (instregex "^[SU]?(Q|QR|R)?SHR(N|U|UN)?2?v")>;
def : InstRW<[M1WriteNAL13], (instregex "^[SU](Q|QR|R)SHLU?v")>;
// ASIMD FP instructions.
def : InstRW<[M1WriteNALU1], (instregex "^F(ABS|NEG)v")>;
def : InstRW<[M1WriteNMISC3], (instregex "^F(ABD|ADD|SUB)v")>;
def : InstRW<[M1WriteNEONA], (instregex "^FADDP")>;
def : InstRW<[M1WriteNMISC1], (instregex "^F(AC|CM)(EQ|GE|GT|LE|LT)v[^1]")>;
def : InstRW<[M1WriteFCVT3], (instregex "^[FVSU]CVTX?[AFLMNPZ][SU]?(_Int)?v")>;
def : InstRW<[M1WriteFVAR15], (instregex "FDIVv.f32")>;
def : InstRW<[M1WriteFVAR23], (instregex "FDIVv2f64")>;
def : InstRW<[M1WriteFVAR15], (instregex "FSQRTv.f32")>;
def : InstRW<[M1WriteFVAR23], (instregex "FSQRTv2f64")>;
def : InstRW<[M1WriteNMISC1], (instregex "^F(MAX|MIN)(NM)?V?v")>;
def : InstRW<[M1WriteNMISC2], (instregex "^F(MAX|MIN)(NM)?Pv")>;
def : InstRW<[M1WriteFMAC4], (instregex "^FMULX?v")>;
def : InstRW<[M1WriteFMAC5], (instregex "^FML[AS]v")>;
def : InstRW<[M1WriteFCVT3], (instregex "^FRINT[AIMNPXZ]v")>;
// ASIMD miscellaneous instructions.
def : InstRW<[M1WriteNALU1], (instregex "^RBITv")>;
def : InstRW<[M1WriteNAL11], (instregex "^(BIF|BIT|BSL)v")>;
def : InstRW<[M1WriteNALU1], (instregex "^CPY")>;
def : InstRW<[M1WriteNEONB], (instregex "^DUPv.+gpr")>;
def : InstRW<[M1WriteNALU1], (instregex "^DUPv.+lane")>;
def : InstRW<[M1WriteNAL13], (instregex "^[SU]?Q?XTU?Nv")>;
def : InstRW<[M1WriteNEONC], (instregex "^INSv.+gpr")>;
def : InstRW<[M1WriteFCVT4], (instregex "^[FU](RECP|RSQRT)Ev")>;
def : InstRW<[M1WriteNMISC1], (instregex "^[FU](RECP|RSQRT)Xv")>;
def : InstRW<[M1WriteFMAC5], (instregex "^F(RECP|RSQRT)Sv")>;
def : InstRW<[M1WriteNALU1], (instregex "^REV(16|32|64)v")>;
// TB[LX] with two to four table registers repeats the one-register write
// once per register.
def : InstRW<[M1WriteNAL11], (instregex "^TB[LX]v8i8One")>;
def : InstRW<[WriteSequence<[M1WriteNAL11], 2>],
(instregex "^TB[LX]v8i8Two")>;
def : InstRW<[WriteSequence<[M1WriteNAL11], 3>],
(instregex "^TB[LX]v8i8Three")>;
def : InstRW<[WriteSequence<[M1WriteNAL11], 4>],
(instregex "^TB[LX]v8i8Four")>;
def : InstRW<[M1WriteNAL12], (instregex "^TB[LX]v16i8One")>;
def : InstRW<[WriteSequence<[M1WriteNAL12], 2>],
(instregex "^TB[LX]v16i8Two")>;
def : InstRW<[WriteSequence<[M1WriteNAL12], 3>],
(instregex "^TB[LX]v16i8Three")>;
def : InstRW<[WriteSequence<[M1WriteNAL12], 4>],
(instregex "^TB[LX]v16i8Four")>;
def : InstRW<[M1WriteNEOND], (instregex "^[SU]MOVv")>;
def : InstRW<[M1WriteNALU1], (instregex "^INSv.+lane")>;
def : InstRW<[M1WriteNALU1], (instregex "^(TRN|UZP)(1|2)(v8i8|v4i16|v2i32)")>;
def : InstRW<[M1WriteNALU2], (instregex "^(TRN|UZP)(1|2)(v16i8|v8i16|v4i32|v2i64)")>;
def : InstRW<[M1WriteNALU1], (instregex "^ZIP(1|2)v")>;
// ASIMD load instructions.
// ASIMD store instructions.
// Cryptography instructions.
def : InstRW<[M1WriteNCRYPT1], (instregex "^AES")>;
def : InstRW<[M1WriteNCRYPT1], (instregex "^PMUL")>;
def : InstRW<[M1WriteNCRYPT1], (instregex "^SHA1(H|SU)")>;
def : InstRW<[M1WriteNCRYPT5], (instregex "^SHA1[CMP]")>;
def : InstRW<[M1WriteNCRYPT1], (instregex "^SHA256SU0")>;
def : InstRW<[M1WriteNCRYPT5], (instregex "^SHA256(H|SU1)")>;
// CRC instructions.
def : InstRW<[M1WriteC2], (instregex "^CRC32")>;
} // SchedModel = ExynosM1Model

View File

@ -183,6 +183,7 @@ def FeatureISAVersion7_0_0 : SubtargetFeatureISAVersion <7,0,0>;
def FeatureISAVersion7_0_1 : SubtargetFeatureISAVersion <7,0,1>;
def FeatureISAVersion8_0_0 : SubtargetFeatureISAVersion <8,0,0>;
def FeatureISAVersion8_0_1 : SubtargetFeatureISAVersion <8,0,1>;
def FeatureISAVersion8_0_3 : SubtargetFeatureISAVersion <8,0,3>;
class SubtargetFeatureLocalMemorySize <int Value> : SubtargetFeature<
"localmemorysize"#Value,
@ -252,7 +253,7 @@ def FeatureSeaIslands : SubtargetFeatureGeneration<"SEA_ISLANDS",
def FeatureVolcanicIslands : SubtargetFeatureGeneration<"VOLCANIC_ISLANDS",
[Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
FeatureWavefrontSize64, FeatureFlatAddressSpace, FeatureGCN,
FeatureGCN3Encoding, FeatureCIInsts, FeatureLDSBankCount32]>;
FeatureGCN3Encoding, FeatureCIInsts]>;
//===----------------------------------------------------------------------===//

View File

@ -53,7 +53,8 @@ class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
ISAVersion7_0_0,
ISAVersion7_0_1,
ISAVersion8_0_0,
ISAVersion8_0_1
ISAVersion8_0_1,
ISAVersion8_0_3
};
private:

View File

@ -128,21 +128,23 @@ def : ProcessorModel<"mullins", SIQuarterSpeedModel,
//===----------------------------------------------------------------------===//
def : ProcessorModel<"tonga", SIQuarterSpeedModel,
[FeatureVolcanicIslands, FeatureSGPRInitBug, FeatureISAVersion8_0_0]
[FeatureVolcanicIslands, FeatureSGPRInitBug, FeatureISAVersion8_0_0,
FeatureLDSBankCount32]
>;
def : ProcessorModel<"iceland", SIQuarterSpeedModel,
[FeatureVolcanicIslands, FeatureSGPRInitBug, FeatureISAVersion8_0_0]
[FeatureVolcanicIslands, FeatureSGPRInitBug, FeatureISAVersion8_0_0,
FeatureLDSBankCount32]
>;
def : ProcessorModel<"carrizo", SIQuarterSpeedModel,
[FeatureVolcanicIslands, FeatureISAVersion8_0_1]
[FeatureVolcanicIslands, FeatureISAVersion8_0_1, FeatureLDSBankCount32]
>;
def : ProcessorModel<"fiji", SIQuarterSpeedModel,
[FeatureVolcanicIslands, FeatureISAVersion8_0_1]
[FeatureVolcanicIslands, FeatureISAVersion8_0_3, FeatureLDSBankCount32]
>;
def : ProcessorModel<"stoney", SIQuarterSpeedModel,
[FeatureVolcanicIslands, FeatureISAVersion8_0_1]
[FeatureVolcanicIslands, FeatureISAVersion8_0_1, FeatureLDSBankCount16]
>;

View File

@ -234,6 +234,7 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
bool IsLoad = TII->get(LoadStoreOp).mayLoad();
bool RanOutOfSGPRs = false;
bool Scavenged = false;
unsigned SOffset = ScratchOffset;
unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
@ -244,6 +245,8 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
if (SOffset == AMDGPU::NoRegister) {
RanOutOfSGPRs = true;
SOffset = AMDGPU::SGPR0;
} else {
Scavenged = true;
}
BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
.addReg(ScratchOffset)
@ -259,10 +262,14 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
Value;
unsigned SOffsetRegState = 0;
if (i + 1 == e && Scavenged)
SOffsetRegState |= RegState::Kill;
BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
.addReg(SubReg, getDefRegState(IsLoad))
.addReg(ScratchRsrcReg)
.addReg(SOffset)
.addReg(SOffset, SOffsetRegState)
.addImm(Offset)
.addImm(0) // glc
.addImm(0) // slc

View File

@ -41,6 +41,9 @@ IsaVersion getIsaVersion(const FeatureBitset &Features) {
if (Features.test(FeatureISAVersion8_0_1))
return {8, 0, 1};
if (Features.test(FeatureISAVersion8_0_3))
return {8, 0, 3};
return {0, 0, 0};
}

View File

@ -747,7 +747,7 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
// If Offset is a multiply-by-constant and it's profitable to extract a shift
// and use it in a shifted operand do so.
if (Offset.getOpcode() == ISD::MUL) {
if (Offset.getOpcode() == ISD::MUL && N.hasOneUse()) {
unsigned PowerOfTwo = 0;
SDValue NewMulConst;
if (canExtractShiftFromMul(Offset, 31, PowerOfTwo, NewMulConst)) {
@ -1422,7 +1422,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
// If OffReg is a multiply-by-constant and it's profitable to extract a shift
// and use it in a shifted operand do so.
if (OffReg.getOpcode() == ISD::MUL) {
if (OffReg.getOpcode() == ISD::MUL && N.hasOneUse()) {
unsigned PowerOfTwo = 0;
SDValue NewMulConst;
if (canExtractShiftFromMul(OffReg, 3, PowerOfTwo, NewMulConst)) {

View File

@ -1615,7 +1615,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
// extension rather than sign extension. Make sure we pass the return
// value extension property to integer materialization.
unsigned SrcReg =
PPCMaterializeInt(CI, MVT::i64, VA.getLocInfo() == CCValAssign::SExt);
PPCMaterializeInt(CI, MVT::i64, VA.getLocInfo() != CCValAssign::ZExt);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg);
@ -2091,25 +2091,21 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
&PPC::GPRCRegClass);
int64_t Imm = UseSExt ? CI->getSExtValue() : CI->getZExtValue();
// If the constant is in range, use a load-immediate.
if (UseSExt && isInt<16>(CI->getSExtValue())) {
// Since LI will sign extend the constant we need to make sure that for
// our zeroext constants that the sign extended constant fits into 16-bits -
// a range of 0..0x7fff.
if (isInt<16>(Imm)) {
unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
unsigned ImmReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
.addImm(CI->getSExtValue());
return ImmReg;
} else if (!UseSExt && isUInt<16>(CI->getZExtValue())) {
unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
unsigned ImmReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
.addImm(CI->getZExtValue());
.addImm(Imm);
return ImmReg;
}
// Construct the constant piecewise.
int64_t Imm = CI->getZExtValue();
if (VT == MVT::i64)
return PPCMaterialize64BitInt(Imm, RC);
else if (VT == MVT::i32)

View File

@ -736,7 +736,7 @@ def VPKSHSS : VX1_Int_Ty2<398, "vpkshss", int_ppc_altivec_vpkshss,
def VPKSHUS : VX1_Int_Ty2<270, "vpkshus", int_ppc_altivec_vpkshus,
v16i8, v8i16>;
def VPKSWSS : VX1_Int_Ty2<462, "vpkswss", int_ppc_altivec_vpkswss,
v16i8, v4i32>;
v8i16, v4i32>;
def VPKSWUS : VX1_Int_Ty2<334, "vpkswus", int_ppc_altivec_vpkswus,
v8i16, v4i32>;
def VPKUHUM : VXForm_1<14, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),

View File

@ -1849,7 +1849,7 @@ static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
if (CCMask == SystemZ::CCMASK_CMP_NE)
return SystemZ::CCMASK_TM_SOME_1;
}
if (EffectivelyUnsigned && CmpVal <= Low) {
if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
if (CCMask == SystemZ::CCMASK_CMP_LT)
return SystemZ::CCMASK_TM_ALL_0;
if (CCMask == SystemZ::CCMASK_CMP_GE)

View File

@ -1335,6 +1335,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
setOperationAction(ISD::SETCC, MVT::i1, Custom);
setOperationAction(ISD::SETCCE, MVT::i1, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
setOperationAction(ISD::XOR, MVT::i1, Legal);
setOperationAction(ISD::OR, MVT::i1, Legal);
@ -14975,8 +14976,11 @@ SDValue X86TargetLowering::LowerSETCCE(SDValue Op, SelectionDAG &DAG) const {
assert(Carry.getOpcode() != ISD::CARRY_FALSE);
SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry);
return DAG.getNode(X86ISD::SETCC, DL, Op.getValueType(),
DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
if (Op.getSimpleValueType() == MVT::i1)
return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
return SetCC;
}
// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
@ -16315,6 +16319,11 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
const X86Subtarget *Subtarget,
SelectionDAG &DAG, SDLoc dl) {
if (isAllOnesConstant(Mask))
return DAG.getTargetConstant(1, dl, MaskVT);
if (X86::isZeroNode(Mask))
return DAG.getTargetConstant(0, dl, MaskVT);
if (MaskVT.bitsGT(Mask.getSimpleValueType())) {
// Mask should be extended
Mask = DAG.getNode(ISD::ANY_EXTEND, dl,
@ -17203,26 +17212,14 @@ static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
MVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
SDValue MaskInReg;
ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
if (MaskC)
MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
else {
MVT BitcastVT = MVT::getVectorVT(MVT::i1,
Mask.getSimpleValueType().getSizeInBits());
// In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
// are extracted by EXTRACT_SUBVECTOR.
MaskInReg = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getBitcast(BitcastVT, Mask),
DAG.getIntPtrConstant(0, dl));
}
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
if (Src.getOpcode() == ISD::UNDEF)
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
return DAG.getMergeValues(RetOps, dl);
@ -17230,7 +17227,8 @@ static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain) {
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = cast<ConstantSDNode>(ScaleOp);
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
@ -17238,29 +17236,18 @@ static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Segment = DAG.getRegister(0, MVT::i32);
MVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
SDValue MaskInReg;
ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
if (MaskC)
MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
else {
MVT BitcastVT = MVT::getVectorVT(MVT::i1,
Mask.getSimpleValueType().getSizeInBits());
// In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
// are extracted by EXTRACT_SUBVECTOR.
MaskInReg = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getBitcast(BitcastVT, Mask),
DAG.getIntPtrConstant(0, dl));
}
SDValue VMask = getMaskNode(Mask, MaskVT, &Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
return SDValue(Res, 1);
}
static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Mask, SDValue Base, SDValue Index,
SDValue ScaleOp, SDValue Chain) {
SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = cast<ConstantSDNode>(ScaleOp);
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
@ -17268,14 +17255,9 @@ static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Segment = DAG.getRegister(0, MVT::i32);
MVT MaskVT =
MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
SDValue MaskInReg;
ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
if (MaskC)
MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
else
MaskInReg = DAG.getBitcast(MaskVT, Mask);
SDValue VMask = getMaskNode(Mask, MaskVT, &Subtarget, DAG, dl);
//SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
return SDValue(Res, 0);
}
@ -17509,7 +17491,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Src = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
Scale, Chain);
Scale, Chain, *Subtarget);
}
case PREFETCH: {
SDValue Hint = Op.getOperand(6);
@ -17521,7 +17503,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Index = Op.getOperand(3);
SDValue Base = Op.getOperand(4);
SDValue Scale = Op.getOperand(5);
return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
*Subtarget);
}
// Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
case RDTSC: {

View File

@ -3560,7 +3560,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
BO1->getOperand(0));
}
if (CI->isMaxValue(true)) {
if (BO0->getOpcode() == Instruction::Xor && CI->isMaxValue(true)) {
ICmpInst::Predicate Pred = I.isSigned()
? I.getUnsignedPredicate()
: I.getSignedPredicate();

View File

@ -557,7 +557,8 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
ConstantInt::get(IdxType, i),
};
auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), EltName);
auto *L = IC.Builder->CreateLoad(ST->getTypeAtIndex(i), Ptr, LoadName);
auto *L = IC.Builder->CreateAlignedLoad(Ptr, LI.getAlignment(),
LoadName);
V = IC.Builder->CreateInsertValue(V, L, i);
}

View File

@ -380,6 +380,23 @@ static void replaceExtractElements(InsertElementInst *InsElt,
ExtendMask.push_back(UndefValue::get(IntType));
Value *ExtVecOp = ExtElt->getVectorOperand();
auto *ExtVecOpInst = dyn_cast<Instruction>(ExtVecOp);
BasicBlock *InsertionBlock = (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
? ExtVecOpInst->getParent()
: ExtElt->getParent();
// TODO: This restriction matches the basic block check below when creating
// new extractelement instructions. If that limitation is removed, this one
// could also be removed. But for now, we just bail out to ensure that we
// will replace the extractelement instruction that is feeding our
// insertelement instruction. This allows the insertelement to then be
// replaced by a shufflevector. If the insertelement is not replaced, we can
// induce infinite looping because there's an optimization for extractelement
// that will delete our widening shuffle. This would trigger another attempt
// here to create that shuffle, and we spin forever.
if (InsertionBlock != InsElt->getParent())
return;
auto *WideVec = new ShuffleVectorInst(ExtVecOp, UndefValue::get(ExtVecType),
ConstantVector::get(ExtendMask));
@ -387,7 +404,6 @@ static void replaceExtractElements(InsertElementInst *InsElt,
// (as long as it's not a PHI) or at the start of the basic block of the
// extract, so any subsequent extracts in the same basic block can use it.
// TODO: Insert before the earliest ExtractElementInst that is replaced.
auto *ExtVecOpInst = dyn_cast<Instruction>(ExtVecOp);
if (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
WideVec->insertAfter(ExtVecOpInst);
else

View File

@ -90,6 +90,11 @@ static cl::opt<bool> SpeculateOneExpensiveInst(
cl::desc("Allow exactly one expensive instruction to be speculatively "
"executed"));
static cl::opt<unsigned> MaxSpeculationDepth(
"max-speculation-depth", cl::Hidden, cl::init(10),
cl::desc("Limit maximum recursion depth when calculating costs of "
"speculatively executed instructions"));
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
STATISTIC(NumLinearMaps, "Number of switch instructions turned into linear mapping");
STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
@ -269,6 +274,13 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
unsigned &CostRemaining,
const TargetTransformInfo &TTI,
unsigned Depth = 0) {
// It is possible to hit a zero-cost cycle (phi/gep instructions for example),
// so limit the recursion depth.
// TODO: While this recursion limit does prevent pathological behavior, it
// would be better to track visited instructions to avoid cycles.
if (Depth == MaxSpeculationDepth)
return false;
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
// Non-instructions all dominate instructions, but not all constantexprs

View File

@ -2229,7 +2229,8 @@ class Sema {
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
@ -5388,7 +5389,8 @@ class Sema {
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
@ -7514,14 +7516,15 @@ class Sema {
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
@ -8613,6 +8616,7 @@ class Sema {
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);

File diff suppressed because it is too large Load Diff

View File

@ -6151,6 +6151,12 @@ class SystemZTargetInfo : public TargetInfo {
Builder.defineMacro("__s390x__");
Builder.defineMacro("__zarch__");
Builder.defineMacro("__LONG_DOUBLE_128__");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
if (HasTransactionalExecution)
Builder.defineMacro("__HTM__");
if (Opts.ZVector)

View File

@ -104,23 +104,15 @@ class ConstantAddress : public Address {
};
}
// Present a minimal LLVM-like casting interface.
template <class U> inline U cast(CodeGen::Address addr) {
return U::castImpl(addr);
}
template <class U> inline bool isa(CodeGen::Address addr) {
return U::isaImpl(addr);
}
namespace llvm {
// Present a minimal LLVM-like casting interface.
template <class U> inline U cast(clang::CodeGen::Address addr) {
return U::castImpl(addr);
}
template <class U> inline bool isa(clang::CodeGen::Address addr) {
return U::isaImpl(addr);
}
}
namespace clang {
// Make our custom isa and cast available in namespace clang, to mirror
// what we do for LLVM's versions in Basic/LLVM.h.
using llvm::isa;
using llvm::cast;
}
#endif

View File

@ -483,7 +483,7 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
if (ThreadID != nullptr)
return ThreadID;
}
if (auto OMPRegionInfo =
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (OMPRegionInfo->getThreadIDVariable()) {
// Check if this an outlined function with thread id passed as argument.
@ -1356,7 +1356,7 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// return the address of that temp.
Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
SourceLocation Loc) {
if (auto OMPRegionInfo =
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
@ -1717,15 +1717,10 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
}
// Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
// thread_id);
auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
// Do not emit barrier call in the single directive emitted in some rare cases
// for sections directives.
if (OMPRegionInfo && OMPRegionInfo->getDirectiveKind() == OMPD_single)
return;
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
getThreadID(CGF, Loc)};
if (OMPRegionInfo) {
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
auto *Result = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
@ -3649,8 +3644,6 @@ void CGOpenMPRuntime::emitCancellationPointCall(
// global_tid, kmp_int32 cncl_kind);
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (OMPRegionInfo->getDirectiveKind() == OMPD_single)
return;
if (OMPRegionInfo->hasCancel()) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
@ -3687,8 +3680,6 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
// kmp_int32 cncl_kind);
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (OMPRegionInfo->getDirectiveKind() == OMPD_single)
return;
auto &&ThenGen = [this, Loc, CancelRegion,
OMPRegionInfo](CodeGenFunction &CGF) {
llvm::Value *Args[] = {

View File

@ -1657,50 +1657,51 @@ OpenMPDirectiveKind
CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
auto *CS = dyn_cast<CompoundStmt>(Stmt);
if (CS && CS->size() > 1) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
auto &C = CGF.CGM.getContext();
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Emit helper vars inits.
LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
CGF.Builder.getInt32(0));
auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
LValue UB =
createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
CGF.Builder.getInt32(1));
LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
CGF.Builder.getInt32(0));
// Loop counter.
LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
// Generate condition for loop.
BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
OK_Ordinary, S.getLocStart(),
/*fpContractable=*/false);
// Increment for loop counter.
UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
OK_Ordinary, S.getLocStart());
auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
// switch (IV) {
// case 0:
// <SectionStmt[0]>;
// break;
// ...
// case <NumSection> - 1:
// <SectionStmt[<NumSection> - 1]>;
// break;
// }
// .omp.sections.exit:
auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
auto *SwitchStmt = CGF.Builder.CreateSwitch(
CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
CS->size());
bool HasLastprivates = false;
auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF) {
auto &C = CGF.CGM.getContext();
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Emit helper vars inits.
LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
CGF.Builder.getInt32(0));
auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
: CGF.Builder.getInt32(0);
LValue UB =
createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
CGF.Builder.getInt32(1));
LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
CGF.Builder.getInt32(0));
// Loop counter.
LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
// Generate condition for loop.
BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
OK_Ordinary, S.getLocStart(),
/*fpContractable=*/false);
// Increment for loop counter.
UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
S.getLocStart());
auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
// switch (IV) {
// case 0:
// <SectionStmt[0]>;
// break;
// ...
// case <NumSection> - 1:
// <SectionStmt[<NumSection> - 1]>;
// break;
// }
// .omp.sections.exit:
auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
auto *SwitchStmt = CGF.Builder.CreateSwitch(
CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
CS == nullptr ? 1 : CS->size());
if (CS) {
unsigned CaseNumber = 0;
for (auto *SubStmt : CS->children()) {
auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
@ -1710,99 +1711,72 @@ CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CGF.EmitBranch(ExitBB);
++CaseNumber;
}
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
};
CodeGenFunction::OMPPrivateScope LoopScope(CGF);
if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(
CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
/*ForceSimpleCall=*/true);
} else {
auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
CGF.EmitBlock(CaseBB);
SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
CGF.EmitStmt(Stmt);
CGF.EmitBranch(ExitBB);
}
CGF.EmitOMPPrivateClause(S, LoopScope);
HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
// Emit static non-chunked loop.
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
/*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
LB.getAddress(), UB.getAddress(), ST.getAddress());
// UB = min(UB, GlobalUB);
auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB;
CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
// while (idx <= UB) { BODY; ++idx; }
CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
[](CodeGenFunction &) {});
// Tell the runtime we are done.
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
CGF.EmitOMPReductionClauseFinal(S);
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivates)
CGF.EmitOMPLastprivateClauseFinal(
S, CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(IL, S.getLocStart())));
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
};
bool HasCancel = false;
if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
HasCancel = OSD->hasCancel();
else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
HasCancel = OPSD->hasCancel();
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
HasCancel);
// Emit barrier for lastprivates only if 'sections' directive has 'nowait'
// clause. Otherwise the barrier will be generated by the codegen for the
// directive.
if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
CodeGenFunction::OMPPrivateScope LoopScope(CGF);
if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
OMPD_unknown);
CGF.CGM.getOpenMPRuntime().emitBarrierCall(
CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
/*ForceSimpleCall=*/true);
}
return OMPD_sections;
}
// If only one section is found - no need to generate loop, emit as a single
// region.
bool HasFirstprivates;
// No need to generate reductions for sections with single section region, we
// can use original shared variables for all operations.
bool HasReductions = S.hasClausesOfKind<OMPReductionClause>();
// No need to generate lastprivates for sections with single section region,
// we can use original shared variable for all calculations with barrier at
// the end of the sections.
bool HasLastprivates = S.hasClausesOfKind<OMPLastprivateClause>();
auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
CodeGenFunction::OMPPrivateScope SingleScope(CGF);
HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
CGF.EmitOMPPrivateClause(S, SingleScope);
(void)SingleScope.Privatize();
CGF.EmitOMPPrivateClause(S, LoopScope);
HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
CGF.EmitStmt(Stmt);
// Emit static non-chunked loop.
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
/*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
UB.getAddress(), ST.getAddress());
// UB = min(UB, GlobalUB);
auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB;
CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
// while (idx <= UB) { BODY; ++idx; }
CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
[](CodeGenFunction &) {});
// Tell the runtime we are done.
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
CGF.EmitOMPReductionClauseFinal(S);
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivates)
CGF.EmitOMPLastprivateClauseFinal(
S, CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(IL, S.getLocStart())));
};
CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
llvm::None, llvm::None, llvm::None,
llvm::None);
// Emit barrier for firstprivates, lastprivates or reductions only if
// 'sections' directive has 'nowait' clause. Otherwise the barrier will be
// generated by the codegen for the directive.
if ((HasFirstprivates || HasLastprivates || HasReductions) &&
S.getSingleClause<OMPNowaitClause>()) {
bool HasCancel = false;
if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
HasCancel = OSD->hasCancel();
else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
HasCancel = OPSD->hasCancel();
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
HasCancel);
// Emit barrier for lastprivates only if 'sections' directive has 'nowait'
// clause. Otherwise the barrier will be generated by the codegen for the
// directive.
if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown,
/*EmitChecks=*/false,
/*ForceSimpleCall=*/true);
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
OMPD_unknown);
}
return OMPD_single;
return OMPD_sections;
}
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {

View File

@ -3233,6 +3233,8 @@ ToolChain::CXXStdlibType NetBSD::GetCXXStdlibType(const ArgList &Args) const {
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
case llvm::Triple::sparc:
case llvm::Triple::sparcv9:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return ToolChain::CST_Libcxx;

View File

@ -8317,6 +8317,8 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
case llvm::Triple::sparc:
case llvm::Triple::sparcv9:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
useLibgcc = false;

View File

@ -1742,13 +1742,18 @@ void Sema::BuildBasePathArray(const CXXBasePaths &Paths,
/// otherwise. Loc is the location where this routine should point to
/// if there is an error, and Range is the source range to highlight
/// if there is an error.
///
/// If either InaccessibleBaseID or AmbigiousBaseConvID are 0, then the
/// diagnostic for the respective type of error will be suppressed, but the
/// check for ill-formed code will still be performed.
bool
Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath) {
CXXCastPath *BasePath,
bool IgnoreAccess) {
// First, determine whether the path from Derived to Base is
// ambiguous. This is slightly more expensive than checking whether
// the Derived to Base conversion exists, because here we need to
@ -1761,7 +1766,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
(void)DerivationOkay;
if (!Paths.isAmbiguous(Context.getCanonicalType(Base).getUnqualifiedType())) {
if (InaccessibleBaseID) {
if (!IgnoreAccess) {
// Check that the base class can be accessed.
switch (CheckBaseClassAccess(Loc, Base, Derived, Paths.front(),
InaccessibleBaseID)) {
@ -1810,12 +1815,10 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath,
bool IgnoreAccess) {
return CheckDerivedToBaseConversion(Derived, Base,
IgnoreAccess ? 0
: diag::err_upcast_to_inaccessible_base,
diag::err_ambiguous_derived_to_base_conv,
Loc, Range, DeclarationName(),
BasePath);
return CheckDerivedToBaseConversion(
Derived, Base, diag::err_upcast_to_inaccessible_base,
diag::err_ambiguous_derived_to_base_conv, Loc, Range, DeclarationName(),
BasePath, IgnoreAccess);
}

View File

@ -3748,6 +3748,128 @@ bool Sema::CheckVecStepExpr(Expr *E) {
return CheckUnaryExprOrTypeTraitOperand(E, UETT_VecStep);
}
static void captureVariablyModifiedType(ASTContext &Context, QualType T,
CapturingScopeInfo *CSI) {
assert(T->isVariablyModifiedType());
assert(CSI != nullptr);
// We're going to walk down into the type and look for VLA expressions.
do {
const Type *Ty = T.getTypePtr();
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
T = QualType();
break;
// These types are never variably-modified.
case Type::Builtin:
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
case Type::Record:
case Type::Enum:
case Type::Elaborated:
case Type::TemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::Pipe:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
T = cast<AdjustedType>(Ty)->getOriginalType();
break;
case Type::Decayed:
T = cast<DecayedType>(Ty)->getPointeeType();
break;
case Type::Pointer:
T = cast<PointerType>(Ty)->getPointeeType();
break;
case Type::BlockPointer:
T = cast<BlockPointerType>(Ty)->getPointeeType();
break;
case Type::LValueReference:
case Type::RValueReference:
T = cast<ReferenceType>(Ty)->getPointeeType();
break;
case Type::MemberPointer:
T = cast<MemberPointerType>(Ty)->getPointeeType();
break;
case Type::ConstantArray:
case Type::IncompleteArray:
// Losing element qualification here is fine.
T = cast<ArrayType>(Ty)->getElementType();
break;
case Type::VariableArray: {
// Losing element qualification here is fine.
const VariableArrayType *VAT = cast<VariableArrayType>(Ty);
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
if (auto Size = VAT->getSizeExpr()) {
if (!CSI->isVLATypeCaptured(VAT)) {
RecordDecl *CapRecord = nullptr;
if (auto LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
CapRecord = LSI->Lambda;
} else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
CapRecord = CRSI->TheRecordDecl;
}
if (CapRecord) {
auto ExprLoc = Size->getExprLoc();
auto SizeType = Context.getSizeType();
// Build the non-static data member.
auto Field =
FieldDecl::Create(Context, CapRecord, ExprLoc, ExprLoc,
/*Id*/ nullptr, SizeType, /*TInfo*/ nullptr,
/*BW*/ nullptr, /*Mutable*/ false,
/*InitStyle*/ ICIS_NoInit);
Field->setImplicit(true);
Field->setAccess(AS_private);
Field->setCapturedVLAType(VAT);
CapRecord->addDecl(Field);
CSI->addVLATypeCapture(ExprLoc, SizeType);
}
}
}
T = VAT->getElementType();
break;
}
case Type::FunctionProto:
case Type::FunctionNoProto:
T = cast<FunctionType>(Ty)->getReturnType();
break;
case Type::Paren:
case Type::TypeOf:
case Type::UnaryTransform:
case Type::Attributed:
case Type::SubstTemplateTypeParm:
case Type::PackExpansion:
// Keep walking after single level desugaring.
T = T.getSingleStepDesugaredType(Context);
break;
case Type::Typedef:
T = cast<TypedefType>(Ty)->desugar();
break;
case Type::Decltype:
T = cast<DecltypeType>(Ty)->desugar();
break;
case Type::Auto:
T = cast<AutoType>(Ty)->getDeducedType();
break;
case Type::TypeOfExpr:
T = cast<TypeOfExprType>(Ty)->getUnderlyingExpr()->getType();
break;
case Type::Atomic:
T = cast<AtomicType>(Ty)->getValueType();
break;
}
} while (!T.isNull() && T->isVariablyModifiedType());
}
/// \brief Build a sizeof or alignof expression given a type operand.
ExprResult
Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
@ -3763,6 +3885,20 @@ Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind))
return ExprError();
if (T->isVariablyModifiedType() && FunctionScopes.size() > 1) {
if (auto *TT = T->getAs<TypedefType>()) {
if (auto *CSI = dyn_cast<CapturingScopeInfo>(FunctionScopes.back())) {
DeclContext *DC = nullptr;
if (auto LSI = dyn_cast<LambdaScopeInfo>(CSI))
DC = LSI->CallOperator;
else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI))
DC = CRSI->TheCapturedDecl;
if (DC && TT->getDecl()->getDeclContext() != DC)
captureVariablyModifiedType(Context, T, CSI);
}
}
}
// C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
return new (Context) UnaryExprOrTypeTraitExpr(
ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
@ -7354,11 +7490,14 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
LHSType->isBlockPointerType()) &&
RHS.get()->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull)) {
CastKind Kind;
CXXCastPath Path;
CheckPointerConversion(RHS.get(), LHSType, Kind, Path, false);
if (ConvertRHS)
RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path);
if (Diagnose || ConvertRHS) {
CastKind Kind;
CXXCastPath Path;
CheckPointerConversion(RHS.get(), LHSType, Kind, Path,
/*IgnoreBaseAccess=*/false, Diagnose);
if (ConvertRHS)
RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path);
}
return Compatible;
}
@ -7376,8 +7515,8 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
}
Expr *PRE = RHS.get()->IgnoreParenCasts();
if (ObjCProtocolExpr *OPE = dyn_cast<ObjCProtocolExpr>(PRE)) {
ObjCProtocolDecl *PDecl = OPE->getProtocol();
if (Diagnose && isa<ObjCProtocolExpr>(PRE)) {
ObjCProtocolDecl *PDecl = cast<ObjCProtocolExpr>(PRE)->getProtocol();
if (PDecl && !PDecl->hasDefinition()) {
Diag(PRE->getExprLoc(), diag::warn_atprotocol_protocol) << PDecl->getName();
Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl;
@ -7399,11 +7538,11 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
Expr *E = RHS.get();
if (getLangOpts().ObjCAutoRefCount)
CheckObjCARCConversion(SourceRange(), Ty, E, CCK_ImplicitConversion,
DiagnoseCFAudited);
Diagnose, DiagnoseCFAudited);
if (getLangOpts().ObjC1 &&
(CheckObjCBridgeRelatedConversions(E->getLocStart(),
LHSType, E->getType(), E) ||
ConversionToObjCStringLiteralCheck(LHSType, E))) {
(CheckObjCBridgeRelatedConversions(E->getLocStart(), LHSType,
E->getType(), E, Diagnose) ||
ConversionToObjCStringLiteralCheck(LHSType, E, Diagnose))) {
RHS = E;
return Compatible;
}
@ -8961,8 +9100,9 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
else {
Expr *E = RHS.get();
if (getLangOpts().ObjCAutoRefCount)
CheckObjCARCConversion(SourceRange(), LHSType, E, CCK_ImplicitConversion, false,
Opc);
CheckObjCARCConversion(SourceRange(), LHSType, E,
CCK_ImplicitConversion, /*Diagnose=*/true,
/*DiagnoseCFAudited=*/false, Opc);
RHS = ImpCastExprToType(E, LHSType,
LPT ? CK_BitCast :CK_CPointerToObjCPointerCast);
}
@ -11830,8 +11970,8 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
return new (Context) GNUNullExpr(Ty, TokenLoc);
}
bool
Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp) {
bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
bool Diagnose) {
if (!getLangOpts().ObjC1)
return false;
@ -11857,8 +11997,9 @@ Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp) {
StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr);
if (!SL || !SL->isAscii())
return false;
Diag(SL->getLocStart(), diag::err_missing_atsign_prefix)
<< FixItHint::CreateInsertion(SL->getLocStart(), "@");
if (Diagnose)
Diag(SL->getLocStart(), diag::err_missing_atsign_prefix)
<< FixItHint::CreateInsertion(SL->getLocStart(), "@");
Exp = BuildObjCStringLiteral(SL->getLocStart(), SL).get();
return true;
}
@ -13139,120 +13280,7 @@ bool Sema::tryCaptureVariable(
QualType QTy = Var->getType();
if (ParmVarDecl *PVD = dyn_cast_or_null<ParmVarDecl>(Var))
QTy = PVD->getOriginalType();
do {
const Type *Ty = QTy.getTypePtr();
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
QTy = QualType();
break;
// These types are never variably-modified.
case Type::Builtin:
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
case Type::Record:
case Type::Enum:
case Type::Elaborated:
case Type::TemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::Pipe:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
QTy = cast<AdjustedType>(Ty)->getOriginalType();
break;
case Type::Decayed:
QTy = cast<DecayedType>(Ty)->getPointeeType();
break;
case Type::Pointer:
QTy = cast<PointerType>(Ty)->getPointeeType();
break;
case Type::BlockPointer:
QTy = cast<BlockPointerType>(Ty)->getPointeeType();
break;
case Type::LValueReference:
case Type::RValueReference:
QTy = cast<ReferenceType>(Ty)->getPointeeType();
break;
case Type::MemberPointer:
QTy = cast<MemberPointerType>(Ty)->getPointeeType();
break;
case Type::ConstantArray:
case Type::IncompleteArray:
// Losing element qualification here is fine.
QTy = cast<ArrayType>(Ty)->getElementType();
break;
case Type::VariableArray: {
// Losing element qualification here is fine.
const VariableArrayType *VAT = cast<VariableArrayType>(Ty);
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
if (auto Size = VAT->getSizeExpr()) {
if (!CSI->isVLATypeCaptured(VAT)) {
RecordDecl *CapRecord = nullptr;
if (auto LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
CapRecord = LSI->Lambda;
} else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
CapRecord = CRSI->TheRecordDecl;
}
if (CapRecord) {
auto ExprLoc = Size->getExprLoc();
auto SizeType = Context.getSizeType();
// Build the non-static data member.
auto Field = FieldDecl::Create(
Context, CapRecord, ExprLoc, ExprLoc,
/*Id*/ nullptr, SizeType, /*TInfo*/ nullptr,
/*BW*/ nullptr, /*Mutable*/ false,
/*InitStyle*/ ICIS_NoInit);
Field->setImplicit(true);
Field->setAccess(AS_private);
Field->setCapturedVLAType(VAT);
CapRecord->addDecl(Field);
CSI->addVLATypeCapture(ExprLoc, SizeType);
}
}
}
QTy = VAT->getElementType();
break;
}
case Type::FunctionProto:
case Type::FunctionNoProto:
QTy = cast<FunctionType>(Ty)->getReturnType();
break;
case Type::Paren:
case Type::TypeOf:
case Type::UnaryTransform:
case Type::Attributed:
case Type::SubstTemplateTypeParm:
case Type::PackExpansion:
// Keep walking after single level desugaring.
QTy = QTy.getSingleStepDesugaredType(getASTContext());
break;
case Type::Typedef:
QTy = cast<TypedefType>(Ty)->desugar();
break;
case Type::Decltype:
QTy = cast<DecltypeType>(Ty)->desugar();
break;
case Type::Auto:
QTy = cast<AutoType>(Ty)->getDeducedType();
break;
case Type::TypeOfExpr:
QTy = cast<TypeOfExprType>(Ty)->getUnderlyingExpr()->getType();
break;
case Type::Atomic:
QTy = cast<AtomicType>(Ty)->getValueType();
break;
}
} while (!QTy.isNull() && QTy->isVariablyModifiedType());
captureVariablyModifiedType(Context, QTy, CSI);
}
if (getLangOpts().OpenMP) {

View File

@ -3816,7 +3816,7 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs) {
bool CfToNs, bool Diagnose) {
QualType T = CfToNs ? SrcType : DestType;
ObjCBridgeRelatedAttr *ObjCBAttr = ObjCBridgeRelatedAttrFromType(T, TDNDecl);
if (!ObjCBAttr)
@ -3832,20 +3832,24 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
LookupResult R(*this, DeclarationName(RCId), SourceLocation(),
Sema::LookupOrdinaryName);
if (!LookupName(R, TUScope)) {
Diag(Loc, diag::err_objc_bridged_related_invalid_class) << RCId
<< SrcType << DestType;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_invalid_class) << RCId
<< SrcType << DestType;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
}
return false;
}
Target = R.getFoundDecl();
if (Target && isa<ObjCInterfaceDecl>(Target))
RelatedClass = cast<ObjCInterfaceDecl>(Target);
else {
Diag(Loc, diag::err_objc_bridged_related_invalid_class_name) << RCId
<< SrcType << DestType;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
if (Target)
Diag(Target->getLocStart(), diag::note_declared_at);
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_invalid_class_name) << RCId
<< SrcType << DestType;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
if (Target)
Diag(Target->getLocStart(), diag::note_declared_at);
}
return false;
}
@ -3854,9 +3858,11 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
Selector Sel = Context.Selectors.getUnarySelector(CMId);
ClassMethod = RelatedClass->lookupMethod(Sel, false);
if (!ClassMethod) {
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << Sel << false;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << Sel << false;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
}
return false;
}
}
@ -3866,9 +3872,11 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
Selector Sel = Context.Selectors.getNullarySelector(IMId);
InstanceMethod = RelatedClass->lookupMethod(Sel, true);
if (!InstanceMethod) {
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << Sel << true;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << Sel << true;
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
}
return false;
}
}
@ -3878,7 +3886,7 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
bool
Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr) {
Expr *&SrcExpr, bool Diagnose) {
ARCConversionTypeClass rhsExprACTC = classifyTypeForARCConversion(SrcType);
ARCConversionTypeClass lhsExprACTC = classifyTypeForARCConversion(DestType);
bool CfToNs = (rhsExprACTC == ACTC_coreFoundation && lhsExprACTC == ACTC_retainable);
@ -3891,27 +3899,29 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
ObjCMethodDecl *InstanceMethod = nullptr;
TypedefNameDecl *TDNDecl = nullptr;
if (!checkObjCBridgeRelatedComponents(Loc, DestType, SrcType, RelatedClass,
ClassMethod, InstanceMethod, TDNDecl, CfToNs))
ClassMethod, InstanceMethod, TDNDecl,
CfToNs, Diagnose))
return false;
if (CfToNs) {
// Implicit conversion from CF to ObjC object is needed.
if (ClassMethod) {
std::string ExpressionString = "[";
ExpressionString += RelatedClass->getNameAsString();
ExpressionString += " ";
ExpressionString += ClassMethod->getSelector().getAsString();
SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
// Provide a fixit: [RelatedClass ClassMethod SrcExpr]
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << ClassMethod->getSelector() << false
<< FixItHint::CreateInsertion(SrcExpr->getLocStart(), ExpressionString)
<< FixItHint::CreateInsertion(SrcExprEndLoc, "]");
Diag(RelatedClass->getLocStart(), diag::note_declared_at);
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
if (Diagnose) {
std::string ExpressionString = "[";
ExpressionString += RelatedClass->getNameAsString();
ExpressionString += " ";
ExpressionString += ClassMethod->getSelector().getAsString();
SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
// Provide a fixit: [RelatedClass ClassMethod SrcExpr]
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << ClassMethod->getSelector() << false
<< FixItHint::CreateInsertion(SrcExpr->getLocStart(), ExpressionString)
<< FixItHint::CreateInsertion(SrcExprEndLoc, "]");
Diag(RelatedClass->getLocStart(), diag::note_declared_at);
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
}
QualType receiverType =
Context.getObjCInterfaceType(RelatedClass);
QualType receiverType = Context.getObjCInterfaceType(RelatedClass);
// Argument.
Expr *args[] = { SrcExpr };
ExprResult msg = BuildClassMessageImplicit(receiverType, false,
@ -3925,30 +3935,34 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
else {
// Implicit conversion from ObjC type to CF object is needed.
if (InstanceMethod) {
std::string ExpressionString;
SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
if (InstanceMethod->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl = InstanceMethod->findPropertyDecl()) {
// fixit: ObjectExpr.propertyname when it is a property accessor.
ExpressionString = ".";
ExpressionString += PDecl->getNameAsString();
if (Diagnose) {
std::string ExpressionString;
SourceLocation SrcExprEndLoc =
getLocForEndOfToken(SrcExpr->getLocEnd());
if (InstanceMethod->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl =
InstanceMethod->findPropertyDecl()) {
// fixit: ObjectExpr.propertyname when it is a property accessor.
ExpressionString = ".";
ExpressionString += PDecl->getNameAsString();
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << InstanceMethod->getSelector() << true
<< FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
}
if (ExpressionString.empty()) {
// Provide a fixit: [ObjectExpr InstanceMethod]
ExpressionString = " ";
ExpressionString += InstanceMethod->getSelector().getAsString();
ExpressionString += "]";
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << InstanceMethod->getSelector() << true
<< FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
<< SrcType << DestType << InstanceMethod->getSelector() << true
<< FixItHint::CreateInsertion(SrcExpr->getLocStart(), "[")
<< FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
}
if (ExpressionString.empty()) {
// Provide a fixit: [ObjectExpr InstanceMethod]
ExpressionString = " ";
ExpressionString += InstanceMethod->getSelector().getAsString();
ExpressionString += "]";
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << InstanceMethod->getSelector() << true
<< FixItHint::CreateInsertion(SrcExpr->getLocStart(), "[")
<< FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
Diag(RelatedClass->getLocStart(), diag::note_declared_at);
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
}
Diag(RelatedClass->getLocStart(), diag::note_declared_at);
Diag(TDNDecl->getLocStart(), diag::note_declared_at);
ExprResult msg =
BuildInstanceMessageImplicit(SrcExpr, SrcType,
@ -3965,6 +3979,7 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
Sema::ARCConversionResult
Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
Expr *&castExpr, CheckedConversionKind CCK,
bool Diagnose,
bool DiagnoseCFAudited,
BinaryOperatorKind Opc) {
QualType castExprType = castExpr->getType();
@ -3980,9 +3995,9 @@ Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
if (exprACTC == castACTC) {
// check for viability and report error if casting an rvalue to a
// life-time qualifier.
if ((castACTC == ACTC_retainable) &&
if (Diagnose && castACTC == ACTC_retainable &&
(CCK == CCK_CStyleCast || CCK == CCK_OtherCast) &&
(castType != castExprType)) {
castType != castExprType) {
const Type *DT = castType.getTypePtr();
QualType QDT = castType;
// We desugar some types but not others. We ignore those
@ -4051,19 +4066,20 @@ Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
// to 'NSString *'. Let caller issue a normal mismatched diagnostic with
// suitable fix-it.
if (castACTC == ACTC_retainable && exprACTC == ACTC_none &&
ConversionToObjCStringLiteralCheck(castType, castExpr))
ConversionToObjCStringLiteralCheck(castType, castExpr, Diagnose))
return ACR_okay;
// Do not issue "bridge cast" diagnostic when implicit casting
// a retainable object to a CF type parameter belonging to an audited
// CF API function. Let caller issue a normal type mismatched diagnostic
// instead.
if (!DiagnoseCFAudited || exprACTC != ACTC_retainable ||
castACTC != ACTC_coreFoundation)
if (Diagnose &&
(!DiagnoseCFAudited || exprACTC != ACTC_retainable ||
castACTC != ACTC_coreFoundation))
if (!(exprACTC == ACTC_voidPtr && castACTC == ACTC_retainable &&
(Opc == BO_NE || Opc == BO_EQ)))
diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
castExpr, castExpr, exprACTC, CCK);
diagnoseObjCARCConversion(*this, castRange, castType, castACTC, castExpr,
castExpr, exprACTC, CCK);
return ACR_okay;
}

View File

@ -2687,15 +2687,16 @@ bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess) {
bool IgnoreBaseAccess,
bool Diagnose) {
QualType FromType = From->getType();
bool IsCStyleOrFunctionalCast = IgnoreBaseAccess;
Kind = CK_BitCast;
if (!IsCStyleOrFunctionalCast && !FromType->isAnyPointerType() &&
if (Diagnose && !IsCStyleOrFunctionalCast && !FromType->isAnyPointerType() &&
From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) ==
Expr::NPCK_ZeroExpression) {
Expr::NPCK_ZeroExpression) {
if (Context.hasSameUnqualifiedType(From->getType(), Context.BoolTy))
DiagRuntimeBehavior(From->getExprLoc(), From,
PDiag(diag::warn_impcast_bool_to_null_pointer)
@ -2713,18 +2714,24 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
!Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType)) {
// We must have a derived-to-base conversion. Check an
// ambiguous or inaccessible conversion.
if (CheckDerivedToBaseConversion(FromPointeeType, ToPointeeType,
From->getExprLoc(),
From->getSourceRange(), &BasePath,
IgnoreBaseAccess))
unsigned InaccessibleID = 0;
unsigned AmbigiousID = 0;
if (Diagnose) {
InaccessibleID = diag::err_upcast_to_inaccessible_base;
AmbigiousID = diag::err_ambiguous_derived_to_base_conv;
}
if (CheckDerivedToBaseConversion(
FromPointeeType, ToPointeeType, InaccessibleID, AmbigiousID,
From->getExprLoc(), From->getSourceRange(), DeclarationName(),
&BasePath, IgnoreBaseAccess))
return true;
// The conversion was successful.
Kind = CK_DerivedToBase;
}
if (!IsCStyleOrFunctionalCast && FromPointeeType->isFunctionType() &&
ToPointeeType->isVoidType()) {
if (Diagnose && !IsCStyleOrFunctionalCast &&
FromPointeeType->isFunctionType() && ToPointeeType->isVoidType()) {
assert(getLangOpts().MSVCCompat &&
"this should only be possible with MSVCCompat!");
Diag(From->getExprLoc(), diag::ext_ms_impcast_fn_obj)

View File

@ -60,6 +60,9 @@ class LLDB_API SBInstruction
bool
DoesBranch ();
bool
HasDelaySlot ();
void
Print (FILE *out);

View File

@ -1216,6 +1216,25 @@ namespace lldb_private {
}
return UINT32_MAX;
}
//------------------------------------------------------------------
/// Append the data value of every entry whose range contains
/// \a addr to \a indexes.
///
/// Because stored ranges may overlap, a full linear scan is
/// performed; a binary search would find only one containing entry.
///
/// @param addr     The address to search for.
/// @param indexes  Receives the data value of each matching entry.
///                 Pre-existing contents are preserved, so the
///                 return value includes them.
///
/// @return The total number of elements in \a indexes after the
///         search.
//------------------------------------------------------------------
uint32_t
FindEntryIndexesThatContain(B addr, std::vector<uint32_t> &indexes) const
{
#ifdef ASSERT_RANGEMAP_ARE_SORTED
    assert (IsSorted());
#endif
    // Iterating an empty collection is a no-op, so no emptiness
    // guard is needed; the previously declared iterator "pos" was
    // unused and has been removed.
    for (const auto &entry : m_entries)
    {
        if (entry.Contains(addr))
            indexes.push_back(entry.data);
    }
    return indexes.size();
}
Entry *
FindEntryThatContains (B addr)

View File

@ -81,6 +81,7 @@ class Symtab
Symbol * FindFirstSymbolWithNameAndType (const ConstString &name, lldb::SymbolType symbol_type, Debug symbol_debug_type, Visibility symbol_visibility);
Symbol * FindSymbolContainingFileAddress (lldb::addr_t file_addr, const uint32_t* indexes, uint32_t num_indexes);
Symbol * FindSymbolContainingFileAddress (lldb::addr_t file_addr);
void ForEachSymbolContainingFileAddress(lldb::addr_t file_addr, std::function<bool(Symbol *)> const &callback);
size_t FindFunctionSymbols (const ConstString &name, uint32_t name_type_mask, SymbolContextList& sc_list);
void CalculateSymbolSizes ();

View File

@ -160,6 +160,14 @@ SBInstruction::DoesBranch ()
return false;
}
bool
SBInstruction::HasDelaySlot ()
{
    // Forward the query to the wrapped Instruction when one is
    // present; an empty SBInstruction trivially has no delay slot.
    return m_opaque_sp ? m_opaque_sp->HasDelaySlot () : false;
}
void
SBInstruction::SetOpaque (const lldb::InstructionSP &inst_sp)
{

View File

@ -559,7 +559,18 @@ Module::ResolveSymbolContextForAddress (const Address& so_addr, uint32_t resolve
Symtab *symtab = sym_vendor->GetSymtab();
if (symtab && so_addr.IsSectionOffset())
{
sc.symbol = symtab->FindSymbolContainingFileAddress(so_addr.GetFileAddress());
Symbol *matching_symbol = nullptr;
symtab->ForEachSymbolContainingFileAddress(so_addr.GetFileAddress(),
[&matching_symbol](Symbol *symbol) -> bool {
if (symbol->GetType() != eSymbolTypeInvalid)
{
matching_symbol = symbol;
return false; // Stop iterating
}
return true; // Keep iterating
});
sc.symbol = matching_symbol;
if (!sc.symbol &&
resolve_scope & eSymbolContextFunction && !(resolved_flags & eSymbolContextFunction))
{

View File

@ -35,12 +35,12 @@ using namespace lldb_private;
static char ID;
#define VALID_POINTER_CHECK_NAME "$__lldb_valid_pointer_check"
#define VALID_POINTER_CHECK_NAME "_$__lldb_valid_pointer_check"
#define VALID_OBJC_OBJECT_CHECK_NAME "$__lldb_objc_object_check"
static const char g_valid_pointer_check_text[] =
"extern \"C\" void\n"
"$__lldb_valid_pointer_check (unsigned char *$__lldb_arg_ptr)\n"
"_$__lldb_valid_pointer_check (unsigned char *$__lldb_arg_ptr)\n"
"{\n"
" unsigned char $__lldb_local_val = *$__lldb_arg_ptr;\n"
"}";

View File

@ -242,16 +242,27 @@ ABISysV_mips::PrepareTrivialCall (Thread &thread,
const RegisterInfo *sp_reg_info = reg_ctx->GetRegisterInfo (eRegisterKindGeneric, LLDB_REGNUM_GENERIC_SP);
const RegisterInfo *ra_reg_info = reg_ctx->GetRegisterInfo (eRegisterKindGeneric, LLDB_REGNUM_GENERIC_RA);
const RegisterInfo *r25_info = reg_ctx->GetRegisterInfoByName("r25", 0);
const RegisterInfo *r0_info = reg_ctx->GetRegisterInfoByName("zero", 0);
if (log)
log->Printf("Writing SP: 0x%" PRIx64, (uint64_t)sp);
log->Printf("Writing R0: 0x%" PRIx64, (uint64_t)0);
/* Write r0 with 0, in case we are stopped in syscall,
* such setting prevents automatic decrement of the PC.
* This clears the bug 23659 for MIPS.
*/
if (!reg_ctx->WriteRegisterFromUnsigned (r0_info, (uint64_t)0))
return false;
if (log)
log->Printf("Writing SP: 0x%" PRIx64, (uint64_t)sp);
// Set "sp" to the requested value
if (!reg_ctx->WriteRegisterFromUnsigned (sp_reg_info, sp))
return false;
if (log)
log->Printf("Writing RA: 0x%" PRIx64, (uint64_t)return_addr);
log->Printf("Writing RA: 0x%" PRIx64, (uint64_t)return_addr);
// Set "ra" to the return address
if (!reg_ctx->WriteRegisterFromUnsigned (ra_reg_info, return_addr))

View File

@ -207,16 +207,27 @@ ABISysV_mips64::PrepareTrivialCall (Thread &thread,
const RegisterInfo *sp_reg_info = reg_ctx->GetRegisterInfo (eRegisterKindGeneric, LLDB_REGNUM_GENERIC_SP);
const RegisterInfo *ra_reg_info = reg_ctx->GetRegisterInfo (eRegisterKindGeneric, LLDB_REGNUM_GENERIC_RA);
const RegisterInfo *r25_info = reg_ctx->GetRegisterInfoByName("r25", 0);
const RegisterInfo *r0_info = reg_ctx->GetRegisterInfoByName("zero", 0);
if (log)
log->Printf("Writing SP: 0x%" PRIx64, (uint64_t)sp);
log->Printf("Writing R0: 0x%" PRIx64, (uint64_t)0);
/* Write r0 with 0, in case we are stopped in syscall,
* such setting prevents automatic decrement of the PC.
* This clears the bug 23659 for MIPS.
*/
if (!reg_ctx->WriteRegisterFromUnsigned (r0_info, (uint64_t)0))
return false;
if (log)
log->Printf("Writing SP: 0x%" PRIx64, (uint64_t)sp);
// Set "sp" to the requested value
if (!reg_ctx->WriteRegisterFromUnsigned (sp_reg_info, sp))
return false;
if (log)
log->Printf("Writing RA: 0x%" PRIx64, (uint64_t)return_addr);
log->Printf("Writing RA: 0x%" PRIx64, (uint64_t)return_addr);
// Set "ra" to the return address
if (!reg_ctx->WriteRegisterFromUnsigned (ra_reg_info, return_addr))

View File

@ -13,6 +13,7 @@
#include "lldb/Core/Module.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Core/UniqueCStringMap.h"
#include "lldb/Core/ValueObject.h"
#include "lldb/DataFormatters/StringPrinter.h"
@ -1268,13 +1269,115 @@ GoASTContext::ConvertStringToFloatValue(lldb::opaque_compiler_type_t type, const
//----------------------------------------------------------------------
// Dumping types
//----------------------------------------------------------------------
#define DEPTH_INCREMENT 2
void
GoASTContext::DumpValue(lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, Stream *s, lldb::Format format,
const DataExtractor &data, lldb::offset_t data_offset, size_t data_byte_size,
const DataExtractor &data, lldb::offset_t data_byte_offset, size_t data_byte_size,
uint32_t bitfield_bit_size, uint32_t bitfield_bit_offset, bool show_types, bool show_summary,
bool verbose, uint32_t depth)
{
assert(false);
if (IsTypedefType(type))
type = GetTypedefedType(type).GetOpaqueQualType();
if (!type)
return;
GoType *t = static_cast<GoType *>(type);
if (GoStruct *st = t->GetStruct())
{
if (GetCompleteType(type))
{
uint32_t field_idx = 0;
for (auto* field = st->GetField(field_idx); field != nullptr; field_idx++)
{
// Print the starting squiggly bracket (if this is the
// first member) or comma (for member 2 and beyond) for
// the struct/union/class member.
if (field_idx == 0)
s->PutChar('{');
else
s->PutChar(',');
// Indent
s->Printf("\n%*s", depth + DEPTH_INCREMENT, "");
// Print the member type if requested
if (show_types)
{
ConstString field_type_name = field->m_type.GetTypeName();
s->Printf("(%s) ", field_type_name.AsCString());
}
// Print the member name and equal sign
s->Printf("%s = ", field->m_name.AsCString());
// Dump the value of the member
CompilerType field_type = field->m_type;
field_type.DumpValue (exe_ctx,
s, // Stream to dump to
field_type.GetFormat(), // The format with which to display the member
data, // Data buffer containing all bytes for this type
data_byte_offset + field->m_byte_offset,// Offset into "data" where to grab value from
field->m_type.GetByteSize(exe_ctx->GetBestExecutionContextScope()), // Size of this type in bytes
0, // Bitfield bit size
0, // Bitfield bit offset
show_types, // Boolean indicating if we should show the variable types
show_summary, // Boolean indicating if we should show a summary for the current type
verbose, // Verbose output?
depth + DEPTH_INCREMENT); // Scope depth for any types that have children
}
// Indent the trailing squiggly bracket
if (field_idx > 0)
s->Printf("\n%*s}", depth, "");
}
}
if (GoArray *a = t->GetArray()) {
CompilerType element_clang_type = a->GetElementType();
lldb::Format element_format = element_clang_type.GetFormat();
uint32_t element_byte_size = element_clang_type.GetByteSize(exe_ctx->GetBestExecutionContextScope());
uint64_t element_idx;
for (element_idx = 0; element_idx < a->GetLength(); ++element_idx)
{
// Print the starting squiggly bracket (if this is the
// first member) or comma (for member 2 and beyond) for
// the struct/union/class member.
if (element_idx == 0)
s->PutChar('{');
else
s->PutChar(',');
// Indent and print the index
s->Printf("\n%*s[%" PRIu64 "] ", depth + DEPTH_INCREMENT, "", element_idx);
// Figure out the field offset within the current struct/union/class type
uint64_t element_offset = element_idx * element_byte_size;
// Dump the value of the member
element_clang_type.DumpValue (exe_ctx,
s, // Stream to dump to
element_format, // The format with which to display the element
data, // Data buffer containing all bytes for this type
data_byte_offset + element_offset,// Offset into "data" where to grab value from
element_byte_size, // Size of this type in bytes
0, // Bitfield bit size
0, // Bitfield bit offset
show_types, // Boolean indicating if we should show the variable types
show_summary, // Boolean indicating if we should show a summary for the current type
verbose, // Verbose output?
depth + DEPTH_INCREMENT); // Scope depth for any types that have children
}
// Indent the trailing squiggly bracket
if (element_idx > 0)
s->Printf("\n%*s}", depth, "");
}
if (show_summary)
DumpSummary (type, exe_ctx, s, data, data_byte_offset, data_byte_size);
}
bool
@ -1371,19 +1474,55 @@ void
GoASTContext::DumpSummary(lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, Stream *s, const DataExtractor &data,
lldb::offset_t data_offset, size_t data_byte_size)
{
assert(false);
if (type && GoType::KIND_STRING == static_cast<GoType *>(type)->GetGoKind())
{
// TODO(ribrdb): read length and data
}
}
void
GoASTContext::DumpTypeDescription(lldb::opaque_compiler_type_t type)
{
assert(false);
} // Dump to stdout
// Dump to stdout
StreamFile s (stdout, false);
DumpTypeDescription (type, &s);
}
void
GoASTContext::DumpTypeDescription(lldb::opaque_compiler_type_t type, Stream *s)
{
assert(false);
if (!type)
return;
ConstString name = GetTypeName(type);
GoType *t = static_cast<GoType *>(type);
if (GoStruct *st = t->GetStruct())
{
if (GetCompleteType(type))
{
if (NULL == strchr(name.AsCString(), '{'))
s->Printf("type %s ", name.AsCString());
s->PutCString("struct {");
if (st->GetNumFields() == 0) {
s->PutChar('}');
return;
}
s->IndentMore();
uint32_t field_idx = 0;
for (auto* field = st->GetField(field_idx); field != nullptr; field_idx++)
{
s->PutChar('\n');
s->Indent();
s->Printf("%s %s", field->m_name.AsCString(), field->m_type.GetTypeName().AsCString());
}
s->IndentLess();
s->PutChar('\n');
s->Indent("}");
return;
}
}
s->PutCString(name.AsCString());
}
CompilerType

View File

@ -1070,6 +1070,26 @@ Symtab::FindSymbolContainingFileAddress (addr_t file_addr)
return nullptr;
}
void
Symtab::ForEachSymbolContainingFileAddress(addr_t file_addr, std::function<bool(Symbol *)> const &callback)
{
    // Invoke "callback" once for each symbol whose address range
    // contains "file_addr", stopping early if the callback returns
    // false.
    Mutex::Locker locker (m_mutex);

    if (!m_file_addr_to_index_computed)
        InitAddressIndexes();

    // Collect the indexes of every symbol containing file_addr.
    std::vector<uint32_t> matching_indexes;
    m_file_addr_to_index.FindEntryIndexesThatContain(file_addr, matching_indexes);

    for (uint32_t symbol_index : matching_indexes)
    {
        if (!callback(SymbolAtIndex(symbol_index)))
            break;
    }
}
void
Symtab::SymbolIndicesToSymbolContextList (std::vector<uint32_t> &symbol_indexes, SymbolContextList &sc_list)
{

View File

@ -2442,18 +2442,18 @@ Target::GetBreakableLoadAddress (lldb::addr_t addr)
SymbolContext sc;
uint32_t resolve_scope = eSymbolContextFunction | eSymbolContextSymbol;
temp_addr_module_sp->ResolveSymbolContextForAddress(resolved_addr, resolve_scope, sc);
Address sym_addr;
if (sc.function)
{
function_start = sc.function->GetAddressRange().GetBaseAddress().GetLoadAddress(this);
if (function_start == LLDB_INVALID_ADDRESS)
function_start = sc.function->GetAddressRange().GetBaseAddress().GetFileAddress();
}
sym_addr = sc.function->GetAddressRange().GetBaseAddress();
else if (sc.symbol)
{
Address sym_addr = sc.symbol->GetAddress();
sym_addr = sc.symbol->GetAddress();
function_start = sym_addr.GetLoadAddress(this);
if (function_start == LLDB_INVALID_ADDRESS)
function_start = sym_addr.GetFileAddress();
}
current_offset = addr - function_start;
if (function_start)
current_offset = addr - function_start;
}
// If breakpoint address is start of function then we don't have to do anything.

View File

@ -7,4 +7,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "258968"
#define SVN_REVISION "260756"