commit 8833aad7be

Merge llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and openmp
release/11.x llvmorg-11.0.0-rc5-0-g60a25202a7d.

MFC after:	4 weeks
X-MFC-With:	r364284
@@ -5,11 +5,6 @@ lld 11.0.0 Release Notes
 .. contents::
    :local:
 
-.. warning::
-   These are in-progress notes for the upcoming LLVM 11.0.0 release.
-   Release notes for previous releases can be found on
-   `the Download Page <https://releases.llvm.org/download.html>`_.
-
 Introduction
 ============
 
@@ -176,12 +171,3 @@ MinGW Improvements
   ``--disable-runtime-pseudo-reloc``), the ``--no-seh`` flag and options
   for selecting file and section alignment (``--file-alignment`` and
   ``--section-alignment``).
-
-MachO Improvements
-------------------
-
-* Item 1.
-
-WebAssembly Improvements
-------------------------
-
@@ -3636,7 +3636,7 @@ void LLVMAddDestination(LLVMValueRef IndirectBr, LLVMBasicBlockRef Dest);
 /* Get the number of clauses on the landingpad instruction */
 unsigned LLVMGetNumClauses(LLVMValueRef LandingPad);
 
-/* Get the value of the clause at idnex Idx on the landingpad instruction */
+/* Get the value of the clause at index Idx on the landingpad instruction */
 LLVMValueRef LLVMGetClause(LLVMValueRef LandingPad, unsigned Idx);
 
 /* Add a catch or filter clause to the landingpad instruction */
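
[Note: the hunk above only fixes a doc-comment typo ("idnex" -> "index"). As a hedged sketch of how the two accessors it documents compose, assuming `LP` is an LLVMValueRef already known to wrap a landingpad instruction:]

    #include <llvm-c/Core.h>

    // Iterate all catch/filter clauses of a landingpad via the C API.
    static void dumpClauses(LLVMValueRef LP) {
      unsigned NumClauses = LLVMGetNumClauses(LP);
      for (unsigned Idx = 0; Idx < NumClauses; ++Idx) {
        LLVMValueRef Clause = LLVMGetClause(LP, Idx);
        LLVMDumpValue(Clause); // print the clause to stderr
      }
    }
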
@@ -3937,6 +3937,26 @@ LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
                                     LLVMAtomicOrdering FailureOrdering,
                                     LLVMBool SingleThread);
 
+/**
+ * Get the number of elements in the mask of a ShuffleVector instruction.
+ */
+unsigned LLVMGetNumMaskElements(LLVMValueRef ShuffleVectorInst);
+
+/**
+ * \returns a constant that specifies that the result of a \c ShuffleVectorInst
+ * is undefined.
+ */
+int LLVMGetUndefMaskElem(void);
+
+/**
+ * Get the mask value at position Elt in the mask of a ShuffleVector
+ * instruction.
+ *
+ * \Returns the result of \c LLVMGetUndefMaskElem() if the mask value is undef
+ * at that position.
+ */
+int LLVMGetMaskValue(LLVMValueRef ShuffleVectorInst, unsigned Elt);
+
 LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst);
 void LLVMSetAtomicSingleThread(LLVMValueRef AtomicInst, LLVMBool SingleThread);
 
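
[Note: the hunk above adds shufflevector mask accessors to the C API. A minimal sketch of their intended use, assuming `SV` is an LLVMValueRef known to wrap a shufflevector instruction (the function name here is illustrative, not part of the commit):]

    #include <llvm-c/Core.h>
    #include <stdio.h>

    static void dumpShuffleMask(LLVMValueRef SV) {
      unsigned Len = LLVMGetNumMaskElements(SV);
      for (unsigned Elt = 0; Elt < Len; ++Elt) {
        int V = LLVMGetMaskValue(SV, Elt);
        if (V == LLVMGetUndefMaskElem())
          printf("element %u: undef\n", Elt);     // sentinel for undef lanes
        else
          printf("element %u: input lane %d\n", Elt, V);
      }
    }
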
@@ -378,6 +378,9 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase {
   iterator find(ConstPtrType Ptr) const {
     return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
   }
+  bool contains(ConstPtrType Ptr) const {
+    return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
+  }
 
   template <typename IterT>
   void insert(IterT I, IterT E) {
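
[Note: the new contains() member above is a readability wrapper over find_imp, used later in this commit by PHIEliminationUtils.cpp. A minimal usage sketch (hypothetical set and value, not part of the commit):]

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/IR/Value.h"

    void markVisited(const llvm::Value *V) {
      llvm::SmallPtrSet<const llvm::Value *, 8> Visited;
      Visited.insert(V);
      if (Visited.contains(V)) { // equivalent to: Visited.count(V) != 0
        // ... already seen ...
      }
    }
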
@@ -2779,7 +2779,7 @@ static void emitGlobalConstantImpl(const DataLayout &DL, const Constant *CV,
   if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
     const uint64_t StoreSize = DL.getTypeStoreSize(CV->getType());
 
-    if (StoreSize < 8) {
+    if (StoreSize <= 8) {
       if (AP.isVerbose())
         AP.OutStreamer->GetCommentOS() << format("0x%" PRIx64 "\n",
                                                  CI->getZExtValue());
@@ -375,13 +375,15 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
             << "Load/store a split arg to/from the stack not implemented yet");
         return false;
       }
       MVT VT = MVT::getVT(Args[i].Ty);
       unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                       : alignTo(VT.getSizeInBits(), 8) / 8;
+
+      EVT LocVT = VA.getValVT();
+      unsigned MemSize = LocVT == MVT::iPTR ? DL.getPointerSize()
+                                            : LocVT.getStoreSize();
+
       unsigned Offset = VA.getLocMemOffset();
       MachinePointerInfo MPO;
-      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
-      Handler.assignValueToAddress(Args[i], StackAddr, Size, MPO, VA);
+      Register StackAddr = Handler.getStackAddress(MemSize, Offset, MPO);
+      Handler.assignValueToAddress(Args[i], StackAddr, MemSize, MPO, VA);
     } else {
       // FIXME: Support byvals and other weirdness
       return false;
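
[Note: the switch from Size to MemSize above matters when the ABI widens a value in its stack slot, since the value's size and the slot's store size then disagree. A hedged, self-contained sketch with hypothetical values (not part of the commit):]

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    #include "llvm/Support/MathExtras.h"

    int main() {
      llvm::LLVMContext Ctx;
      // Value type: an i1 occupies one byte as a value...
      llvm::MVT VT = llvm::MVT::getVT(llvm::Type::getInt1Ty(Ctx));
      unsigned Size = llvm::alignTo(VT.getSizeInBits(), 8) / 8;  // 1
      // ...but a location type promoted to i32 stores four bytes.
      llvm::EVT LocVT = llvm::MVT::i32;
      unsigned MemSize = LocVT.getStoreSize();                   // 4
      return Size != MemSize; // 1 != 4: the load/store must use MemSize
    }
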
@@ -2368,11 +2368,12 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     MI.RemoveOperand(1);
     Observer.changedInstr(MI);
 
-    MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
-
     auto HiPart = MIRBuilder.buildInstr(Opcode, {Ty}, {LHS, RHS});
     auto Zero = MIRBuilder.buildConstant(Ty, 0);
 
+    // Move insert point forward so we can use the Res register if needed.
+    MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+
     // For *signed* multiply, overflow is detected by checking:
     // (hi != (lo >> bitwidth-1))
     if (Opcode == TargetOpcode::G_SMULH) {
@@ -27,31 +27,35 @@ llvm::findPHICopyInsertPoint(MachineBasicBlock* MBB, MachineBasicBlock* SuccMBB,
   // Usually, we just want to insert the copy before the first terminator
   // instruction. However, for the edge going to a landing pad, we must insert
   // the copy before the call/invoke instruction. Similarly for an INLINEASM_BR
-  // going to an indirect target.
-  if (!SuccMBB->isEHPad() && !SuccMBB->isInlineAsmBrIndirectTarget())
+  // going to an indirect target. This is similar to SplitKit.cpp's
+  // computeLastInsertPoint, and similarly assumes that there cannot be multiple
+  // instructions that are Calls with EHPad successors or INLINEASM_BR in a
+  // block.
+  bool EHPadSuccessor = SuccMBB->isEHPad();
+  if (!EHPadSuccessor && !SuccMBB->isInlineAsmBrIndirectTarget())
     return MBB->getFirstTerminator();
 
-  // Discover any defs/uses in this basic block.
-  SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
+  // Discover any defs in this basic block.
+  SmallPtrSet<MachineInstr *, 8> DefsInMBB;
   MachineRegisterInfo& MRI = MBB->getParent()->getRegInfo();
-  for (MachineInstr &RI : MRI.reg_instructions(SrcReg)) {
+  for (MachineInstr &RI : MRI.def_instructions(SrcReg))
     if (RI.getParent() == MBB)
-      DefUsesInMBB.insert(&RI);
-  }
+      DefsInMBB.insert(&RI);
 
-  MachineBasicBlock::iterator InsertPoint;
-  if (DefUsesInMBB.empty()) {
-    // No defs. Insert the copy at the start of the basic block.
-    InsertPoint = MBB->begin();
-  } else if (DefUsesInMBB.size() == 1) {
-    // Insert the copy immediately after the def/use.
-    InsertPoint = *DefUsesInMBB.begin();
-    ++InsertPoint;
-  } else {
-    // Insert the copy immediately after the last def/use.
-    InsertPoint = MBB->end();
-    while (!DefUsesInMBB.count(&*--InsertPoint)) {}
-    ++InsertPoint;
+  MachineBasicBlock::iterator InsertPoint = MBB->begin();
+  // Insert the copy at the _latest_ point of:
+  // 1. Immediately AFTER the last def
+  // 2. Immediately BEFORE a call/inlineasm_br.
+  for (auto I = MBB->rbegin(), E = MBB->rend(); I != E; ++I) {
+    if (DefsInMBB.contains(&*I)) {
+      InsertPoint = std::next(I.getReverse());
+      break;
+    }
+    if ((EHPadSuccessor && I->isCall()) ||
+        I->getOpcode() == TargetOpcode::INLINEASM_BR) {
+      InsertPoint = I.getReverse();
+      break;
+    }
   }
 
   // Make sure the copy goes after any phi nodes but before
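
[Note: the rewritten loop above walks the block backwards. LLVM's machine-instruction reverse iterators, unlike std::reverse_iterator, expose getReverse(), which yields a forward iterator to the same instruction. A short illustrative sketch of the conversion the two break paths rely on:]

    // Given a reverse iterator I positioned on instruction X:
    //   I.getReverse()            -> forward iterator AT X  (insert BEFORE X)
    //   std::next(I.getReverse()) -> forward iterator past X (insert AFTER X)
    MachineBasicBlock::reverse_iterator I = MBB->rbegin();
    MachineBasicBlock::iterator BeforeX = I.getReverse();
    MachineBasicBlock::iterator AfterX = std::next(I.getReverse());
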
@@ -169,32 +169,6 @@ static cl::opt<unsigned> SwitchPeelThreshold(
 // store [4096 x i8] %data, [4096 x i8]* %buffer
 static const unsigned MaxParallelChains = 64;
 
-// Return the calling convention if the Value passed requires ABI mangling as it
-// is a parameter to a function or a return value from a function which is not
-// an intrinsic.
-static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
-  if (auto *R = dyn_cast<ReturnInst>(V))
-    return R->getParent()->getParent()->getCallingConv();
-
-  if (auto *CI = dyn_cast<CallInst>(V)) {
-    const bool IsInlineAsm = CI->isInlineAsm();
-    const bool IsIndirectFunctionCall =
-        !IsInlineAsm && !CI->getCalledFunction();
-
-    // It is possible that the call instruction is an inline asm statement or an
-    // indirect function call in which case the return value of
-    // getCalledFunction() would be nullptr.
-    const bool IsInstrinsicCall =
-        !IsInlineAsm && !IsIndirectFunctionCall &&
-        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;
-
-    if (!IsInlineAsm && !IsInstrinsicCall)
-      return CI->getCallingConv();
-  }
-
-  return None;
-}
-
 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                       const SDValue *Parts, unsigned NumParts,
                                       MVT PartVT, EVT ValueVT, const Value *V,
@@ -1624,7 +1598,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
 
     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
-                     Inst->getType(), getABIRegCopyCC(V));
+                     Inst->getType(), None);
     SDValue Chain = DAG.getEntryNode();
     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
   }
@@ -5555,7 +5529,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
   if (VMI != FuncInfo.ValueMap.end()) {
     const auto &TLI = DAG.getTargetLoweringInfo();
     RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
-                     V->getType(), getABIRegCopyCC(V));
+                     V->getType(), None);
     if (RFV.occupiesMultipleRegs()) {
       splitMultiRegDbgValue(RFV.getRegsAndSizes());
       return true;
@@ -5751,8 +5751,10 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
 
     // If we already have the use of the negated floating constant, it is free
     // to negate it even it has multiple uses.
-    if (!Op.hasOneUse() && CFP.use_empty())
+    if (!Op.hasOneUse() && CFP.use_empty()) {
+      RemoveDeadNode(CFP);
       break;
+    }
     Cost = NegatibleCost::Neutral;
     return CFP;
   }
@@ -5810,6 +5812,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
     if (NegX && (CostX <= CostY)) {
       Cost = CostX;
       SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags);
+      if (NegY != N)
+        RemoveDeadNode(NegY);
       return N;
     }
@@ -5818,6 +5821,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
     if (NegY) {
       Cost = CostY;
       SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags);
+      if (NegX != N)
+        RemoveDeadNode(NegX);
       return N;
     }
@@ -5857,6 +5861,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
     if (NegX && (CostX <= CostY)) {
       Cost = CostX;
       SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags);
+      if (NegY != N)
+        RemoveDeadNode(NegY);
       return N;
     }
@@ -5870,6 +5875,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
     if (NegY) {
       Cost = CostY;
       SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags);
+      if (NegX != N)
+        RemoveDeadNode(NegX);
       return N;
     }
@@ -5901,6 +5907,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
     if (NegX && (CostX <= CostY)) {
       Cost = std::min(CostX, CostZ);
       SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags);
+      if (NegY != N)
+        RemoveDeadNode(NegY);
       return N;
     }
@@ -5909,6 +5916,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
     if (NegY) {
      Cost = std::min(CostY, CostZ);
       SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags);
+      if (NegX != N)
+        RemoveDeadNode(NegX);
       return N;
     }
@@ -3952,6 +3952,19 @@ LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
                       singleThread ? SyncScope::SingleThread : SyncScope::System));
 }
 
+unsigned LLVMGetNumMaskElements(LLVMValueRef SVInst) {
+  Value *P = unwrap<Value>(SVInst);
+  ShuffleVectorInst *I = cast<ShuffleVectorInst>(P);
+  return I->getShuffleMask().size();
+}
+
+int LLVMGetMaskValue(LLVMValueRef SVInst, unsigned Elt) {
+  Value *P = unwrap<Value>(SVInst);
+  ShuffleVectorInst *I = cast<ShuffleVectorInst>(P);
+  return I->getMaskValue(Elt);
+}
+
+int LLVMGetUndefMaskElem(void) { return UndefMaskElem; }
+
 LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst) {
   Value *P = unwrap<Value>(AtomicInst);
@@ -104,7 +104,8 @@ bool GlobalValue::isInterposable() const {
 
 bool GlobalValue::canBenefitFromLocalAlias() const {
   // See AsmPrinter::getSymbolPreferLocal().
-  return GlobalObject::isExternalLinkage(getLinkage()) && !isDeclaration() &&
+  return hasDefaultVisibility() &&
+         GlobalObject::isExternalLinkage(getLinkage()) && !isDeclaration() &&
          !isa<GlobalIFunc>(this) && !hasComdat();
 }
 
@@ -2242,6 +2242,21 @@ IEEEFloat::opStatus IEEEFloat::convert(const fltSemantics &toSemantics,
     if (!X86SpecialNan && semantics == &semX87DoubleExtended)
       APInt::tcSetBit(significandParts(), semantics->precision - 1);
 
+    // If we are truncating NaN, it is possible that we shifted out all of the
+    // set bits in a signalling NaN payload. But NaN must remain NaN, so some
+    // bit in the significand must be set (otherwise it is Inf).
+    // This can only happen with sNaN. Set the 1st bit after the quiet bit,
+    // so that we still have an sNaN.
+    // FIXME: Set quiet and return opInvalidOp (on convert of any sNaN).
+    // But this requires fixing LLVM to parse 32-bit hex FP or ignoring
+    // conversions while parsing IR.
+    if (APInt::tcIsZero(significandParts(), newPartCount)) {
+      assert(shift < 0 && "Should not lose NaN payload on extend");
+      assert(semantics->precision >= 3 && "Unexpectedly narrow significand");
+      assert(*losesInfo && "Missing payload should have set lost info");
+      APInt::tcSetBit(significandParts(), semantics->precision - 3);
+    }
+
     // gcc forces the Quiet bit on, which means (float)(double)(float_sNan)
     // does not give you back the same bits. This is dubious, and we
     // don't currently do it. You're really supposed to get
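
[Note: a hedged sketch of the case the new guard above handles: truncating a signalling NaN whose payload bits are all shifted out must still yield a NaN, where previously the significand could end up all zero, i.e. an infinity pattern. Illustrative only:]

    #include "llvm/ADT/APFloat.h"
    #include <cassert>

    int main() {
      using namespace llvm;
      // sNaN payload living entirely in bits a float significand cannot hold.
      APInt Payload(64, 1);
      APFloat F = APFloat::getSNaN(APFloat::IEEEdouble(), false, &Payload);
      bool LosesInfo = false;
      (void)F.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
                      &LosesInfo);
      assert(F.isNaN() && LosesInfo); // still NaN; the payload was lost
      return 0;
    }
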
@@ -84,11 +84,16 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
     }
   }
 
-  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
+  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t MemSize,
                             MachinePointerInfo &MPO, CCValAssign &VA) override {
     MachineFunction &MF = MIRBuilder.getMF();
+
+    // The reported memory location may be wider than the value.
+    const LLT RegTy = MRI.getType(ValVReg);
+    MemSize = std::min(static_cast<uint64_t>(RegTy.getSizeInBytes()), MemSize);
+
     auto MMO = MF.getMachineMemOperand(
-        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
+        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemSize,
         inferAlignFromPtrInfo(MF, MPO));
     MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
   }
@@ -129,13 +129,17 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
     }
   }
 
-  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
+  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t MemSize,
                             MachinePointerInfo &MPO, CCValAssign &VA) override {
     MachineFunction &MF = MIRBuilder.getMF();
 
+    // The reported memory location may be wider than the value.
+    const LLT RegTy = MRI.getType(ValVReg);
+    MemSize = std::min(static_cast<uint64_t>(RegTy.getSizeInBytes()), MemSize);
+
     // FIXME: Get alignment
     auto MMO = MF.getMachineMemOperand(
-        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
+        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemSize,
         inferAlignFromPtrInfo(MF, MPO));
     MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
   }
@@ -1,14 +1,14 @@
 // $FreeBSD$
 
-#define LLVM_REVISION "llvmorg-11.0.0-rc2-91-g6e042866c30"
+#define LLVM_REVISION "llvmorg-11.0.0-rc5-0-g60a25202a7d"
 #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"
 
-#define CLANG_REVISION "llvmorg-11.0.0-rc2-91-g6e042866c30"
+#define CLANG_REVISION "llvmorg-11.0.0-rc5-0-g60a25202a7d"
 #define CLANG_REPOSITORY "git@github.com:llvm/llvm-project.git"
 
 // <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
-#define LLD_REVISION "llvmorg-11.0.0-rc2-91-g6e042866c30-1300007"
+#define LLD_REVISION "llvmorg-11.0.0-rc5-0-g60a25202a7d-1300007"
 #define LLD_REPOSITORY "FreeBSD"
 
-#define LLDB_REVISION "llvmorg-11.0.0-rc2-91-g6e042866c30"
+#define LLDB_REVISION "llvmorg-11.0.0-rc5-0-g60a25202a7d"
 #define LLDB_REPOSITORY "git@github.com:llvm/llvm-project.git"
@@ -1,3 +1,3 @@
 /* $FreeBSD$ */
-#define LLVM_REVISION "llvmorg-11.0.0-rc2-91-g6e042866c30"
+#define LLVM_REVISION "llvmorg-11.0.0-rc5-0-g60a25202a7d"
 #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"