Merge llvm, clang, lld, lldb, compiler-rt and libc++ release_70 branch
r348686 (effectively 7.0.1 rc3), resolve conflicts, and bump version
numbers.

PR:		230240, 230355
Dimitry Andric 2018-12-09 11:36:04 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang700-import/; revision=341763
57 changed files with 645 additions and 312 deletions

View File

@ -352,7 +352,6 @@ struct FileDescriptor {
~FileDescriptor() { close(); }
FileDescriptor() = default;
FileDescriptor(FileDescriptor const&) = delete;
FileDescriptor& operator=(FileDescriptor const&) = delete;

View File

@ -30,8 +30,6 @@ class GSIHashIterator
GSIHashIterator, FixedStreamArrayIterator<PSHashRecord>,
std::random_access_iterator_tag, const uint32_t> {
public:
GSIHashIterator() = default;
template <typename T>
GSIHashIterator(T &&v)
: GSIHashIterator::iterator_adaptor_base(std::forward<T &&>(v)) {}

View File

@ -49,7 +49,7 @@ class ModuleDebugStreamRef {
BinarySubstreamRef getC13LinesSubstream() const;
BinarySubstreamRef getGlobalRefsSubstream() const;
ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = default;
ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = delete;
iterator_range<DebugSubsectionIterator> subsections() const;
codeview::DebugSubsectionArray getSubsectionsArray() const {

View File

@ -126,7 +126,7 @@ class MaterializationResponsibility {
public:
MaterializationResponsibility(MaterializationResponsibility &&) = default;
MaterializationResponsibility &
operator=(MaterializationResponsibility &&) = default;
operator=(MaterializationResponsibility &&) = delete;
/// Destruct a MaterializationResponsibility instance. In debug mode
/// this asserts that all symbols being tracked have been either

View File

@ -70,8 +70,7 @@ class OrcRemoteTargetClient
RemoteRTDyldMemoryManager &
operator=(const RemoteRTDyldMemoryManager &) = delete;
RemoteRTDyldMemoryManager(RemoteRTDyldMemoryManager &&) = default;
RemoteRTDyldMemoryManager &
operator=(RemoteRTDyldMemoryManager &&) = default;
RemoteRTDyldMemoryManager &operator=(RemoteRTDyldMemoryManager &&) = delete;
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID,

View File

@ -165,6 +165,11 @@ class MCAsmBackend {
return 0;
}
/// Check whether a given symbol has been flagged with MICROMIPS flag.
virtual bool isMicroMips(const MCSymbol *Sym) const {
return false;
}
/// Handles all target related code padding when starting to write a new
/// basic block to an object file.
///

View File

@ -641,8 +641,6 @@ class LineCoverageIterator
this->operator++();
}
LineCoverageIterator &operator=(const LineCoverageIterator &R) = default;
bool operator==(const LineCoverageIterator &R) const {
return &CD == &R.CD && Next == R.Next && Ended == R.Ended;
}

View File

@ -1186,6 +1186,20 @@ struct SemiNCAInfo {
<< '\t' << U << "\n");
LLVM_DEBUG(dbgs() << "\n");
// Recalculate the DominatorTree when the number of updates
// exceeds a threshold, which usually makes direct updating slower than
// recalculation. We select this threshold proportional to the
// size of the DominatorTree. The constant is selected
// by choosing the one with an acceptable performance on some real-world
// inputs.
// Make unittests of the incremental algorithm work
if (DT.DomTreeNodes.size() <= 100) {
if (NumLegalized > DT.DomTreeNodes.size())
CalculateFromScratch(DT, &BUI);
} else if (NumLegalized > DT.DomTreeNodes.size() / 40)
CalculateFromScratch(DT, &BUI);
// If the DominatorTree was recalculated at some point, stop the batch
// updates. Full recalculations ignore batch updates and look at the actual
// CFG.
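
As an editorial aside, the recalculation decision added in this hunk reduces to
the following standalone predicate (a sketch only; the helper name is made up,
while the 100-node cutoff and the 1/40 ratio are the values from the diff):

// Sketch of the heuristic: recalculate the DominatorTree from scratch once the
// number of legalized updates outgrows the tree.
static bool shouldRecalculateFromScratch(size_t TreeSize, size_t NumLegalized) {
  if (TreeSize <= 100)
    return NumLegalized > TreeSize;      // small trees: more updates than nodes
  return NumLegalized > TreeSize / 40;   // e.g. a 4000-node tree recalculates
                                         // once more than 100 updates pile up
}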

View File

@ -76,6 +76,10 @@ class SSAUpdater {
/// block.
bool HasValueForBlock(BasicBlock *BB) const;
/// Return the value for the specified block if the SSAUpdater has one,
/// otherwise return nullptr.
Value *FindValueForBlock(BasicBlock *BB) const;
/// Construct SSA form, materializing a value that is live at the end
/// of the specified block.
Value *GetValueAtEndOfBlock(BasicBlock *BB);
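
As a brief editorial illustration of the new query, a hypothetical helper built
only on the existing SSAUpdater API (FindValueForBlock() is the addition above;
everything else already exists):

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

// Rewrite a use in UserBB only if the updater already knows a value for that
// block. Unlike GetValueAtEndOfBlock(), FindValueForBlock() never inserts new
// PHI nodes; it only reports what has been recorded or computed so far.
static void rewriteIfAvailable(SSAUpdater &Updater, Use &U, BasicBlock *UserBB) {
  if (Value *V = Updater.FindValueForBlock(UserBB))
    U.set(V);
}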

View File

@ -357,10 +357,9 @@ class SSAUpdaterImpl {
BBInfo *Info = *I;
if (Info->DefBB != Info) {
// Record the available value at join nodes to speed up subsequent
// uses of this SSAUpdater for the same value.
if (Info->NumPreds > 1)
(*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
// Record the available value to speed up subsequent uses of this
// SSAUpdater for the same value.
(*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
continue;
}

View File

@ -119,7 +119,6 @@ class MemoryLocOrCall {
public:
bool IsCall = false;
MemoryLocOrCall() = default;
MemoryLocOrCall(MemoryUseOrDef *MUD)
: MemoryLocOrCall(MUD->getMemoryInst()) {}
MemoryLocOrCall(const MemoryUseOrDef *MUD)

View File

@ -1156,10 +1156,11 @@ MCSection *TargetLoweringObjectFileCOFF::SelectSectionForGlobal(
MCSymbol *Sym = TM.getSymbol(ComdatGV);
StringRef COMDATSymName = Sym->getName();
// Append "$symbol" to the section name when targetting mingw. The ld.bfd
// Append "$symbol" to the section name *before* IR-level mangling is
// applied when targetting mingw. This is what GCC does, and the ld.bfd
// COFF linker will not properly handle comdats otherwise.
if (getTargetTriple().isWindowsGNUEnvironment())
raw_svector_ostream(Name) << '$' << COMDATSymName;
raw_svector_ostream(Name) << '$' << ComdatGV->getName();
return getContext().getCOFFSection(Name, Characteristics, Kind,
COMDATSymName, Selection, UniqueID);

View File

@ -275,7 +275,7 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
uint64_t Size = I->getCommonSize();
if (!CommonAlign)
CommonAlign = Align;
CommonSize += alignTo(CommonSize, Align) + Size;
CommonSize = alignTo(CommonSize, Align) + Size;
CommonSymbolsToAllocate.push_back(*I);
}
} else

View File

@ -524,6 +524,11 @@ static void AttemptToFoldSymbolOffsetDifference(
if (Asm->isThumbFunc(&SA))
Addend |= 1;
// If symbol is labeled as micromips, we set low-bit to ensure
// correct offset in .gcc_except_table
if (Asm->getBackend().isMicroMips(&SA))
Addend |= 1;
// Clear the symbol expr pointers to indicate we have folded these
// operands.
A = B = nullptr;

View File

@ -1515,39 +1515,50 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
/// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
/// a comparison. They set the NZCV flags to a predefined value if their
/// predicate is false. This allows to express arbitrary conjunctions, for
/// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B))))"
/// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
/// expressed as:
/// cmp A
/// ccmp B, inv(CB), CA
/// check for CB flags
///
/// In general we can create code for arbitrary "... (and (and A B) C)"
/// sequences. We can also implement some "or" expressions, because "(or A B)"
/// is equivalent to "not (and (not A) (not B))" and we can implement some
/// negation operations:
/// We can negate the results of a single comparison by inverting the flags
/// used when the predicate fails and inverting the flags tested in the next
/// instruction; We can also negate the results of the whole previous
/// conditional compare sequence by inverting the flags tested in the next
/// instruction. However there is no way to negate the result of a partial
/// sequence.
/// This naturally lets us implement chains of AND operations with SETCC
/// operands. And we can even implement some other situations by transforming
/// them:
/// - We can implement (NEG SETCC) i.e. negating a single comparison by
/// negating the flags used in a CCMP/FCCMP operations.
/// - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
/// by negating the flags we test for afterwards. i.e.
/// NEG (CMP CCMP CCCMP ...) can be implemented.
/// - Note that we can only ever negate all previously processed results.
/// What we can not implement by flipping the flags to test is a negation
/// of two sub-trees (because the negation affects all sub-trees emitted so
/// far, so the 2nd sub-tree we emit would also affect the first).
/// With those tools we can implement some OR operations:
/// - (OR (SETCC A) (SETCC B)) can be implemented via:
/// NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
/// - After transforming OR to NEG/AND combinations we may be able to use NEG
/// elimination rules from earlier to implement the whole thing as a
/// CCMP/FCCMP chain.
///
/// Therefore on encountering an "or" expression we can negate the subtree on
/// one side and have to be able to push the negate to the leafs of the subtree
/// on the other side (see also the comments in code). As complete example:
/// "or (or (setCA (cmp A)) (setCB (cmp B)))
/// (and (setCC (cmp C)) (setCD (cmp D)))"
/// is transformed to
/// "not (and (not (and (setCC (cmp C)) (setCC (cmp D))))
/// (and (not (setCA (cmp A)) (not (setCB (cmp B))))))"
/// and implemented as:
/// As complete example:
/// or (or (setCA (cmp A)) (setCB (cmp B)))
/// (and (setCC (cmp C)) (setCD (cmp D)))"
/// can be reassociated to:
/// or (and (setCC (cmp C)) setCD (cmp D))
// (or (setCA (cmp A)) (setCB (cmp B)))
/// can be transformed to:
/// not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
/// (and (not (setCA (cmp A)) (not (setCB (cmp B))))))"
/// which can be implemented as:
/// cmp C
/// ccmp D, inv(CD), CC
/// ccmp A, CA, inv(CD)
/// ccmp B, CB, inv(CA)
/// check for CB flags
/// A counterexample is "or (and A B) (and C D)" which cannot be implemented
/// by conditional compare sequences.
///
/// A counterexample is "or (and A B) (and C D)" which translates to
/// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))), we
/// can only implement 1 of the inner (not) operations, but not both!
/// @{
/// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
@ -1585,14 +1596,23 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
}
/// Returns true if @p Val is a tree of AND/OR/SETCC operations.
/// CanPushNegate is set to true if we can push a negate operation through
/// the tree in a was that we are left with AND operations and negate operations
/// at the leafs only. i.e. "not (or (or x y) z)" can be changed to
/// "and (and (not x) (not y)) (not z)"; "not (or (and x y) z)" cannot be
/// brought into such a form.
static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanNegate,
unsigned Depth = 0) {
/// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
/// expressed as a conjunction. See \ref AArch64CCMP.
/// \param CanNegate Set to true if we can negate the whole sub-tree just by
/// changing the conditions on the SETCC tests.
/// (this means we can call emitConjunctionRec() with
/// Negate==true on this sub-tree)
/// \param MustBeFirst Set to true if this subtree needs to be negated and we
/// cannot do the negation naturally. We are required to
/// emit the subtree first in this case.
/// \param WillNegate Is true if are called when the result of this
/// subexpression must be negated. This happens when the
/// outer expression is an OR. We can use this fact to know
/// that we have a double negation (or (or ...) ...) that
/// can be implemented for free.
static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
bool &MustBeFirst, bool WillNegate,
unsigned Depth = 0) {
if (!Val.hasOneUse())
return false;
unsigned Opcode = Val->getOpcode();
@ -1600,39 +1620,44 @@ static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanNegate,
if (Val->getOperand(0).getValueType() == MVT::f128)
return false;
CanNegate = true;
MustBeFirst = false;
return true;
}
// Protect against exponential runtime and stack overflow.
if (Depth > 6)
return false;
if (Opcode == ISD::AND || Opcode == ISD::OR) {
bool IsOR = Opcode == ISD::OR;
SDValue O0 = Val->getOperand(0);
SDValue O1 = Val->getOperand(1);
bool CanNegateL;
if (!isConjunctionDisjunctionTree(O0, CanNegateL, Depth+1))
bool MustBeFirstL;
if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1))
return false;
bool CanNegateR;
if (!isConjunctionDisjunctionTree(O1, CanNegateR, Depth+1))
bool MustBeFirstR;
if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1))
return false;
if (Opcode == ISD::OR) {
// For an OR expression we need to be able to negate at least one side or
// we cannot do the transformation at all.
if (MustBeFirstL && MustBeFirstR)
return false;
if (IsOR) {
// For an OR expression we need to be able to naturally negate at least
// one side or we cannot do the transformation at all.
if (!CanNegateL && !CanNegateR)
return false;
// We can however change a (not (or x y)) to (and (not x) (not y)) if we
// can negate the x and y subtrees.
CanNegate = CanNegateL && CanNegateR;
// If we the result of the OR will be negated and we can naturally negate
// the leafs, then this sub-tree as a whole negates naturally.
CanNegate = WillNegate && CanNegateL && CanNegateR;
// If we cannot naturally negate the whole sub-tree, then this must be
// emitted first.
MustBeFirst = !CanNegate;
} else {
// If the operands are OR expressions then we finally need to negate their
// outputs, we can only do that for the operand with emitted last by
// negating OutCC, not for both operands.
bool NeedsNegOutL = O0->getOpcode() == ISD::OR;
bool NeedsNegOutR = O1->getOpcode() == ISD::OR;
if (NeedsNegOutL && NeedsNegOutR)
return false;
// We cannot negate an AND operation (it would become an OR),
assert(Opcode == ISD::AND && "Must be OR or AND");
// We cannot naturally negate an AND operation.
CanNegate = false;
MustBeFirst = MustBeFirstL || MustBeFirstR;
}
return true;
}
@ -1645,11 +1670,9 @@ static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanNegate,
/// and conditional compare operations. @returns an NZCV flags producing node
/// and sets @p OutCC to the flags that should be tested or returns SDValue() if
/// transformation was not possible.
/// On recursive invocations @p PushNegate may be set to true to have negation
/// effects pushed to the tree leafs; @p Predicate is an NZCV flag predicate
/// for the comparisons in the current subtree; @p Depth limits the search
/// depth to avoid stack overflow.
static SDValue emitConjunctionDisjunctionTreeRec(SelectionDAG &DAG, SDValue Val,
/// \p Negate is true if we want this sub-tree being negated just by changing
/// SETCC conditions.
static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
AArch64CC::CondCode Predicate) {
// We're at a tree leaf, produce a conditional comparison operation.
@ -1690,76 +1713,85 @@ static SDValue emitConjunctionDisjunctionTreeRec(SelectionDAG &DAG, SDValue Val,
return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
DAG);
}
assert((Opcode == ISD::AND || (Opcode == ISD::OR && Val->hasOneUse())) &&
"Valid conjunction/disjunction tree");
assert(Val->hasOneUse() && "Valid conjunction/disjunction tree");
bool IsOR = Opcode == ISD::OR;
// Check if both sides can be transformed.
SDValue LHS = Val->getOperand(0);
bool CanNegateL;
bool MustBeFirstL;
bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR);
assert(ValidL && "Valid conjunction/disjunction tree");
(void)ValidL;
SDValue RHS = Val->getOperand(1);
bool CanNegateR;
bool MustBeFirstR;
bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR);
assert(ValidR && "Valid conjunction/disjunction tree");
(void)ValidR;
// In case of an OR we need to negate our operands and the result.
// (A v B) <=> not(not(A) ^ not(B))
bool NegateOpsAndResult = Opcode == ISD::OR;
// We can negate the results of all previous operations by inverting the
// predicate flags giving us a free negation for one side. The other side
// must be negatable by itself.
if (NegateOpsAndResult) {
// See which side we can negate.
bool CanNegateL;
bool isValidL = isConjunctionDisjunctionTree(LHS, CanNegateL);
assert(isValidL && "Valid conjunction/disjunction tree");
(void)isValidL;
#ifndef NDEBUG
bool CanNegateR;
bool isValidR = isConjunctionDisjunctionTree(RHS, CanNegateR);
assert(isValidR && "Valid conjunction/disjunction tree");
assert((CanNegateL || CanNegateR) && "Valid conjunction/disjunction tree");
#endif
// Order the side which we cannot negate to RHS so we can emit it first.
if (!CanNegateL)
std::swap(LHS, RHS);
} else {
bool NeedsNegOutL = LHS->getOpcode() == ISD::OR;
assert((!NeedsNegOutL || RHS->getOpcode() != ISD::OR) &&
"Valid conjunction/disjunction tree");
// Order the side where we need to negate the output flags to RHS so it
// gets emitted first.
if (NeedsNegOutL)
std::swap(LHS, RHS);
// Swap sub-tree that must come first to the right side.
if (MustBeFirstL) {
assert(!MustBeFirstR && "Valid conjunction/disjunction tree");
std::swap(LHS, RHS);
std::swap(CanNegateL, CanNegateR);
std::swap(MustBeFirstL, MustBeFirstR);
}
// Emit RHS. If we want to negate the tree we only need to push a negate
// through if we are already in a PushNegate case, otherwise we can negate
// the "flags to test" afterwards.
bool NegateR;
bool NegateAfterR;
bool NegateL;
bool NegateAfterAll;
if (Opcode == ISD::OR) {
// Swap the sub-tree that we can negate naturally to the left.
if (!CanNegateL) {
assert(CanNegateR && "at least one side must be negatable");
assert(!MustBeFirstR && "invalid conjunction/disjunction tree");
assert(!Negate);
std::swap(LHS, RHS);
NegateR = false;
NegateAfterR = true;
} else {
// Negate the left sub-tree if possible, otherwise negate the result.
NegateR = CanNegateR;
NegateAfterR = !CanNegateR;
}
NegateL = true;
NegateAfterAll = !Negate;
} else {
assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree");
assert(!Negate && "Valid conjunction/disjunction tree");
NegateL = false;
NegateR = false;
NegateAfterR = false;
NegateAfterAll = false;
}
// Emit sub-trees.
AArch64CC::CondCode RHSCC;
SDValue CmpR = emitConjunctionDisjunctionTreeRec(DAG, RHS, RHSCC, Negate,
CCOp, Predicate);
if (NegateOpsAndResult && !Negate)
SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate);
if (NegateAfterR)
RHSCC = AArch64CC::getInvertedCondCode(RHSCC);
// Emit LHS. We may need to negate it.
SDValue CmpL = emitConjunctionDisjunctionTreeRec(DAG, LHS, OutCC,
NegateOpsAndResult, CmpR,
RHSCC);
// If we transformed an OR to and AND then we have to negate the result
// (or absorb the Negate parameter).
if (NegateOpsAndResult && !Negate)
SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC);
if (NegateAfterAll)
OutCC = AArch64CC::getInvertedCondCode(OutCC);
return CmpL;
}
/// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain
/// of CCMP/CFCMP ops. See @ref AArch64CCMP.
/// \see emitConjunctionDisjunctionTreeRec().
static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val,
AArch64CC::CondCode &OutCC) {
bool CanNegate;
if (!isConjunctionDisjunctionTree(Val, CanNegate))
/// Emit expression as a conjunction (a series of CCMP/CFCMP ops).
/// In some cases this is even possible with OR operations in the expression.
/// See \ref AArch64CCMP.
/// \see emitConjunctionRec().
static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
AArch64CC::CondCode &OutCC) {
bool DummyCanNegate;
bool DummyMustBeFirst;
if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false))
return SDValue();
return emitConjunctionDisjunctionTreeRec(DAG, Val, OutCC, false, SDValue(),
AArch64CC::AL);
return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL);
}
/// @}
@ -1859,7 +1891,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
}
if (!Cmp && (RHSC->isNullValue() || RHSC->isOne())) {
if ((Cmp = emitConjunctionDisjunctionTree(DAG, LHS, AArch64CC))) {
if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) {
if ((CC == ISD::SETNE) ^ RHSC->isNullValue())
AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
}
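
For orientation, an editorial example of the source-level pattern this
conjunction lowering targets (not taken from the patch; the exact NZCV
immediates are elided): a chain of '&&' between integer comparisons can be
emitted as one cmp followed by ccmp instructions, with only the final flags
being tested.

// On AArch64, a conjunction such as the one below can lower to roughly
//   cmp  a, #0
//   ccmp b, #5, <nzcv-if-a-was-nonzero>, eq
//   ccmp c, d,  <nzcv-if-b-was-not-gt>,  gt
//   cset result, ne
// rather than three separate compare-and-branch sequences.
bool allHold(int a, int b, int c, int d) {
  return a == 0 && b > 5 && c != d;
}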

View File

@ -90,7 +90,6 @@ class UnmangledFuncInfo {
public:
using ID = AMDGPULibFunc::EFuncId;
UnmangledFuncInfo() = default;
UnmangledFuncInfo(StringRef _Name, unsigned _NumArgs)
: Name(_Name), NumArgs(_NumArgs) {}
// Get index to Table by function name.

View File

@ -25,6 +25,7 @@
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
@ -568,6 +569,14 @@ bool MipsAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
}
}
bool MipsAsmBackend::isMicroMips(const MCSymbol *Sym) const {
if (const auto *ElfSym = dyn_cast<const MCSymbolELF>(Sym)) {
if (ElfSym->getOther() & ELF::STO_MIPS_MICROMIPS)
return true;
}
return false;
}
MCAsmBackend *llvm::createMipsAsmBackend(const Target &T,
const MCSubtargetInfo &STI,
const MCRegisterInfo &MRI,

View File

@ -25,6 +25,7 @@ class MCAssembler;
struct MCFixupKindInfo;
class MCObjectWriter;
class MCRegisterInfo;
class MCSymbolELF;
class Target;
class MipsAsmBackend : public MCAsmBackend {
@ -90,6 +91,7 @@ class MipsAsmBackend : public MCAsmBackend {
bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target) override;
bool isMicroMips(const MCSymbol *Sym) const override;
}; // class MipsAsmBackend
} // namespace

View File

@ -15,6 +15,7 @@
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbolELF.h"
@ -53,6 +54,22 @@ void MipsELFStreamer::EmitInstruction(const MCInst &Inst,
createPendingLabelRelocs();
}
void MipsELFStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
Frame.Begin = getContext().createTempSymbol();
MCELFStreamer::EmitLabel(Frame.Begin);
}
MCSymbol *MipsELFStreamer::EmitCFILabel() {
MCSymbol *Label = getContext().createTempSymbol("cfi", true);
MCELFStreamer::EmitLabel(Label);
return Label;
}
void MipsELFStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
Frame.End = getContext().createTempSymbol();
MCELFStreamer::EmitLabel(Frame.End);
}
void MipsELFStreamer::createPendingLabelRelocs() {
MipsTargetELFStreamer *ELFTargetStreamer =
static_cast<MipsTargetELFStreamer *>(getTargetStreamer());

View File

@ -26,6 +26,7 @@ class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
class MCSubtargetInfo;
struct MCDwarfFrameInfo;
class MipsELFStreamer : public MCELFStreamer {
SmallVector<std::unique_ptr<MipsOptionRecord>, 8> MipsOptionRecords;
@ -60,6 +61,12 @@ class MipsELFStreamer : public MCELFStreamer {
void EmitValueImpl(const MCExpr *Value, unsigned Size, SMLoc Loc) override;
void EmitIntValue(uint64_t Value, unsigned Size) override;
// Overriding these functions allows us to avoid recording of these labels
// in EmitLabel and later marking them as microMIPS.
void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
MCSymbol *EmitCFILabel() override;
/// Emits all the option records stored up until the point it's called.
void EmitMipsOptionRecords();

View File

@ -1733,7 +1733,7 @@ defm S_MMR6 : Cmp_Pats<f32, NOR_MMR6, ZERO>, ISA_MICROMIPS32R6;
defm D_MMR6 : Cmp_Pats<f64, NOR_MMR6, ZERO>, ISA_MICROMIPS32R6;
def : MipsPat<(f32 fpimm0), (MTC1_MMR6 ZERO)>, ISA_MICROMIPS32R6;
def : MipsPat<(f32 fpimm0neg), (FNEG_S_MMR6 (MTC1 ZERO))>, ISA_MICROMIPS32R6;
def : MipsPat<(f32 fpimm0neg), (FNEG_S_MMR6 (MTC1_MMR6 ZERO))>, ISA_MICROMIPS32R6;
def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
(TRUNC_W_D_MMR6 FGR64Opnd:$src)>, ISA_MICROMIPS32R6;

View File

@ -838,7 +838,7 @@ def : MipsPat<(i64 (sext (i32 (sub GPR32:$src, GPR32:$src2)))),
(SUBu GPR32:$src, GPR32:$src2), sub_32)>;
def : MipsPat<(i64 (sext (i32 (mul GPR32:$src, GPR32:$src2)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
(MUL GPR32:$src, GPR32:$src2), sub_32)>;
(MUL GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS3_NOT_32R6_64R6;
def : MipsPat<(i64 (sext (i32 (MipsMFHI ACC64:$src)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
(PseudoMFHI ACC64:$src), sub_32)>;
@ -1139,3 +1139,6 @@ def SLTUImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rs),
"sltu\t$rs, $rt, $imm">, GPR_64;
def : MipsInstAlias<"sltu\t$rs, $imm", (SLTUImm64 GPR64Opnd:$rs, GPR64Opnd:$rs,
imm64:$imm)>, GPR_64;
def : MipsInstAlias<"rdhwr $rt, $rs",
(RDHWR64 GPR64Opnd:$rt, HWRegsOpnd:$rs, 0), 1>, GPR_64;

View File

@ -301,6 +301,9 @@ def : MipsPat<(select (i32 (seteq i32:$cond, immz)), immz, i64:$f),
// Patterns used for matching away redundant sign extensions.
// MIPS32 arithmetic instructions sign extend their result implicitly.
def : MipsPat<(i64 (sext (i32 (mul GPR32:$src, GPR32:$src2)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
(MUL_R6 GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS64R6;
def : MipsPat<(i64 (sext (i32 (sdiv GPR32:$src, GPR32:$src2)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
(DIV GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS64R6;

View File

@ -953,6 +953,11 @@ bool MipsFastISel::selectBranch(const Instruction *I) {
MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
// For now, just try the simplest case where it's fed by a compare.
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
MVT CIMVT =
TLI.getValueType(DL, CI->getOperand(0)->getType(), true).getSimpleVT();
if (CIMVT == MVT::i1)
return false;
unsigned CondReg = getRegForValue(CI);
BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ))
.addReg(CondReg)

View File

@ -485,14 +485,14 @@ let AdditionalPredicates = [NotInMicroMips] in {
def CTC1 : MMRel, MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, II_CTC1>, MFC1_FM<6>,
ISA_MIPS1;
def MFC1 : MMRel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, II_MFC1,
bitconvert>, MFC1_FM<0>, ISA_MIPS1;
def MFC1 : MMRel, StdMMR6Rel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, II_MFC1,
bitconvert>, MFC1_FM<0>, ISA_MIPS1;
def MFC1_D64 : MFC1_FT<"mfc1", GPR32Opnd, FGR64Opnd, II_MFC1>, MFC1_FM<0>,
ISA_MIPS1, FGR_64 {
let DecoderNamespace = "MipsFP64";
}
def MTC1 : MMRel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, II_MTC1,
bitconvert>, MFC1_FM<4>, ISA_MIPS1;
def MTC1 : MMRel, StdMMR6Rel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, II_MTC1,
bitconvert>, MFC1_FM<4>, ISA_MIPS1;
def MTC1_D64 : MTC1_FT<"mtc1", FGR64Opnd, GPR32Opnd, II_MTC1>, MFC1_FM<4>,
ISA_MIPS1, FGR_64 {
let DecoderNamespace = "MipsFP64";

View File

@ -299,8 +299,12 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
// register). Unfortunately, we have to make this decision before register
// allocation so for now we use a spill/reload sequence for all
// double-precision values in regardless of being an odd/even register.
if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
(FP64 && !Subtarget.useOddSPReg())) {
//
// For the cases that should be covered here MipsSEISelDAGToDAG adds $sp as
// implicit operand, so other passes (like ShrinkWrapping) are aware that
// stack is used.
if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
&& I->getOperand(3).getReg() == Mips::SP) {
unsigned DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg();
unsigned HiReg = I->getOperand(2).getReg();
@ -360,9 +364,12 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
// register). Unfortunately, we have to make this decision before register
// allocation so for now we use a spill/reload sequence for all
// double-precision values in regardless of being an odd/even register.
if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
(FP64 && !Subtarget.useOddSPReg())) {
//
// For the cases that should be covered here MipsSEISelDAGToDAG adds $sp as
// implicit operand, so other passes (like ShrinkWrapping) are aware that
// stack is used.
if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
&& I->getOperand(3).getReg() == Mips::SP) {
unsigned DstReg = I->getOperand(0).getReg();
unsigned SrcReg = Op1.getReg();
unsigned N = Op2.getImm();

View File

@ -238,6 +238,18 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
case Mips::WRDSP:
addDSPCtrlRegOperands(true, MI, MF);
break;
case Mips::BuildPairF64_64:
case Mips::ExtractElementF64_64:
if (!Subtarget->useOddSPReg()) {
MI.addOperand(MachineOperand::CreateReg(Mips::SP, false, true));
break;
}
// fallthrough
case Mips::BuildPairF64:
case Mips::ExtractElementF64:
if (Subtarget->isABI_FPXX() && !Subtarget->hasMTHC1())
MI.addOperand(MachineOperand::CreateReg(Mips::SP, false, true));
break;
default:
replaceUsesWithZeroReg(MRI, MI);
}

View File

@ -25,9 +25,14 @@
using namespace llvm;
static unsigned getUnconditionalBranch(const MipsSubtarget &STI) {
if (STI.inMicroMipsMode())
return STI.isPositionIndependent() ? Mips::B_MM : Mips::J_MM;
return STI.isPositionIndependent() ? Mips::B : Mips::J;
}
MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
: MipsInstrInfo(STI, STI.isPositionIndependent() ? Mips::B : Mips::J),
RI() {}
: MipsInstrInfo(STI, getUnconditionalBranch(STI)), RI() {}
const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
return RI;
@ -643,7 +648,7 @@ unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
Opc == Mips::BNE64 || Opc == Mips::BGTZ64 || Opc == Mips::BGEZ64 ||
Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 || Opc == Mips::BC1T ||
Opc == Mips::BC1F || Opc == Mips::B || Opc == Mips::J ||
Opc == Mips::B_MM || Opc == Mips::BEQZC_MM ||
Opc == Mips::J_MM || Opc == Mips::B_MM || Opc == Mips::BEQZC_MM ||
Opc == Mips::BNEZC_MM || Opc == Mips::BEQC || Opc == Mips::BNEC ||
Opc == Mips::BLTC || Opc == Mips::BGEC || Opc == Mips::BLTUC ||
Opc == Mips::BGEUC || Opc == Mips::BGTZC || Opc == Mips::BLEZC ||

View File

@ -592,6 +592,7 @@ def : InstRW<[P9_PM_3C, IP_EXECO_1C, IP_EXECE_1C, DISP_1C, DISP_1C, DISP_1C],
XXPERM,
XXPERMR,
XXSLDWI,
XXSLDWIs,
XXSPLTIB,
XXSPLTW,
XXSPLTWs,

View File

@ -8454,17 +8454,6 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
// If the source for the shuffle is a scalar_to_vector that came from a
// 32-bit load, it will have used LXVWSX so we don't need to splat again.
if (Subtarget.hasP9Vector() &&
((isLittleEndian && SplatIdx == 3) ||
(!isLittleEndian && SplatIdx == 0))) {
SDValue Src = V1.getOperand(0);
if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
Src.getOperand(0).getOpcode() == ISD::LOAD &&
Src.getOperand(0).hasOneUse())
return V1;
}
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
DAG.getConstant(SplatIdx, dl, MVT::i32));

View File

@ -877,6 +877,12 @@ let Uses = [RM] in {
"xxsldwi $XT, $XA, $XB, $SHW", IIC_VecPerm,
[(set v4i32:$XT, (PPCvecshl v4i32:$XA, v4i32:$XB,
imm32SExt16:$SHW))]>;
let isCodeGenOnly = 1 in
def XXSLDWIs : XX3Form_2s<60, 2,
(outs vsrc:$XT), (ins vsfrc:$XA, u2imm:$SHW),
"xxsldwi $XT, $XA, $XA, $SHW", IIC_VecPerm, []>;
def XXSPLTW : XX2Form_2<60, 164,
(outs vsrc:$XT), (ins vsrc:$XB, u2imm:$UIM),
"xxspltw $XT, $XB, $UIM", IIC_VecPerm,
@ -886,6 +892,7 @@ let Uses = [RM] in {
def XXSPLTWs : XX2Form_2<60, 164,
(outs vsrc:$XT), (ins vfrc:$XB, u2imm:$UIM),
"xxspltw $XT, $XB, $UIM", IIC_VecPerm, []>;
} // hasSideEffects
} // UseVSXReg = 1
@ -1466,8 +1473,6 @@ let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
(f64 (PPCmtvsra (i64 (vector_extract v2i64:$S, 1)))))),
(f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
}
def : Pat<(v4i32 (scalar_to_vector ScalarLoads.Li32)),
(v4i32 (XXSPLTWs (LIWAX xoaddr:$src), 1))>;
// Instructions for converting float to i64 feeding a store.
let Predicates = [NoP9Vector] in {
@ -3050,13 +3055,47 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in {
(STXVX $rS, xoaddr:$dst)>;
def : Pat<(int_ppc_vsx_stxvd2x v2f64:$rS, xoaddr:$dst),
(STXVX $rS, xoaddr:$dst)>;
def : Pat<(v4i32 (scalar_to_vector (i32 (load xoaddr:$src)))),
(v4i32 (LXVWSX xoaddr:$src))>;
def : Pat<(v4f32 (scalar_to_vector (f32 (load xoaddr:$src)))),
(v4f32 (LXVWSX xoaddr:$src))>;
def : Pat<(v4f32 (scalar_to_vector
(f32 (fpround (f64 (extloadf32 xoaddr:$src)))))),
(v4f32 (LXVWSX xoaddr:$src))>;
let AddedComplexity = 400 in {
// LIWAX - This instruction is used for sign extending i32 -> i64.
// LIWZX - This instruction will be emitted for i32, f32, and when
// zero-extending i32 to i64 (zext i32 -> i64).
let Predicates = [IsLittleEndian] in {
def : Pat<(v2i64 (scalar_to_vector (i64 (sextloadi32 xoaddr:$src)))),
(v2i64 (XXPERMDIs
(COPY_TO_REGCLASS (LIWAX xoaddr:$src), VSRC), 2))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (zextloadi32 xoaddr:$src)))),
(v2i64 (XXPERMDIs
(COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 2))>;
def : Pat<(v4i32 (scalar_to_vector (i32 (load xoaddr:$src)))),
(v4i32 (XXPERMDIs
(COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 2))>;
def : Pat<(v4f32 (scalar_to_vector (f32 (load xoaddr:$src)))),
(v4f32 (XXPERMDIs
(COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 2))>;
}
let Predicates = [IsBigEndian] in {
def : Pat<(v2i64 (scalar_to_vector (i64 (sextloadi32 xoaddr:$src)))),
(v2i64 (COPY_TO_REGCLASS (LIWAX xoaddr:$src), VSRC))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (zextloadi32 xoaddr:$src)))),
(v2i64 (COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC))>;
def : Pat<(v4i32 (scalar_to_vector (i32 (load xoaddr:$src)))),
(v4i32 (XXSLDWIs
(COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 1))>;
def : Pat<(v4f32 (scalar_to_vector (f32 (load xoaddr:$src)))),
(v4f32 (XXSLDWIs
(COPY_TO_REGCLASS (LIWZX xoaddr:$src), VSRC), 1))>;
}
}
// Build vectors from i8 loads
def : Pat<(v16i8 (scalar_to_vector ScalarLoads.Li8)),
@ -3218,6 +3257,39 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in {
def : Pat<(f32 (fpround (f64 (extloadf32 ixaddr:$src)))),
(f32 (DFLOADf32 ixaddr:$src))>;
let AddedComplexity = 400 in {
// The following pseudoinstructions are used to ensure the utilization
// of all 64 VSX registers.
let Predicates = [IsLittleEndian, HasP9Vector] in {
def : Pat<(v2i64 (scalar_to_vector (i64 (load ixaddr:$src)))),
(v2i64 (XXPERMDIs
(COPY_TO_REGCLASS (DFLOADf64 ixaddr:$src), VSRC), 2))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (load xaddr:$src)))),
(v2i64 (XXPERMDIs
(COPY_TO_REGCLASS (XFLOADf64 xaddr:$src), VSRC), 2))>;
def : Pat<(v2f64 (scalar_to_vector (f64 (load ixaddr:$src)))),
(v2f64 (XXPERMDIs
(COPY_TO_REGCLASS (DFLOADf64 ixaddr:$src), VSRC), 2))>;
def : Pat<(v2f64 (scalar_to_vector (f64 (load xaddr:$src)))),
(v2f64 (XXPERMDIs
(COPY_TO_REGCLASS (XFLOADf64 xaddr:$src), VSRC), 2))>;
}
let Predicates = [IsBigEndian, HasP9Vector] in {
def : Pat<(v2i64 (scalar_to_vector (i64 (load ixaddr:$src)))),
(v2i64 (COPY_TO_REGCLASS (DFLOADf64 ixaddr:$src), VSRC))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (load xaddr:$src)))),
(v2i64 (COPY_TO_REGCLASS (XFLOADf64 xaddr:$src), VSRC))>;
def : Pat<(v2f64 (scalar_to_vector (f64 (load ixaddr:$src)))),
(v2f64 (COPY_TO_REGCLASS (DFLOADf64 ixaddr:$src), VSRC))>;
def : Pat<(v2f64 (scalar_to_vector (f64 (load xaddr:$src)))),
(v2f64 (COPY_TO_REGCLASS (XFLOADf64 xaddr:$src), VSRC))>;
}
}
let Predicates = [IsBigEndian, HasP9Vector] in {
// (Un)Signed DWord vector extract -> QP
@ -3932,3 +4004,4 @@ let AddedComplexity = 400 in {
(v4i32 (VEXTSH2W $A))>;
}
}

View File

@ -2924,12 +2924,20 @@ static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
// x & (-1 >> y) s>= x -> x s<= (-1 >> y)
if (X != I.getOperand(1)) // X must be on RHS of comparison!
return nullptr; // Ignore the other case.
if (!match(M, m_Constant())) // Can not do this fold with non-constant.
return nullptr;
if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
return nullptr;
DstPred = ICmpInst::Predicate::ICMP_SLE;
break;
case ICmpInst::Predicate::ICMP_SLT:
// x & (-1 >> y) s< x -> x s> (-1 >> y)
if (X != I.getOperand(1)) // X must be on RHS of comparison!
return nullptr; // Ignore the other case.
if (!match(M, m_Constant())) // Can not do this fold with non-constant.
return nullptr;
if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
return nullptr;
DstPred = ICmpInst::Predicate::ICMP_SGT;
break;
case ICmpInst::Predicate::ICMP_SLE:
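
A small editorial example of why the new m_NonNegative() guard matters for the
signed folds above (the function names are illustrative):

#include <cstdint>

// The fold rewrites  (x & M) s>= x  into  x s<= M  for masks of the form
// (-1 >> y). With y == 0 the mask M is -1, i.e. it has a negative element,
// and the two forms disagree:
bool originalForm(int32_t x, int32_t M) { return (x & M) >= x; } // true for M == -1
bool foldedForm(int32_t x, int32_t M)   { return x <= M; }       // false for x == 0, M == -1
// originalForm(0, -1) is true while foldedForm(0, -1) is false, so the fold is
// now restricted to masks whose elements are all non-negative.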

View File

@ -231,17 +231,17 @@ struct TransformedFunction {
TransformedFunction& operator=(TransformedFunction&&) = default;
/// Type of the function before the transformation.
FunctionType* const OriginalType;
FunctionType *OriginalType;
/// Type of the function after the transformation.
FunctionType* const TransformedType;
FunctionType *TransformedType;
/// Transforming a function may change the position of arguments. This
/// member records the mapping from each argument's old position to its new
/// position. Argument positions are zero-indexed. If the transformation
/// from F to F' made the first argument of F into the third argument of F',
/// then ArgumentIndexMapping[0] will equal 2.
const std::vector<unsigned> ArgumentIndexMapping;
std::vector<unsigned> ArgumentIndexMapping;
};
/// Given function attributes from a call site for the original function,

View File

@ -41,6 +41,7 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils.h"
@ -201,6 +202,21 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
SSAUpdate.RewriteUse(*UseToRewrite);
}
SmallVector<DbgValueInst *, 4> DbgValues;
llvm::findDbgValues(DbgValues, I);
// Update pre-existing debug value uses that reside outside the loop.
auto &Ctx = I->getContext();
for (auto DVI : DbgValues) {
BasicBlock *UserBB = DVI->getParent();
if (InstBB == UserBB || L->contains(UserBB))
continue;
// We currently only handle debug values residing in blocks where we have
// inserted a PHI instruction.
if (Value *V = SSAUpdate.FindValueForBlock(UserBB))
DVI->setOperand(0, MetadataAsValue::get(Ctx, ValueAsMetadata::get(V)));
}
// SSAUpdater might have inserted phi-nodes inside other loops. We'll need
// to post-process them to keep LCSSA form.
for (PHINode *InsertedPN : InsertedPHIs) {

View File

@ -64,6 +64,11 @@ bool SSAUpdater::HasValueForBlock(BasicBlock *BB) const {
return getAvailableVals(AV).count(BB);
}
Value *SSAUpdater::FindValueForBlock(BasicBlock *BB) const {
AvailableValsTy::iterator AVI = getAvailableVals(AV).find(BB);
return (AVI != getAvailableVals(AV).end()) ? AVI->second : nullptr;
}
void SSAUpdater::AddAvailableValue(BasicBlock *BB, Value *V) {
assert(ProtoType && "Need to initialize SSAUpdater");
assert(ProtoType == V->getType() &&

View File

@ -1216,7 +1216,6 @@ class DeclContextLookupResult {
value_type SingleElement;
public:
iterator() = default;
explicit iterator(pointer Pos, value_type Single = nullptr)
: IteratorBase(Pos), SingleElement(Single) {}

View File

@ -1953,6 +1953,39 @@ def Target : InheritableAttr {
return parse(getFeaturesStr());
}
StringRef getArchitecture() const {
StringRef Features = getFeaturesStr();
if (Features == "default") return {};
SmallVector<StringRef, 1> AttrFeatures;
Features.split(AttrFeatures, ",");
for (auto &Feature : AttrFeatures) {
Feature = Feature.trim();
if (Feature.startswith("arch="))
return Feature.drop_front(sizeof("arch=") - 1);
}
return "";
}
// Gets the list of features as simple string-refs with no +/- or 'no-'.
// Only adds the items to 'Out' that are additions.
void getAddedFeatures(llvm::SmallVectorImpl<StringRef> &Out) const {
StringRef Features = getFeaturesStr();
if (Features == "default") return;
SmallVector<StringRef, 1> AttrFeatures;
Features.split(AttrFeatures, ",");
for (auto &Feature : AttrFeatures) {
Feature = Feature.trim();
if (!Feature.startswith("no-") && !Feature.startswith("arch=") &&
!Feature.startswith("fpmath=") && !Feature.startswith("tune="))
Out.push_back(Feature);
}
}
template<class Compare>
ParsedTargetAttr parse(Compare cmp) const {
ParsedTargetAttr Attrs = parse();
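
An editorial sketch mirroring the two new helpers for a concrete input (the
attribute string is a made-up example, and this is not the Clang code itself):

#include <sstream>
#include <string>
#include <vector>

// Split a target-attribute feature string the way getArchitecture() and
// getAddedFeatures() do: report the "arch=" entry separately and keep only the
// plain additions, skipping "no-", "arch=", "fpmath=" and "tune=" entries.
static void splitTargetAttr(const std::string &Features, std::string &Arch,
                            std::vector<std::string> &Added) {
  std::stringstream SS(Features);
  std::string Item;
  while (std::getline(SS, Item, ',')) {
    if (Item.rfind("arch=", 0) == 0)
      Arch = Item.substr(5);
    else if (Item.rfind("no-", 0) != 0 && Item.rfind("fpmath=", 0) != 0 &&
             Item.rfind("tune=", 0) != 0)
      Added.push_back(Item);
  }
}
// For "arch=sandybridge,sse4.2,no-avx": Arch becomes "sandybridge" and Added
// holds just {"sse4.2"}.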

View File

@ -3364,7 +3364,7 @@ def IFuncDocs : Documentation {
let Content = [{
``__attribute__((ifunc("resolver")))`` is used to mark that the address of a declaration should be resolved at runtime by calling a resolver function.
The symbol name of the resolver function is given in quotes. A function with this name (after mangling) must be defined in the current translation unit; it may be ``static``. The resolver function should take no arguments and return a pointer.
The symbol name of the resolver function is given in quotes. A function with this name (after mangling) must be defined in the current translation unit; it may be ``static``. The resolver function should return a pointer.
The ``ifunc`` attribute may only be used on a function declaration. A function declaration with an ``ifunc`` attribute is considered to be a definition of the declared entity. The entity must not have weak linkage; for example, in C++, it cannot be applied to a declaration if a definition at that location would be considered inline.
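
A minimal editorial example of the usage described above (all identifiers are
made up; assumes an x86 ELF target where ifunc and __builtin_cpu_supports are
available):

static int addScalar(int a, int b) { return a + b; }
static int addVector(int a, int b) { return a + b; }

// The resolver runs once, at load time, and returns a pointer to the chosen
// implementation. extern "C" keeps its symbol name unmangled so that it
// matches the string given in the attribute.
extern "C" void *resolveAdd(void) {
  return __builtin_cpu_supports("avx2") ? (void *)addVector
                                        : (void *)addScalar;
}

// A declaration carrying the ifunc attribute is itself a definition, bound at
// load time to whatever resolveAdd() returns.
extern "C" int addDispatch(int a, int b) __attribute__((ifunc("resolveAdd")));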

View File

@ -238,7 +238,7 @@ def warn_option_invalid_ocl_version : Warning<
"OpenCL version %0 does not support the option '%1'">, InGroup<Deprecated>;
def warn_stdlibcxx_not_found : Warning<
"include path for stdlibc++ headers not found; pass '-std=libc++' on the "
"include path for stdlibc++ headers not found; pass '-stdlib=libc++' on the "
"command line to use the libc++ standard library instead">,
InGroup<DiagGroup<"stdlibcxx-not-found">>;
}

View File

@ -2857,8 +2857,6 @@ def err_cyclic_alias : Error<
"%select{alias|ifunc}0 definition is part of a cycle">;
def err_ifunc_resolver_return : Error<
"ifunc resolver function must return a pointer">;
def err_ifunc_resolver_params : Error<
"ifunc resolver function must have no parameters">;
def warn_attribute_wrong_decl_type_str : Warning<
"%0 attribute only applies to %1">, InGroup<IgnoredAttributes>;
def err_attribute_wrong_decl_type_str : Error<

View File

@ -9734,6 +9734,10 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// Multiversioned functions always have to be emitted, because they are used
// by the resolver.
if (FD->isMultiVersion())
return true;
// Forward declarations aren't required.
if (!FD->doesThisDeclarationHaveABody())
return FD->doesDeclarationForceExternallyVisibleDefinition();

View File

@ -725,7 +725,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// If we're paying attention to global visibility, apply
// -finline-visibility-hidden if this is an inline method.
if (useInlineVisibilityHidden(D))
LV.mergeVisibility(HiddenVisibility, true);
LV.mergeVisibility(HiddenVisibility, /*visibilityExplicit=*/false);
}
}
@ -915,7 +915,7 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
// Note that we do this before merging information about
// the class visibility.
if (!LV.isVisibilityExplicit() && useInlineVisibilityHidden(D))
LV.mergeVisibility(HiddenVisibility, true);
LV.mergeVisibility(HiddenVisibility, /*visibilityExplicit=*/false);
}
// If this class member has an explicit visibility attribute, the only
@ -1262,7 +1262,27 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
!isTemplateInstantiation(FD->getTemplateSpecializationKind()))
return LinkageInfo::none();
// If a function is hidden by -fvisibility-inlines-hidden option and
// is not explicitly attributed as a hidden function,
// we should not make static local variables in the function hidden.
LV = getLVForDecl(FD, computation);
if (isa<VarDecl>(D) && useInlineVisibilityHidden(FD) &&
!LV.isVisibilityExplicit()) {
assert(cast<VarDecl>(D)->isStaticLocal());
// If this was an implicitly hidden inline method, check again for
// explicit visibility on the parent class, and use that for static locals
// if present.
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
LV = getLVForDecl(MD->getParent(), computation);
if (!LV.isVisibilityExplicit()) {
Visibility globalVisibility =
computation.isValueVisibility()
? Context.getLangOpts().getValueVisibilityMode()
: Context.getLangOpts().getTypeVisibilityMode();
return LinkageInfo(VisibleNoLinkage, globalVisibility,
/*visibilityExplicit=*/false);
}
}
}
if (!isExternallyVisible(LV.getLinkage()))
return LinkageInfo::none();

View File

@ -2359,91 +2359,53 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
CGM.getSanStats().create(IRB, SSK);
}
llvm::Value *CodeGenFunction::FormResolverCondition(
const TargetMultiVersionResolverOption &RO) {
llvm::Value *TrueCondition = nullptr;
if (!RO.ParsedAttribute.Architecture.empty())
TrueCondition = EmitX86CpuIs(RO.ParsedAttribute.Architecture);
llvm::Value *
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
llvm::Value *Condition = nullptr;
if (!RO.ParsedAttribute.Features.empty()) {
SmallVector<StringRef, 8> FeatureList;
llvm::for_each(RO.ParsedAttribute.Features,
[&FeatureList](const std::string &Feature) {
FeatureList.push_back(StringRef{Feature}.substr(1));
});
llvm::Value *FeatureCmp = EmitX86CpuSupports(FeatureList);
TrueCondition = TrueCondition ? Builder.CreateAnd(TrueCondition, FeatureCmp)
: FeatureCmp;
if (!RO.Conditions.Architecture.empty())
Condition = EmitX86CpuIs(RO.Conditions.Architecture);
if (!RO.Conditions.Features.empty()) {
llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
Condition =
Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
}
return TrueCondition;
return Condition;
}
void CodeGenFunction::EmitTargetMultiVersionResolver(
llvm::Function *Resolver,
ArrayRef<TargetMultiVersionResolverOption> Options) {
void CodeGenFunction::EmitMultiVersionResolver(
llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
assert((getContext().getTargetInfo().getTriple().getArch() ==
llvm::Triple::x86 ||
getContext().getTargetInfo().getTriple().getArch() ==
llvm::Triple::x86_64) &&
"Only implemented for x86 targets");
// Main function's basic block.
llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);
Builder.SetInsertPoint(CurBlock);
EmitX86CpuInit();
llvm::Function *DefaultFunc = nullptr;
for (const TargetMultiVersionResolverOption &RO : Options) {
Builder.SetInsertPoint(CurBlock);
llvm::Value *TrueCondition = FormResolverCondition(RO);
if (!TrueCondition) {
DefaultFunc = RO.Function;
} else {
llvm::BasicBlock *RetBlock = createBasicBlock("ro_ret", Resolver);
llvm::IRBuilder<> RetBuilder(RetBlock);
RetBuilder.CreateRet(RO.Function);
CurBlock = createBasicBlock("ro_else", Resolver);
Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
}
}
assert(DefaultFunc && "No default version?");
// Emit return from the 'else-ist' block.
Builder.SetInsertPoint(CurBlock);
Builder.CreateRet(DefaultFunc);
}
void CodeGenFunction::EmitCPUDispatchMultiVersionResolver(
llvm::Function *Resolver,
ArrayRef<CPUDispatchMultiVersionResolverOption> Options) {
assert((getContext().getTargetInfo().getTriple().getArch() ==
llvm::Triple::x86 ||
getContext().getTargetInfo().getTriple().getArch() ==
llvm::Triple::x86_64) &&
"Only implemented for x86 targets");
// Main function's basic block.
llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
Builder.SetInsertPoint(CurBlock);
EmitX86CpuInit();
for (const CPUDispatchMultiVersionResolverOption &RO : Options) {
for (const MultiVersionResolverOption &RO : Options) {
Builder.SetInsertPoint(CurBlock);
llvm::Value *Condition = FormResolverCondition(RO);
// "generic" case should catch-all.
if (RO.FeatureMask == 0) {
// The 'default' or 'generic' case.
if (!Condition) {
assert(&RO == Options.end() - 1 &&
"Default or Generic case must be last");
Builder.CreateRet(RO.Function);
return;
}
llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
llvm::IRBuilder<> RetBuilder(RetBlock);
RetBuilder.CreateRet(RO.Function);
CurBlock = createBasicBlock("resolver_else", Resolver);
llvm::Value *TrueCondition = EmitX86CpuSupports(RO.FeatureMask);
Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
Builder.CreateCondBr(Condition, RetBlock, CurBlock);
}
// If no generic/default, emit an unreachable.
Builder.SetInsertPoint(CurBlock);
llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
TrapCall->setDoesNotReturn();

View File

@ -4247,30 +4247,26 @@ class CodeGenFunction : public CodeGenTypeCache {
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
struct TargetMultiVersionResolverOption {
struct MultiVersionResolverOption {
llvm::Function *Function;
TargetAttr::ParsedTargetAttr ParsedAttribute;
unsigned Priority;
TargetMultiVersionResolverOption(
const TargetInfo &TargInfo, llvm::Function *F,
const clang::TargetAttr::ParsedTargetAttr &PT)
: Function(F), ParsedAttribute(PT), Priority(0u) {
for (StringRef Feat : PT.Features)
Priority = std::max(Priority,
TargInfo.multiVersionSortPriority(Feat.substr(1)));
struct Conds {
StringRef Architecture;
llvm::SmallVector<StringRef, 8> Features;
if (!PT.Architecture.empty())
Priority = std::max(Priority,
TargInfo.multiVersionSortPriority(PT.Architecture));
}
Conds(StringRef Arch, ArrayRef<StringRef> Feats)
: Architecture(Arch), Features(Feats.begin(), Feats.end()) {}
} Conditions;
bool operator>(const TargetMultiVersionResolverOption &Other) const {
return Priority > Other.Priority;
}
MultiVersionResolverOption(llvm::Function *F, StringRef Arch,
ArrayRef<StringRef> Feats)
: Function(F), Conditions(Arch, Feats) {}
};
void EmitTargetMultiVersionResolver(
llvm::Function *Resolver,
ArrayRef<TargetMultiVersionResolverOption> Options);
// Emits the body of a multiversion function's resolver. Assumes that the
// options are already sorted in the proper order, with the 'default' option
// last (if it exists).
void EmitMultiVersionResolver(llvm::Function *Resolver,
ArrayRef<MultiVersionResolverOption> Options);
struct CPUDispatchMultiVersionResolverOption {
llvm::Function *Function;
@ -4306,8 +4302,7 @@ class CodeGenFunction : public CodeGenTypeCache {
llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
llvm::Value *EmitX86CpuSupports(uint32_t Mask);
llvm::Value *EmitX86CpuInit();
llvm::Value *
FormResolverCondition(const TargetMultiVersionResolverOption &RO);
llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
};
inline DominatingLLVMValue::saved_type

View File

@ -2399,9 +2399,22 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Function *NewFn);
static unsigned
TargetMVPriority(const TargetInfo &TI,
const CodeGenFunction::MultiVersionResolverOption &RO) {
unsigned Priority = 0;
for (StringRef Feat : RO.Conditions.Features)
Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
if (!RO.Conditions.Architecture.empty())
Priority = std::max(
Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
return Priority;
}
void CodeGenModule::emitMultiVersionFunctions() {
for (GlobalDecl GD : MultiVersionFuncs) {
SmallVector<CodeGenFunction::TargetMultiVersionResolverOption, 10> Options;
SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
getContext().forEachMultiversionedFunctionVersion(
FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
@ -2422,8 +2435,13 @@ void CodeGenModule::emitMultiVersionFunctions() {
}
assert(Func && "This should have just been created");
}
Options.emplace_back(getTarget(), cast<llvm::Function>(Func),
CurFD->getAttr<TargetAttr>()->parse());
const auto *TA = CurFD->getAttr<TargetAttr>();
llvm::SmallVector<StringRef, 8> Feats;
TA->getAddedFeatures(Feats);
Options.emplace_back(cast<llvm::Function>(Func),
TA->getArchitecture(), Feats);
});
llvm::Function *ResolverFunc = cast<llvm::Function>(
@ -2431,11 +2449,16 @@ void CodeGenModule::emitMultiVersionFunctions() {
if (supportsCOMDAT())
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
const TargetInfo &TI = getTarget();
std::stable_sort(
Options.begin(), Options.end(),
std::greater<CodeGenFunction::TargetMultiVersionResolverOption>());
[&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
const CodeGenFunction::MultiVersionResolverOption &RHS) {
return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
});
CodeGenFunction CGF(*this);
CGF.EmitTargetMultiVersionResolver(ResolverFunc, Options);
CGF.EmitMultiVersionResolver(ResolverFunc, Options);
}
}
@ -2444,7 +2467,13 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
assert(FD && "Not a FunctionDecl?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
llvm::Type *DeclTy = getTypes().ConvertTypeForMem(FD->getType());
QualType CanonTy = Context.getCanonicalType(FD->getType());
llvm::Type *DeclTy = getTypes().ConvertFunctionType(CanonTy, FD);
if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
DeclTy = getTypes().GetFunctionType(FInfo);
}
StringRef ResolverName = getMangledName(GD);
llvm::Type *ResolverType = llvm::FunctionType::get(
@ -2455,15 +2484,14 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
GetOrCreateLLVMFunction(ResolverName, ResolverType, GlobalDecl{},
/*ForVTable=*/false));
SmallVector<CodeGenFunction::CPUDispatchMultiVersionResolverOption, 10>
Options;
SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
const TargetInfo &Target = getTarget();
for (const IdentifierInfo *II : DD->cpus()) {
// Get the name of the target function so we can look it up/create it.
std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
getCPUSpecificMangling(*this, II->getName());
llvm::Constant *Func = GetOrCreateLLVMFunction(
MangledName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/false,
MangledName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/true,
/*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
llvm::SmallVector<StringRef, 32> Features;
Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
@ -2473,15 +2501,34 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
Features.begin(), Features.end(), [&Target](StringRef Feat) {
return !Target.validateCpuSupports(Feat);
}), Features.end());
Options.emplace_back(cast<llvm::Function>(Func),
CodeGenFunction::GetX86CpuSupportsMask(Features));
Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
}
llvm::sort(
Options.begin(), Options.end(),
std::greater<CodeGenFunction::CPUDispatchMultiVersionResolverOption>());
[](const CodeGenFunction::MultiVersionResolverOption &LHS,
const CodeGenFunction::MultiVersionResolverOption &RHS) {
return CodeGenFunction::GetX86CpuSupportsMask(LHS.Conditions.Features) >
CodeGenFunction::GetX86CpuSupportsMask(RHS.Conditions.Features);
});
// If the list contains multiple 'default' versions, such as when it contains
// 'pentium' and 'generic', don't emit the call to the generic one (since we
// always run on at least a 'pentium'). We do this by deleting the 'least
// advanced' (read, lowest mangling letter).
while (Options.size() > 1 &&
CodeGenFunction::GetX86CpuSupportsMask(
(Options.end() - 2)->Conditions.Features) == 0) {
StringRef LHSName = (Options.end() - 2)->Function->getName();
StringRef RHSName = (Options.end() - 1)->Function->getName();
if (LHSName.compare(RHSName) < 0)
Options.erase(Options.end() - 2);
else
Options.erase(Options.end() - 1);
}
CodeGenFunction CGF(*this);
CGF.EmitCPUDispatchMultiVersionResolver(ResolverFunc, Options);
CGF.EmitMultiVersionResolver(ResolverFunc, Options);
}
/// If an ifunc for the specified mangled name is not in the module, create and

View File

@ -553,6 +553,15 @@ struct CounterCoverageMappingBuilder
completeDeferred(Count, DeferredEndLoc);
}
size_t locationDepth(SourceLocation Loc) {
size_t Depth = 0;
while (Loc.isValid()) {
Loc = getIncludeOrExpansionLoc(Loc);
Depth++;
}
return Depth;
}
/// Pop regions from the stack into the function's list of regions.
///
/// Adds all regions from \c ParentIndex to the top of the stack to the
@ -567,19 +576,41 @@ struct CounterCoverageMappingBuilder
SourceLocation EndLoc = Region.hasEndLoc()
? Region.getEndLoc()
: RegionStack[ParentIndex].getEndLoc();
size_t StartDepth = locationDepth(StartLoc);
size_t EndDepth = locationDepth(EndLoc);
while (!SM.isWrittenInSameFile(StartLoc, EndLoc)) {
// The region ends in a nested file or macro expansion. Create a
// separate region for each expansion.
SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
bool UnnestStart = StartDepth >= EndDepth;
bool UnnestEnd = EndDepth >= StartDepth;
if (UnnestEnd) {
// The region ends in a nested file or macro expansion. Create a
// separate region for each expansion.
SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
if (EndLoc.isInvalid())
llvm::report_fatal_error("File exit not handled before popRegions");
EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
if (EndLoc.isInvalid())
llvm::report_fatal_error("File exit not handled before popRegions");
EndDepth--;
}
if (UnnestStart) {
// The region begins in a nested file or macro expansion. Create a
// separate region for each expansion.
SourceLocation NestedLoc = getEndOfFileOrMacro(StartLoc);
assert(SM.isWrittenInSameFile(StartLoc, NestedLoc));
if (!isRegionAlreadyAdded(StartLoc, NestedLoc))
SourceRegions.emplace_back(Region.getCounter(), StartLoc, NestedLoc);
StartLoc = getIncludeOrExpansionLoc(StartLoc);
if (StartLoc.isInvalid())
llvm::report_fatal_error("File exit not handled before popRegions");
StartDepth--;
}
}
Region.setStartLoc(StartLoc);
Region.setEndLoc(EndLoc);
MostRecentLocation = EndLoc;
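
A hedged illustration, not taken from the commit, of why the start location now needs the same unnesting as the end location: a coverage region can begin inside a macro expansion while its end is written in the enclosing file. The macro and function names here are invented.

#define IF_POSITIVE(x) if ((x) > 0) {
int f(int x) {
  IF_POSITIVE(x)   // the then-region starts inside this expansion...
    return 1;
  }                // ...but its end is written directly in this file
  return 0;
}
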


@ -107,15 +107,19 @@ void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (FloatABI == ppc::FloatABI::Soft)
Features.push_back("-hard-float");
ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Args);
ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Triple, Args);
if (ReadGOT == ppc::ReadGOTPtrMode::SecurePlt)
Features.push_back("+secure-plt");
}
ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const ArgList &Args) {
ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args) {
if (Args.getLastArg(options::OPT_msecure_plt))
return ppc::ReadGOTPtrMode::SecurePlt;
return ppc::ReadGOTPtrMode::Bss;
if (Triple.isOSOpenBSD())
return ppc::ReadGOTPtrMode::SecurePlt;
else
return ppc::ReadGOTPtrMode::Bss;
}
ppc::FloatABI ppc::getPPCFloatABI(const Driver &D, const ArgList &Args) {


@ -38,7 +38,7 @@ FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
std::string getPPCTargetCPU(const llvm::opt::ArgList &Args);
const char *getPPCAsmModeForCPU(StringRef Name);
ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D,
ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
void getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,


@ -138,7 +138,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pie))
CmdArgs.push_back("-pie");
if (Args.hasArg(options::OPT_nopie))
if (Args.hasArg(options::OPT_nopie) || Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-nopie");
if (Output.isFilename()) {


@ -16353,67 +16353,82 @@ vec_revb(vector unsigned __int128 __a) {
/* vec_xl */
typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
typedef vector float unaligned_vec_float __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
signed char *__ptr) {
return *(vector signed char *)(__ptr + __offset);
return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
vec_xl(signed long long __offset, unsigned char *__ptr) {
return *(vector unsigned char *)(__ptr + __offset);
return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
signed short *__ptr) {
return *(vector signed short *)(__ptr + __offset);
return *(unaligned_vec_sshort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned short
vec_xl(signed long long __offset, unsigned short *__ptr) {
return *(vector unsigned short *)(__ptr + __offset);
return *(unaligned_vec_ushort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
signed int *__ptr) {
return *(vector signed int *)(__ptr + __offset);
return *(unaligned_vec_sint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
unsigned int *__ptr) {
return *(vector unsigned int *)(__ptr + __offset);
return *(unaligned_vec_uint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
float *__ptr) {
return *(vector float *)(__ptr + __offset);
return *(unaligned_vec_float *)(__ptr + __offset);
}
#ifdef __VSX__
typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
typedef vector double unaligned_vec_double __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed long long
vec_xl(signed long long __offset, signed long long *__ptr) {
return *(vector signed long long *)(__ptr + __offset);
return *(unaligned_vec_sll *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned long long
vec_xl(signed long long __offset, unsigned long long *__ptr) {
return *(vector unsigned long long *)(__ptr + __offset);
return *(unaligned_vec_ull *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
double *__ptr) {
return *(vector double *)(__ptr + __offset);
return *(unaligned_vec_double *)(__ptr + __offset);
}
#endif
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
typedef vector unsigned __int128 unaligned_vec_ui128
__attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
vec_xl(signed long long __offset, signed __int128 *__ptr) {
return *(vector signed __int128 *)(__ptr + __offset);
return *(unaligned_vec_si128 *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned __int128
vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
return *(vector unsigned __int128 *)(__ptr + __offset);
return *(unaligned_vec_ui128 *)(__ptr + __offset);
}
#endif
@ -16498,62 +16513,62 @@ vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
signed long long __offset,
signed char *__ptr) {
*(vector signed char *)(__ptr + __offset) = __vec;
*(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
signed long long __offset,
unsigned char *__ptr) {
*(vector unsigned char *)(__ptr + __offset) = __vec;
*(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
signed long long __offset,
signed short *__ptr) {
*(vector signed short *)(__ptr + __offset) = __vec;
*(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
signed long long __offset,
unsigned short *__ptr) {
*(vector unsigned short *)(__ptr + __offset) = __vec;
*(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
signed long long __offset,
signed int *__ptr) {
*(vector signed int *)(__ptr + __offset) = __vec;
*(unaligned_vec_sint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
signed long long __offset,
unsigned int *__ptr) {
*(vector unsigned int *)(__ptr + __offset) = __vec;
*(unaligned_vec_uint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector float __vec,
signed long long __offset,
float *__ptr) {
*(vector float *)(__ptr + __offset) = __vec;
*(unaligned_vec_float *)(__ptr + __offset) = __vec;
}
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
signed long long __offset,
signed long long *__ptr) {
*(vector signed long long *)(__ptr + __offset) = __vec;
*(unaligned_vec_sll *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
signed long long __offset,
unsigned long long *__ptr) {
*(vector unsigned long long *)(__ptr + __offset) = __vec;
*(unaligned_vec_ull *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector double __vec,
signed long long __offset,
double *__ptr) {
*(vector double *)(__ptr + __offset) = __vec;
*(unaligned_vec_double *)(__ptr + __offset) = __vec;
}
#endif
@ -16561,13 +16576,13 @@ static inline __ATTRS_o_ai void vec_xst(vector double __vec,
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
signed long long __offset,
signed __int128 *__ptr) {
*(vector signed __int128 *)(__ptr + __offset) = __vec;
*(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
signed long long __offset,
unsigned __int128 *__ptr) {
*(vector unsigned __int128 *)(__ptr + __offset) = __vec;
*(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
}
#endif
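
A hedged usage sketch of what the new aligned(1) typedefs buy: with the old casts to plain vector types, dereferencing a pointer that is not 16-byte aligned was undefined behaviour. This only compiles when targeting PowerPC with AltiVec enabled; the buffer and function names are invented.

#include <altivec.h>

static float buf[9];

void roundtrip_unaligned(void) {
  vector float v = vec_xl(0, buf + 1);  // buf + 1 is only 4-byte aligned
  vec_xst(v, 0, buf + 1);               // unaligned store through the same typedef
}
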


@ -6092,7 +6092,10 @@ PerformConstructorInitialization(Sema &S,
TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
if (!TSInfo)
TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
SourceRange ParenOrBraceRange = Kind.getParenOrBraceRange();
SourceRange ParenOrBraceRange =
(Kind.getKind() == InitializationKind::IK_DirectList)
? SourceRange(LBraceLoc, RBraceLoc)
: Kind.getParenOrBraceRange();
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(
Step.Function.FoundDecl.getDecl())) {
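
A hedged illustration of the initialization form this hunk is about: for direct-list-initialization the constructor node now carries the brace locations instead of an empty paren range. The type and value here are invented.

struct S { S(int); };
S s{42};
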


@ -5022,13 +5022,16 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteFPPragmaOptions(SemaRef.getFPOptions());
WriteOpenCLExtensions(SemaRef);
WriteOpenCLExtensionTypes(SemaRef);
WriteOpenCLExtensionDecls(SemaRef);
WriteCUDAPragmas(SemaRef);
// If we're emitting a module, write out the submodule information.
if (WritingModule)
WriteSubmodules(WritingModule);
// We need to have information about submodules to correctly deserialize
// decls from OpenCLExtensionDecls block
WriteOpenCLExtensionDecls(SemaRef);
Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
// Write the record containing external, unnamed definitions.


@ -475,9 +475,6 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
SingleTy = ResultTy;
if (LSym->getType() != SingleTy)
return None;
// Substracting unsigned integers is a nightmare.
if (!SingleTy->isSignedIntegerOrEnumerationType())
return None;
} else {
// Don't rearrange other operations.
return None;
@ -485,6 +482,10 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
assert(!SingleTy.isNull() && "We should have figured out the type by now!");
// Rearrange signed symbolic expressions only
if (!SingleTy->isSignedIntegerOrEnumerationType())
return None;
SymbolRef RSym = Rhs.getAsSymbol();
if (!RSym || RSym->getType() != SingleTy)
return None;
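
A hedged sketch of the source pattern the rearrangement applies to; the function and variable names are invented. For signed operands the analyzer may rewrite a comparison such as "x + 1 > y + 2" into "(x - y) > 1"; the relocated check keeps unsigned operands out of every rearrangement, not just the subtraction case.

int ordered(int x, int y) {
  return x + 1 > y + 2;  // rearrangement candidate when x and y are signed
}
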


@ -261,7 +261,11 @@ void DWARFUnit::ExtractDIEsRWLocked() {
}
if (!m_die_array.empty()) {
lldbassert(!m_first_die || m_first_die == m_die_array.front());
if (m_first_die) {
// Only needed for the assertion.
m_first_die.SetEmptyChildren(m_die_array.front().GetEmptyChildren());
lldbassert(m_first_die == m_die_array.front());
}
m_first_die = m_die_array.front();
}


@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "346007"
#define SVN_REVISION "348686"


@ -7,4 +7,4 @@
#define LLD_REPOSITORY_STRING "FreeBSD"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
#define LLD_REVISION_STRING "346007-1300001"
#define LLD_REVISION_STRING "348686-1300001"


@ -1,2 +1,2 @@
/* $FreeBSD$ */
#define LLVM_REVISION "svn-r346007"
#define LLVM_REVISION "svn-r348686"