Upgrade our copies of clang, llvm, lld, lldb, compiler-rt and libc++ to 6.0.0 (branches/release_60 r325330).

MFC after:	3 months
X-MFC-With:	r327952
PR:		224669
Dimitry Andric 2018-02-16 20:45:32 +00:00
commit 954b921d66
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=329410
54 changed files with 655 additions and 509 deletions

View File

@ -32,6 +32,7 @@
#include <sys/types.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
@ -214,7 +215,7 @@ void AsanCheckIncompatibleRT() {
// the functions in dynamic ASan runtime instead of the functions in
// system libraries, causing crashes later in ASan initialization.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
char filename[128];
char filename[PATH_MAX];
MemoryMappedSegment segment(filename, sizeof(filename));
while (proc_maps.Next(&segment)) {
if (IsDynamicRTName(segment.filename)) {

View File

@ -238,6 +238,26 @@ def int_amdgcn_cvt_pkrtz : Intrinsic<
[IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cvt_pknorm_i16 : Intrinsic<
[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
[IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cvt_pknorm_u16 : Intrinsic<
[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
[IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cvt_pk_i16 : Intrinsic<
[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cvt_pk_u16 : Intrinsic<
[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_class : Intrinsic<
[llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable]
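
A usage sketch for the four new packing intrinsics follows; the clang builtin name is an assumption inferred from the LLVM intrinsic name above, and the code requires an amdgcn target:

```cpp
// Hypothetical usage (builtin name assumed from the intrinsic definition
// above): each float is clamped to [-1, 1] and scaled to a signed i16,
// and the two results are packed into a single 32-bit register.
typedef short short2 __attribute__((ext_vector_type(2)));

short2 pack_snorm(float a, float b) {
  return __builtin_amdgcn_cvt_pknorm_i16(a, b);
}
```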

View File

@ -3738,6 +3738,15 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_kxnor_w : // TODO: remove this intrinsic
Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
def int_x86_avx512_kunpck_bw : GCCBuiltin<"__builtin_ia32_kunpckhi">,
Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
def int_x86_avx512_kunpck_wd : GCCBuiltin<"__builtin_ia32_kunpcksi">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_x86_avx512_kunpck_dq : GCCBuiltin<"__builtin_ia32_kunpckdi">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_x86_avx512_kortestz_w : GCCBuiltin<"__builtin_ia32_kortestzhi">,
Intrinsic<[llvm_i32_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;

View File

@ -0,0 +1,38 @@
//===- MCAsmMacro.h - Assembly Macros ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCASMMACRO_H
#define LLVM_MC_MCASMMACRO_H
#include "llvm/MC/MCParser/MCAsmLexer.h"
namespace llvm {
struct MCAsmMacroParameter {
StringRef Name;
std::vector<AsmToken> Value;
bool Required = false;
bool Vararg = false;
MCAsmMacroParameter() = default;
};
typedef std::vector<MCAsmMacroParameter> MCAsmMacroParameters;
struct MCAsmMacro {
StringRef Name;
StringRef Body;
MCAsmMacroParameters Parameters;
public:
MCAsmMacro(StringRef N, StringRef B, MCAsmMacroParameters P)
: Name(N), Body(B), Parameters(std::move(P)) {}
};
} // namespace llvm
#endif

View File

@ -18,6 +18,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/MC/MCAsmMacro.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SectionKind.h"
@ -268,6 +269,9 @@ namespace llvm {
unsigned UniqueID,
const MCSymbolELF *Associated);
/// \brief Map of currently defined macros.
StringMap<MCAsmMacro> MacroMap;
public:
explicit MCContext(const MCAsmInfo *MAI, const MCRegisterInfo *MRI,
const MCObjectFileInfo *MOFI,
@ -618,6 +622,17 @@ namespace llvm {
// FIXME: We should really do something about that.
LLVM_ATTRIBUTE_NORETURN void reportFatalError(SMLoc L,
const Twine &Msg);
const MCAsmMacro *lookupMacro(StringRef Name) {
StringMap<MCAsmMacro>::iterator I = MacroMap.find(Name);
return (I == MacroMap.end()) ? nullptr : &I->getValue();
}
void defineMacro(StringRef Name, MCAsmMacro Macro) {
MacroMap.insert(std::make_pair(Name, std::move(Macro)));
}
void undefineMacro(StringRef Name) { MacroMap.erase(Name); }
};
} // end namespace llvm
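
A minimal sketch of the macro table that now lives in MCContext (names taken from the diff above; presumably the move lets macro definitions be shared beyond a single AsmParser instance):

```cpp
#include "llvm/MC/MCContext.h"

// Define a macro only if it is not already present, mirroring the
// "macro is already defined" error path in AsmParser further below.
void defineOnce(llvm::MCContext &Ctx, llvm::StringRef Name,
                llvm::StringRef Body) {
  if (Ctx.lookupMacro(Name))
    return;
  Ctx.defineMacro(Name, llvm::MCAsmMacro(Name, Body, {}));
}
```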

View File

@ -698,24 +698,20 @@ struct SemiNCAInfo {
return;
// Recalculate the set of roots.
DT.Roots = FindRoots(DT, BUI);
for (const NodePtr R : DT.Roots) {
const TreeNodePtr TN = DT.getNode(R);
// A CFG node was selected as a tree root, but the corresponding tree node
// is not connected to the virtual root. This is because the incremental
// algorithm does not really know or use the set of roots and can make a
// different (implicit) decision about which nodes within an infinite loop
// becomes a root.
if (TN && !DT.isVirtualRoot(TN->getIDom())) {
DEBUG(dbgs() << "Root " << BlockNamePrinter(R)
<< " is not virtual root's child\n"
<< "The entire tree needs to be rebuilt\n");
// It should be possible to rotate the subtree instead of recalculating
// the whole tree, but this situation happens extremely rarely in
// practice.
CalculateFromScratch(DT, BUI);
return;
}
auto Roots = FindRoots(DT, BUI);
if (DT.Roots.size() != Roots.size() ||
!std::is_permutation(DT.Roots.begin(), DT.Roots.end(), Roots.begin())) {
// The roots chosen in the CFG have changed. This is because the
// incremental algorithm does not really know or use the set of roots and
// can make a different (implicit) decision about which node within an
// infinite loop becomes a root.
DEBUG(dbgs() << "Roots are different in updated trees\n"
<< "The entire tree needs to be rebuilt\n");
// It may be possible to update the tree without recalculating it, but
// we do not know yet how to do it, and it happens rarely in practice.
CalculateFromScratch(DT, BUI);
return;
}
}
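
The new check treats the two root sets as unordered; a minimal illustration of the std::is_permutation comparison it relies on:

```cpp
#include <algorithm>
#include <vector>

std::vector<int> OldRoots{1, 2, 3};
std::vector<int> NewRoots{3, 1, 2};

// Equal size plus is_permutation means "same roots, possibly reordered";
// any other outcome forces CalculateFromScratch() above.
bool SameRoots = OldRoots.size() == NewRoots.size() &&
                 std::is_permutation(OldRoots.begin(), OldRoots.end(),
                                     NewRoots.begin());
```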

View File

@ -163,7 +163,8 @@ uint64_t DebugHandlerBase::getBaseTypeSize(const DITypeRef TyRef) {
DIType *BaseType = DDTy->getBaseType().resolve();
assert(BaseType && "Unexpected invalid base type");
if (!BaseType)
return 0;
// If this is a derived type, go ahead and get the base type, unless it's a
// reference then it's just the size of the field. Pointer types have no need

View File

@ -1391,7 +1391,8 @@ void DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) {
if (!Name.empty())
addString(MemberDie, dwarf::DW_AT_name, Name);
addType(MemberDie, resolve(DT->getBaseType()));
if (DIType *Resolved = resolve(DT->getBaseType()))
addType(MemberDie, Resolved);
addSourceLine(MemberDie, DT);

View File

@ -205,14 +205,18 @@ void LivePhysRegs::addPristines(const MachineFunction &MF) {
}
void LivePhysRegs::addLiveOutsNoPristines(const MachineBasicBlock &MBB) {
if (!MBB.succ_empty()) {
// To get the live-outs we simply merge the live-ins of all successors.
for (const MachineBasicBlock *Succ : MBB.successors())
addBlockLiveIns(*Succ);
} else if (MBB.isReturnBlock()) {
// For the return block: Add all callee saved registers that are saved and
// restored (somewhere); This does not include callee saved registers that
// are unused and hence not saved and restored; they are called pristine.
// To get the live-outs we simply merge the live-ins of all successors.
for (const MachineBasicBlock *Succ : MBB.successors())
addBlockLiveIns(*Succ);
if (MBB.isReturnBlock()) {
// Return blocks are a special case because we currently don't mark up
// return instructions completely: specifically, there is no explicit
// use for callee-saved registers. So we add all callee saved registers
// that are saved and restored (somewhere). This does not include
// callee saved registers that are unused and hence not saved and
// restored; they are called pristine.
// FIXME: PEI should add explicit markings to return instructions
// instead of implicitly handling them here.
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
if (MFI.isCalleeSavedInfoValid()) {
@ -225,15 +229,8 @@ void LivePhysRegs::addLiveOutsNoPristines(const MachineBasicBlock &MBB) {
void LivePhysRegs::addLiveOuts(const MachineBasicBlock &MBB) {
const MachineFunction &MF = *MBB.getParent();
if (!MBB.succ_empty()) {
addPristines(MF);
addLiveOutsNoPristines(MBB);
} else if (MBB.isReturnBlock()) {
// For the return block: Add all callee saved registers.
const MachineFrameInfo &MFI = MF.getFrameInfo();
if (MFI.isCalleeSavedInfoValid())
addCalleeSavedRegs(*this, MF);
}
addPristines(MF);
addLiveOutsNoPristines(MBB);
}
void LivePhysRegs::addLiveIns(const MachineBasicBlock &MBB) {

View File

@ -16409,7 +16409,9 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N1.getOperand(0).getOperand(1) == N2 &&
N1.getOperand(0).getOperand(0).getValueType().getVectorNumElements() ==
VT.getVectorNumElements()) {
VT.getVectorNumElements() &&
N1.getOperand(0).getOperand(0).getValueType().getSizeInBits() ==
VT.getSizeInBits()) {
return DAG.getBitcast(VT, N1.getOperand(0).getOperand(0));
}

View File

@ -491,9 +491,8 @@ VNInfo *SplitEditor::defValue(unsigned RegIdx,
return VNI;
}
void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo *ParentVNI) {
assert(ParentVNI && "Mapping NULL value");
ValueForcePair &VFP = Values[std::make_pair(RegIdx, ParentVNI->id)];
void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo &ParentVNI) {
ValueForcePair &VFP = Values[std::make_pair(RegIdx, ParentVNI.id)];
VNInfo *VNI = VFP.getPointer();
// ParentVNI was either unmapped or already complex mapped. Either way, just
@ -777,7 +776,7 @@ SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
// the source live range. The spiller also won't try to hoist this copy.
if (SpillMode && !SlotIndex::isSameInstr(ParentVNI->def, Idx) &&
MI->readsVirtualRegister(Edit->getReg())) {
forceRecompute(0, ParentVNI);
forceRecompute(0, *ParentVNI);
defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
return Idx;
}
@ -835,7 +834,7 @@ void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
// The complement interval will be extended as needed by LRCalc.extend().
if (ParentVNI)
forceRecompute(0, ParentVNI);
forceRecompute(0, *ParentVNI);
DEBUG(dbgs() << " overlapIntv [" << Start << ';' << End << "):");
RegAssign.insert(Start, End, OpenIdx);
DEBUG(dump());
@ -878,7 +877,7 @@ void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
unsigned RegIdx = AssignI.value();
if (AtBegin || !MBBI->readsVirtualRegister(Edit->getReg())) {
DEBUG(dbgs() << " cannot find simple kill of RegIdx " << RegIdx << '\n');
forceRecompute(RegIdx, Edit->getParent().getVNInfoAt(Def));
forceRecompute(RegIdx, *Edit->getParent().getVNInfoAt(Def));
} else {
SlotIndex Kill = LIS.getInstructionIndex(*MBBI).getRegSlot();
DEBUG(dbgs() << " move kill to " << Kill << '\t' << *MBBI);
@ -982,7 +981,7 @@ void SplitEditor::computeRedundantBackCopies(
}
}
if (!DominatedVNIs.empty()) {
forceRecompute(0, ParentVNI);
forceRecompute(0, *ParentVNI);
for (auto VNI : DominatedVNIs) {
BackCopies.push_back(VNI);
}
@ -1102,7 +1101,7 @@ void SplitEditor::hoistCopies() {
NotToHoistSet.count(ParentVNI->id))
continue;
BackCopies.push_back(VNI);
forceRecompute(0, ParentVNI);
forceRecompute(0, *ParentVNI);
}
// If it is not beneficial to hoist all the BackCopies, simply remove
@ -1428,6 +1427,41 @@ void SplitEditor::deleteRematVictims() {
Edit->eliminateDeadDefs(Dead, None, &AA);
}
void SplitEditor::forceRecomputeVNI(const VNInfo &ParentVNI) {
// Fast-path for common case.
if (!ParentVNI.isPHIDef()) {
for (unsigned I = 0, E = Edit->size(); I != E; ++I)
forceRecompute(I, ParentVNI);
return;
}
// Trace value through phis.
SmallPtrSet<const VNInfo *, 8> Visited; ///< whether VNI was/is in worklist.
SmallVector<const VNInfo *, 4> WorkList;
Visited.insert(&ParentVNI);
WorkList.push_back(&ParentVNI);
const LiveInterval &ParentLI = Edit->getParent();
const SlotIndexes &Indexes = *LIS.getSlotIndexes();
do {
const VNInfo &VNI = *WorkList.back();
WorkList.pop_back();
for (unsigned I = 0, E = Edit->size(); I != E; ++I)
forceRecompute(I, VNI);
if (!VNI.isPHIDef())
continue;
MachineBasicBlock &MBB = *Indexes.getMBBFromIndex(VNI.def);
for (const MachineBasicBlock *Pred : MBB.predecessors()) {
SlotIndex PredEnd = Indexes.getMBBEndIdx(Pred);
VNInfo *PredVNI = ParentLI.getVNInfoBefore(PredEnd);
assert(PredVNI && "Value available in PhiVNI predecessor");
if (Visited.insert(PredVNI).second)
WorkList.push_back(PredVNI);
}
} while(!WorkList.empty());
}
void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
++NumFinished;
@ -1444,8 +1478,7 @@ void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
// Force rematted values to be recomputed everywhere.
// The new live ranges may be truncated.
if (Edit->didRematerialize(ParentVNI))
for (unsigned i = 0, e = Edit->size(); i != e; ++i)
forceRecompute(i, ParentVNI);
forceRecomputeVNI(*ParentVNI);
}
// Hoist back-copies to the complement interval when in spill mode.

View File

@ -357,7 +357,11 @@ class LLVM_LIBRARY_VISIBILITY SplitEditor {
/// recomputed by LiveRangeCalc::extend regardless of the number of defs.
/// This is used for values whose live range doesn't match RegAssign exactly.
/// They could have rematerialized, or back-copies may have been moved.
void forceRecompute(unsigned RegIdx, const VNInfo *ParentVNI);
void forceRecompute(unsigned RegIdx, const VNInfo &ParentVNI);
/// Calls forceRecompute() on any affected regidx and on ParentVNI
/// predecessors in case of a phi definition.
void forceRecomputeVNI(const VNInfo &ParentVNI);
/// defFromParent - Define Reg from ParentVNI at UseIdx using either
/// rematerialization or a COPY from parent. Return the new value.

View File

@ -75,7 +75,6 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
Name=="ssse3.pabs.d.128" || // Added in 6.0
Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
Name.startswith("avx512.kunpck") || //added in 6.0
Name.startswith("avx2.pabs.") || // Added in 6.0
Name.startswith("avx512.mask.pabs.") || // Added in 6.0
Name.startswith("avx512.broadcastm") || // Added in 6.0
@ -1063,12 +1062,6 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
CI->getArgOperand(1));
} else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
uint64_t Shift = CI->getType()->getScalarSizeInBits() / 2;
uint64_t And = (1ULL << Shift) - 1;
Value* LowBits = Builder.CreateAnd(CI->getArgOperand(0), And);
Value* HighBits = Builder.CreateShl(CI->getArgOperand(1), Shift);
Rep = Builder.CreateOr(LowBits, HighBits);
} else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) {
Type *I32Ty = Type::getInt32Ty(C);
Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),

View File

@ -83,27 +83,6 @@ namespace {
typedef std::vector<AsmToken> MCAsmMacroArgument;
typedef std::vector<MCAsmMacroArgument> MCAsmMacroArguments;
struct MCAsmMacroParameter {
StringRef Name;
MCAsmMacroArgument Value;
bool Required = false;
bool Vararg = false;
MCAsmMacroParameter() = default;
};
typedef std::vector<MCAsmMacroParameter> MCAsmMacroParameters;
struct MCAsmMacro {
StringRef Name;
StringRef Body;
MCAsmMacroParameters Parameters;
public:
MCAsmMacro(StringRef N, StringRef B, MCAsmMacroParameters P)
: Name(N), Body(B), Parameters(std::move(P)) {}
};
/// \brief Helper class for storing information about an active macro
/// instantiation.
struct MacroInstantiation {
@ -164,9 +143,6 @@ class AsmParser : public MCAsmParser {
/// addDirectiveHandler.
StringMap<ExtensionDirectiveHandler> ExtensionDirectiveMap;
/// \brief Map of currently defined macros.
StringMap<MCAsmMacro> MacroMap;
/// \brief Stack of active macro instantiations.
std::vector<MacroInstantiation*> ActiveMacros;
@ -308,17 +284,6 @@ class AsmParser : public MCAsmParser {
/// \brief Control a flag in the parser that enables or disables macros.
void setMacrosEnabled(bool Flag) {MacrosEnabledFlag = Flag;}
/// \brief Lookup a previously defined macro.
/// \param Name Macro name.
/// \returns Pointer to macro. NULL if no such macro was defined.
const MCAsmMacro* lookupMacro(StringRef Name);
/// \brief Define a new macro with the given name and information.
void defineMacro(StringRef Name, MCAsmMacro Macro);
/// \brief Undefine a macro. If no such macro was defined, it's a no-op.
void undefineMacro(StringRef Name);
/// \brief Are we inside a macro instantiation?
bool isInsideMacroInstantiation() {return !ActiveMacros.empty();}
@ -1841,7 +1806,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
// If macros are enabled, check to see if this is a macro instantiation.
if (areMacrosEnabled())
if (const MCAsmMacro *M = lookupMacro(IDVal)) {
if (const MCAsmMacro *M = getContext().lookupMacro(IDVal)) {
return handleMacroEntry(M, IDLoc);
}
@ -2720,17 +2685,6 @@ bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
return TokError("too many positional arguments");
}
const MCAsmMacro *AsmParser::lookupMacro(StringRef Name) {
StringMap<MCAsmMacro>::iterator I = MacroMap.find(Name);
return (I == MacroMap.end()) ? nullptr : &I->getValue();
}
void AsmParser::defineMacro(StringRef Name, MCAsmMacro Macro) {
MacroMap.insert(std::make_pair(Name, std::move(Macro)));
}
void AsmParser::undefineMacro(StringRef Name) { MacroMap.erase(Name); }
bool AsmParser::handleMacroEntry(const MCAsmMacro *M, SMLoc NameLoc) {
// Arbitrarily limit macro nesting depth (default matches 'as'). We can
// eliminate this, although we should protect against infinite loops.
@ -4249,7 +4203,7 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
eatToEndOfStatement();
}
if (lookupMacro(Name)) {
if (getContext().lookupMacro(Name)) {
return Error(DirectiveLoc, "macro '" + Name + "' is already defined");
}
@ -4257,7 +4211,7 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
const char *BodyEnd = EndToken.getLoc().getPointer();
StringRef Body = StringRef(BodyStart, BodyEnd - BodyStart);
checkForBadMacro(DirectiveLoc, Name, Body, Parameters);
defineMacro(Name, MCAsmMacro(Name, Body, std::move(Parameters)));
getContext().defineMacro(Name, MCAsmMacro(Name, Body, std::move(Parameters)));
return false;
}
@ -4416,10 +4370,10 @@ bool AsmParser::parseDirectivePurgeMacro(SMLoc DirectiveLoc) {
"unexpected token in '.purgem' directive"))
return true;
if (!lookupMacro(Name))
if (!getContext().lookupMacro(Name))
return Error(DirectiveLoc, "macro '" + Name + "' is not defined");
undefineMacro(Name);
getContext().undefineMacro(Name);
return false;
}

View File

@ -3957,6 +3957,10 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(CVT_F32_UBYTE2)
NODE_NAME_CASE(CVT_F32_UBYTE3)
NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
NODE_NAME_CASE(CVT_PKNORM_I16_F32)
NODE_NAME_CASE(CVT_PKNORM_U16_F32)
NODE_NAME_CASE(CVT_PK_I16_I32)
NODE_NAME_CASE(CVT_PK_U16_U32)
NODE_NAME_CASE(FP_TO_FP16)
NODE_NAME_CASE(FP16_ZEXT)
NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)

View File

@ -417,6 +417,10 @@ enum NodeType : unsigned {
// Convert two float 32 numbers into a single register holding two packed f16
// with round to zero.
CVT_PKRTZ_F16_F32,
CVT_PKNORM_I16_F32,
CVT_PKNORM_U16_F32,
CVT_PK_I16_I32,
CVT_PK_U16_U32,
// Same as the standard node, except the high bits of the resulting integer
// are known 0.

View File

@ -108,3 +108,21 @@ int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
return MCOp;
}
// TODO: Should largely merge with AMDGPUTTIImpl::isSourceOfDivergence.
bool AMDGPUInstrInfo::isUniformMMO(const MachineMemOperand *MMO) {
const Value *Ptr = MMO->getValue();
// UndefValue means this is a load of a kernel input. These are uniform.
// Sometimes LDS instructions have constant pointers.
// If Ptr is null, then that means this mem operand contains a
// PseudoSourceValue like GOT.
if (!Ptr || isa<UndefValue>(Ptr) ||
isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
return true;
if (const Argument *Arg = dyn_cast<Argument>(Ptr))
return AMDGPU::isArgPassedInSGPR(Arg);
const Instruction *I = dyn_cast<Instruction>(Ptr);
return I && I->getMetadata("amdgpu.uniform");
}

View File

@ -50,6 +50,8 @@ class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
/// Return -1 if the target-specific opcode for the pseudo instruction does
/// not exist. If Opcode is not a pseudo instruction, this is identity.
int pseudoToMCOpcode(int Opcode) const;
static bool isUniformMMO(const MachineMemOperand *MMO);
};
} // End llvm namespace

View File

@ -35,6 +35,10 @@ def AMDGPUFPPackOp : SDTypeProfile<1, 2,
[SDTCisFP<1>, SDTCisSameAs<1, 2>]
>;
def AMDGPUIntPackOp : SDTypeProfile<1, 2,
[SDTCisInt<1>, SDTCisSameAs<1, 2>]
>;
def AMDGPUDivScaleOp : SDTypeProfile<2, 3,
[SDTCisFP<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisSameAs<0, 4>]
>;
@ -142,6 +146,10 @@ def AMDGPUrsq_clamp : SDNode<"AMDGPUISD::RSQ_CLAMP", SDTFPUnaryOp>;
def AMDGPUldexp : SDNode<"AMDGPUISD::LDEXP", AMDGPULdExpOp>;
def AMDGPUpkrtz_f16_f32 : SDNode<"AMDGPUISD::CVT_PKRTZ_F16_F32", AMDGPUFPPackOp>;
def AMDGPUpknorm_i16_f32 : SDNode<"AMDGPUISD::CVT_PKNORM_I16_F32", AMDGPUFPPackOp>;
def AMDGPUpknorm_u16_f32 : SDNode<"AMDGPUISD::CVT_PKNORM_U16_F32", AMDGPUFPPackOp>;
def AMDGPUpk_i16_i32 : SDNode<"AMDGPUISD::CVT_PK_I16_I32", AMDGPUIntPackOp>;
def AMDGPUpk_u16_u32 : SDNode<"AMDGPUISD::CVT_PK_U16_U32", AMDGPUIntPackOp>;
def AMDGPUfp_to_f16 : SDNode<"AMDGPUISD::FP_TO_FP16" , SDTFPToIntOp>;
def AMDGPUfp16_zext : SDNode<"AMDGPUISD::FP16_ZEXT" , SDTFPToIntOp>;

View File

@ -120,7 +120,7 @@ static bool isInstrUniform(const MachineInstr &MI) {
return false;
const MachineMemOperand *MMO = *MI.memoperands_begin();
return AMDGPU::isUniformMMO(MMO);
return AMDGPUInstrInfo::isUniformMMO(MMO);
}
const RegisterBankInfo::InstructionMapping &

View File

@ -205,6 +205,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
@ -1085,7 +1086,7 @@ bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
const MemSDNode *MemNode = cast<MemSDNode>(N);
return AMDGPU::isUniformMMO(MemNode->getMemOperand());
return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
}
TargetLoweringBase::LegalizeTypeAction
@ -3517,7 +3518,8 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N,
}
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
if (IID == Intrinsic::amdgcn_cvt_pkrtz) {
switch (IID) {
case Intrinsic::amdgcn_cvt_pkrtz: {
SDValue Src0 = N->getOperand(1);
SDValue Src1 = N->getOperand(2);
SDLoc SL(N);
@ -3526,6 +3528,29 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
return;
}
case Intrinsic::amdgcn_cvt_pknorm_i16:
case Intrinsic::amdgcn_cvt_pknorm_u16:
case Intrinsic::amdgcn_cvt_pk_i16:
case Intrinsic::amdgcn_cvt_pk_u16: {
SDValue Src0 = N->getOperand(1);
SDValue Src1 = N->getOperand(2);
SDLoc SL(N);
unsigned Opcode;
if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
Opcode = AMDGPUISD::CVT_PK_I16_I32;
else
Opcode = AMDGPUISD::CVT_PK_U16_U32;
SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
return;
}
}
break;
}
case ISD::SELECT: {
@ -4424,10 +4449,27 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::amdgcn_ubfe:
return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_cvt_pkrtz: {
// FIXME: Stop adding cast if v2f16 legal.
case Intrinsic::amdgcn_cvt_pkrtz:
case Intrinsic::amdgcn_cvt_pknorm_i16:
case Intrinsic::amdgcn_cvt_pknorm_u16:
case Intrinsic::amdgcn_cvt_pk_i16:
case Intrinsic::amdgcn_cvt_pk_u16: {
// FIXME: Stop adding cast if v2f16/v2i16 are legal.
EVT VT = Op.getValueType();
SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
unsigned Opcode;
if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
Opcode = AMDGPUISD::CVT_PK_I16_I32;
else
Opcode = AMDGPUISD::CVT_PK_U16_U32;
SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Op.getOperand(1), Op.getOperand(2));
return DAG.getNode(ISD::BITCAST, DL, VT, Node);
}

View File

@ -871,24 +871,6 @@ bool isArgPassedInSGPR(const Argument *A) {
}
}
// TODO: Should largely merge with AMDGPUTTIImpl::isSourceOfDivergence.
bool isUniformMMO(const MachineMemOperand *MMO) {
const Value *Ptr = MMO->getValue();
// UndefValue means this is a load of a kernel input. These are uniform.
// Sometimes LDS instructions have constant pointers.
// If Ptr is null, then that means this mem operand contains a
// PseudoSourceValue like GOT.
if (!Ptr || isa<UndefValue>(Ptr) ||
isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
return true;
if (const Argument *Arg = dyn_cast<Argument>(Ptr))
return isArgPassedInSGPR(Arg);
const Instruction *I = dyn_cast<Instruction>(Ptr);
return I && I->getMetadata("amdgpu.uniform");
}
int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
if (isGCN3Encoding(ST))
return ByteOffset;

View File

@ -363,7 +363,6 @@ LLVM_READNONE
bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi);
bool isArgPassedInSGPR(const Argument *Arg);
bool isUniformMMO(const MachineMemOperand *MMO);
/// \returns The encoding that will be used for \p ByteOffset in the SMRD
/// offset field.

View File

@ -407,11 +407,11 @@ defm V_MBCNT_LO_U32_B32 : VOP2Inst <"v_mbcnt_lo_u32_b32", VOP_NO_EXT<VOP_I32_I32
defm V_MBCNT_HI_U32_B32 : VOP2Inst <"v_mbcnt_hi_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>, int_amdgcn_mbcnt_hi>;
defm V_LDEXP_F32 : VOP2Inst <"v_ldexp_f32", VOP_NO_EXT<VOP_F32_F32_I32>, AMDGPUldexp>;
defm V_CVT_PKACCUM_U8_F32 : VOP2Inst <"v_cvt_pkaccum_u8_f32", VOP_NO_EXT<VOP_I32_F32_I32>>; // TODO: set "Uses = dst"
defm V_CVT_PKNORM_I16_F32 : VOP2Inst <"v_cvt_pknorm_i16_f32", VOP_NO_EXT<VOP_I32_F32_F32>>;
defm V_CVT_PKNORM_U16_F32 : VOP2Inst <"v_cvt_pknorm_u16_f32", VOP_NO_EXT<VOP_I32_F32_F32>>;
defm V_CVT_PKNORM_I16_F32 : VOP2Inst <"v_cvt_pknorm_i16_f32", VOP_NO_EXT<VOP_I32_F32_F32>, AMDGPUpknorm_i16_f32>;
defm V_CVT_PKNORM_U16_F32 : VOP2Inst <"v_cvt_pknorm_u16_f32", VOP_NO_EXT<VOP_I32_F32_F32>, AMDGPUpknorm_u16_f32>;
defm V_CVT_PKRTZ_F16_F32 : VOP2Inst <"v_cvt_pkrtz_f16_f32", VOP_NO_EXT<VOP_I32_F32_F32>, AMDGPUpkrtz_f16_f32>;
defm V_CVT_PK_U16_U32 : VOP2Inst <"v_cvt_pk_u16_u32", VOP_NO_EXT<VOP_I32_I32_I32>>;
defm V_CVT_PK_I16_I32 : VOP2Inst <"v_cvt_pk_i16_i32", VOP_NO_EXT<VOP_I32_I32_I32>>;
defm V_CVT_PK_U16_U32 : VOP2Inst <"v_cvt_pk_u16_u32", VOP_NO_EXT<VOP_I32_I32_I32>, AMDGPUpk_u16_u32>;
defm V_CVT_PK_I16_I32 : VOP2Inst <"v_cvt_pk_i16_i32", VOP_NO_EXT<VOP_I32_I32_I32>, AMDGPUpk_i16_i32>;
} // End SubtargetPredicate = isGCN

View File

@ -396,10 +396,14 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
// rip-relative addressing is actually relative to the *next* instruction.
// Since an immediate can follow the mod/rm byte for an instruction, this
// means that we need to bias the immediate field of the instruction with
// the size of the immediate field. If we have this case, add it into the
// means that we need to bias the displacement field of the instruction with
// the size of the immediate field. If we have this case, add it into the
// expression to emit.
int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
// Note: rip-relative addressing using immediate displacement values should
// not be adjusted, assuming it was the user's intent.
int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
? X86II::getSizeOfImm(TSFlags)
: 0;
EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
CurByte, OS, Fixups, -ImmSize);
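
A sketch of the case the comment describes; the function name and constant are illustrative:

```cpp
// On x86-64 this compiles to roughly "cmpl $0x12345678, x(%rip)". The
// 4-byte displacement for `x` precedes the 4-byte immediate, and
// RIP-relative offsets are measured from the end of the instruction, so
// the relocation for `x` must be biased by the immediate's size (hence
// -ImmSize above). A literal numeric displacement written by the user,
// by contrast, is now emitted exactly as written.
int x;
bool isMagic() { return x == 0x12345678; }
```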

View File

@ -370,6 +370,8 @@ static void printIntelMemReference(X86AsmPrinter &P, const MachineInstr *MI,
static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
char Mode, raw_ostream &O) {
unsigned Reg = MO.getReg();
bool EmitPercent = true;
switch (Mode) {
default: return true; // Unknown mode.
case 'b': // Print QImode register
@ -384,6 +386,9 @@ static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
case 'k': // Print SImode register
Reg = getX86SubSuperRegister(Reg, 32);
break;
case 'V':
EmitPercent = false;
LLVM_FALLTHROUGH;
case 'q':
// Print 64-bit register names if 64-bit integer registers are available.
// Otherwise, print 32-bit register names.
@ -391,7 +396,10 @@ static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
break;
}
O << '%' << X86ATTInstPrinter::getRegisterName(Reg);
if (EmitPercent)
O << '%';
O << X86ATTInstPrinter::getRegisterName(Reg);
return false;
}
@ -464,6 +472,7 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
case 'w': // Print HImode register
case 'k': // Print SImode register
case 'q': // Print DImode register
case 'V': // Print native register without '%'
if (MO.isReg())
return printAsmMRegister(*this, MO, ExtraCode[0], O);
printOperand(*this, MI, OpNo, O);
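
A hedged usage sketch of the new 'V' modifier: it emits the bare register name, which lets inline asm splice a register into a symbol name, as kernels do with the __x86_indirect_thunk_&lt;reg&gt; thunks named elsewhere in this commit. The thunk symbol must be provided externally; this is a simplified fragment, not a complete retpoline:

```cpp
void callThroughThunk(void (*fn)(void)) {
  // %V0 prints e.g. "rax" rather than "%rax", forming the thunk name.
  // Real call sites also declare the registers the call clobbers.
  asm volatile("call __x86_indirect_thunk_%V0" : : "r"(fn) : "memory");
}
```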

View File

@ -663,8 +663,10 @@ void X86DomainReassignment::initConverters() {
createReplacer(X86::XOR32rr, X86::KXORDrr);
createReplacer(X86::XOR64rr, X86::KXORQrr);
createReplacer(X86::TEST32rr, X86::KTESTDrr);
createReplacer(X86::TEST64rr, X86::KTESTQrr);
// TODO: KTEST is not a replacement for TEST due to flag differences. Need
// to prove only Z flag is used.
//createReplacer(X86::TEST32rr, X86::KTESTDrr);
//createReplacer(X86::TEST64rr, X86::KTESTQrr);
}
if (STI->hasDQI()) {
@ -684,8 +686,10 @@ void X86DomainReassignment::initConverters() {
createReplacer(X86::SHR8ri, X86::KSHIFTRBri);
createReplacer(X86::SHL8ri, X86::KSHIFTLBri);
createReplacer(X86::TEST8rr, X86::KTESTBrr);
createReplacer(X86::TEST16rr, X86::KTESTWrr);
// TODO: KTEST is not a replacement for TEST due to flag differences. Need
// to prove only Z flag is used.
//createReplacer(X86::TEST8rr, X86::KTESTBrr);
//createReplacer(X86::TEST16rr, X86::KTESTWrr);
createReplacer(X86::XOR8rr, X86::KXORBrr);
}
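
A sketch of why the conversion is unsound in general:

```cpp
// TEST defines SF and PF in addition to ZF, while KTEST has only Z/C
// semantics. This function compiles to "test %eax, %eax; js ...", which
// reads SF; rewriting the TEST to a KTEST would break it. Hence the TODO
// above: prove that only ZF is consumed before converting.
bool isNegative(int v) { return v < 0; }
```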

View File

@ -17017,24 +17017,6 @@ static bool hasNonFlagsUse(SDValue Op) {
return false;
}
// Emit KTEST instruction for bit vectors on AVX-512
static SDValue EmitKTEST(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (Op.getOpcode() == ISD::BITCAST) {
auto hasKTEST = [&](MVT VT) {
unsigned SizeInBits = VT.getSizeInBits();
return (Subtarget.hasDQI() && (SizeInBits == 8 || SizeInBits == 16)) ||
(Subtarget.hasBWI() && (SizeInBits == 32 || SizeInBits == 64));
};
SDValue Op0 = Op.getOperand(0);
MVT Op0VT = Op0.getValueType().getSimpleVT();
if (Op0VT.isVector() && Op0VT.getVectorElementType() == MVT::i1 &&
hasKTEST(Op0VT))
return DAG.getNode(X86ISD::KTEST, SDLoc(Op), Op0VT, Op0, Op0);
}
return SDValue();
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
@ -17079,9 +17061,6 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
if (Op.getResNo() != 0 || NeedOF || NeedCF) {
// Emit KTEST for bit vectors
if (auto Node = EmitKTEST(Op, DAG, Subtarget))
return Node;
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
@ -17310,10 +17289,6 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
}
if (Opcode == 0) {
// Emit KTEST for bit vectors
if (auto Node = EmitKTEST(Op, DAG, Subtarget))
return Node;
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
@ -18093,6 +18068,34 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
return Result;
}
// Try to select this as a KTEST+SETCC if possible.
static SDValue EmitKTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Only support equality comparisons.
if (CC != ISD::SETEQ && CC != ISD::SETNE)
return SDValue();
// Must be a bitcast from vXi1.
if (Op0.getOpcode() != ISD::BITCAST)
return SDValue();
Op0 = Op0.getOperand(0);
MVT VT = Op0.getSimpleValueType();
if (!(Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1)) &&
!(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
return SDValue();
X86::CondCode X86CC;
if (isNullConstant(Op1)) {
X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
} else
return SDValue();
SDValue KTEST = DAG.getNode(X86ISD::KTEST, dl, MVT::i32, Op0, Op0);
return getSETCC(X86CC, KTEST, dl, DAG);
}
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
@ -18115,6 +18118,10 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
return NewSetCC;
}
// Try to lower using KTEST.
if (SDValue NewSetCC = EmitKTEST(Op0, Op1, CC, dl, DAG, Subtarget))
return NewSetCC;
// Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
// these.
if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
@ -20525,6 +20532,18 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Mask = DAG.getBitcast(MaskVT, Mask);
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Mask);
}
case KUNPCK: {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()/2);
SDValue Src1 = getMaskNode(Op.getOperand(1), MaskVT, Subtarget, DAG, dl);
SDValue Src2 = getMaskNode(Op.getOperand(2), MaskVT, Subtarget, DAG, dl);
// Arguments should be swapped.
SDValue Res = DAG.getNode(IntrData->Opc0, dl,
MVT::getVectorVT(MVT::i1, VT.getSizeInBits()),
Src2, Src1);
return DAG.getBitcast(VT, Res);
}
case MASK_BINOP: {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits());
@ -27094,28 +27113,57 @@ static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
unsigned Reg) {
if (Subtarget.useRetpolineExternalThunk()) {
// When using an external thunk for retpolines, we pick names that match the
// names GCC happens to use as well. This helps simplify the implementation
// of the thunks for kernels where they have no easy ability to create
// aliases and are doing non-trivial configuration of the thunk's body. For
// example, the Linux kernel will do boot-time hot patching of the thunk
// bodies and cannot easily export aliases of these to loaded modules.
//
// Note that at any point in the future, we may need to change the semantics
// of how we implement retpolines and at that time will likely change the
// name of the called thunk. Essentially, there is no hard guarantee that
// LLVM will generate calls to specific thunks, we merely make a best-effort
// attempt to help out kernels and other systems where duplicating the
// thunks is costly.
switch (Reg) {
case X86::EAX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_eax";
case X86::ECX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_ecx";
case X86::EDX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_edx";
case X86::EDI:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_edi";
case X86::R11:
assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
return "__x86_indirect_thunk_r11";
}
llvm_unreachable("unexpected reg for retpoline");
}
// When targeting an internal COMDAT thunk use an LLVM-specific name.
switch (Reg) {
case 0:
assert(!Subtarget.is64Bit() && "R11 should always be available on x64");
return Subtarget.useRetpolineExternalThunk()
? "__llvm_external_retpoline_push"
: "__llvm_retpoline_push";
case X86::EAX:
return Subtarget.useRetpolineExternalThunk()
? "__llvm_external_retpoline_eax"
: "__llvm_retpoline_eax";
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_eax";
case X86::ECX:
return Subtarget.useRetpolineExternalThunk()
? "__llvm_external_retpoline_ecx"
: "__llvm_retpoline_ecx";
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_ecx";
case X86::EDX:
return Subtarget.useRetpolineExternalThunk()
? "__llvm_external_retpoline_edx"
: "__llvm_retpoline_edx";
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_edx";
case X86::EDI:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_edi";
case X86::R11:
return Subtarget.useRetpolineExternalThunk()
? "__llvm_external_retpoline_r11"
: "__llvm_retpoline_r11";
assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
return "__llvm_retpoline_r11";
}
llvm_unreachable("unexpected reg for retpoline");
}
@ -27134,15 +27182,13 @@ X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
// just use R11, but we scan for uses anyway to ensure we don't generate
// incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
// already a register use operand to the call to hold the callee. If none
// are available, push the callee instead. This is less efficient, but is
// necessary for functions using 3 regparms. Such function calls are
// (currently) not eligible for tail call optimization, because there is no
// scratch register available to hold the address of the callee.
// are available, use EDI instead. EDI is chosen because EBX is the PIC base
// register and ESI is the base pointer to realigned stack frames with VLAs.
SmallVector<unsigned, 3> AvailableRegs;
if (Subtarget.is64Bit())
AvailableRegs.push_back(X86::R11);
else
AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX});
AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
// Zero out any registers that are already used.
for (const auto &MO : MI.operands()) {
@ -27160,30 +27206,18 @@ X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
break;
}
}
if (!AvailableReg)
report_fatal_error("calling convention incompatible with retpoline, no "
"available registers");
const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
if (AvailableReg == 0) {
// No register available. Use PUSH. This must not be a tailcall, and this
// must not be x64.
if (Subtarget.is64Bit())
report_fatal_error(
"Cannot make an indirect call on x86-64 using both retpoline and a "
"calling convention that preservers r11");
if (Opc != X86::CALLpcrel32)
report_fatal_error("Cannot make an indirect tail call on x86 using "
"retpoline without a preserved register");
BuildMI(*BB, MI, DL, TII->get(X86::PUSH32r)).addReg(CalleeVReg);
MI.getOperand(0).ChangeToES(Symbol);
MI.setDesc(TII->get(Opc));
} else {
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
.addReg(CalleeVReg);
MI.getOperand(0).ChangeToES(Symbol);
MI.setDesc(TII->get(Opc));
MachineInstrBuilder(*BB->getParent(), &MI)
.addReg(AvailableReg, RegState::Implicit | RegState::Kill);
}
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
.addReg(CalleeVReg);
MI.getOperand(0).ChangeToES(Symbol);
MI.setDesc(TII->get(Opc));
MachineInstrBuilder(*BB->getParent(), &MI)
.addReg(AvailableReg, RegState::Implicit | RegState::Kill);
return BB;
}
@ -30432,53 +30466,6 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
SDValue N0 = BitCast.getOperand(0);
EVT VecVT = N0->getValueType(0);
if (VT.isVector() && VecVT.isScalarInteger() && Subtarget.hasAVX512() &&
N0->getOpcode() == ISD::OR) {
SDValue Op0 = N0->getOperand(0);
SDValue Op1 = N0->getOperand(1);
MVT TrunckVT;
MVT BitcastVT;
switch (VT.getSimpleVT().SimpleTy) {
default:
return SDValue();
case MVT::v16i1:
TrunckVT = MVT::i8;
BitcastVT = MVT::v8i1;
break;
case MVT::v32i1:
TrunckVT = MVT::i16;
BitcastVT = MVT::v16i1;
break;
case MVT::v64i1:
TrunckVT = MVT::i32;
BitcastVT = MVT::v32i1;
break;
}
bool isArg0UndefRight = Op0->getOpcode() == ISD::SHL;
bool isArg0UndefLeft =
Op0->getOpcode() == ISD::ZERO_EXTEND || Op0->getOpcode() == ISD::AND;
bool isArg1UndefRight = Op1->getOpcode() == ISD::SHL;
bool isArg1UndefLeft =
Op1->getOpcode() == ISD::ZERO_EXTEND || Op1->getOpcode() == ISD::AND;
SDValue OpLeft;
SDValue OpRight;
if (isArg0UndefRight && isArg1UndefLeft) {
OpLeft = Op0;
OpRight = Op1;
} else if (isArg1UndefRight && isArg0UndefLeft) {
OpLeft = Op1;
OpRight = Op0;
} else
return SDValue();
SDLoc DL(BitCast);
SDValue Shr = OpLeft->getOperand(0);
SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, TrunckVT, Shr);
SDValue Bitcast1 = DAG.getBitcast(BitcastVT, Trunc1);
SDValue Trunc2 = DAG.getNode(ISD::TRUNCATE, DL, TrunckVT, OpRight);
SDValue Bitcast2 = DAG.getBitcast(BitcastVT, Trunc2);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Bitcast1, Bitcast2);
}
if (!VT.isScalarInteger() || !VecVT.isSimple())
return SDValue();

View File

@ -36,7 +36,7 @@ enum IntrinsicType : uint16_t {
COMPRESS_EXPAND_IN_REG, COMPRESS_TO_MEM,
TRUNCATE_TO_MEM_VI8, TRUNCATE_TO_MEM_VI16, TRUNCATE_TO_MEM_VI32,
EXPAND_FROM_MEM,
TERLOG_OP_MASK, TERLOG_OP_MASKZ, BROADCASTM, FIXUPIMM, FIXUPIMM_MASKZ, FIXUPIMMS,
TERLOG_OP_MASK, TERLOG_OP_MASKZ, BROADCASTM, KUNPCK, FIXUPIMM, FIXUPIMM_MASKZ, FIXUPIMMS,
FIXUPIMMS_MASKZ, CONVERT_TO_MASK, GATHER_AVX2, MASK_BINOP,
ROUNDP, ROUNDS
};
@ -479,6 +479,9 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_exp2_ps, INTR_TYPE_1OP_MASK_RM, X86ISD::EXP2, 0),
X86_INTRINSIC_DATA(avx512_kand_w, MASK_BINOP, ISD::AND, 0),
X86_INTRINSIC_DATA(avx512_kor_w, MASK_BINOP, ISD::OR, 0),
X86_INTRINSIC_DATA(avx512_kunpck_bw, KUNPCK, ISD::CONCAT_VECTORS, 0),
X86_INTRINSIC_DATA(avx512_kunpck_dq, KUNPCK, ISD::CONCAT_VECTORS, 0),
X86_INTRINSIC_DATA(avx512_kunpck_wd, KUNPCK, ISD::CONCAT_VECTORS, 0),
X86_INTRINSIC_DATA(avx512_kxor_w, MASK_BINOP, ISD::XOR, 0),
X86_INTRINSIC_DATA(avx512_mask_add_pd_512, INTR_TYPE_2OP_MASK, ISD::FADD,
X86ISD::FADD_RND),

View File

@ -43,7 +43,7 @@ static const char R11ThunkName[] = "__llvm_retpoline_r11";
static const char EAXThunkName[] = "__llvm_retpoline_eax";
static const char ECXThunkName[] = "__llvm_retpoline_ecx";
static const char EDXThunkName[] = "__llvm_retpoline_edx";
static const char PushThunkName[] = "__llvm_retpoline_push";
static const char EDIThunkName[] = "__llvm_retpoline_edi";
namespace {
class X86RetpolineThunks : public MachineFunctionPass {
@ -74,7 +74,6 @@ class X86RetpolineThunks : public MachineFunctionPass {
void createThunkFunction(Module &M, StringRef Name);
void insertRegReturnAddrClobber(MachineBasicBlock &MBB, unsigned Reg);
void insert32BitPushReturnAddrClobber(MachineBasicBlock &MBB);
void populateThunk(MachineFunction &MF, Optional<unsigned> Reg = None);
};
@ -127,7 +126,7 @@ bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
createThunkFunction(M, R11ThunkName);
else
for (StringRef Name :
{EAXThunkName, ECXThunkName, EDXThunkName, PushThunkName})
{EAXThunkName, ECXThunkName, EDXThunkName, EDIThunkName})
createThunkFunction(M, Name);
InsertedThunks = true;
return true;
@ -151,9 +150,8 @@ bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
populateThunk(MF, X86::R11);
} else {
// For 32-bit targets we need to emit a collection of thunks for various
// possible scratch registers as well as a fallback that is used when
// there are no scratch registers and assumes the retpoline target has
// been pushed.
// possible scratch registers as well as a fallback that uses EDI, which is
// normally callee saved.
// __llvm_retpoline_eax:
// calll .Leax_call_target
// .Leax_capture_spec:
@ -174,32 +172,18 @@ bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
// movl %edx, (%esp)
// retl
//
// This last one is a bit more special and so needs a little extra
// handling.
// __llvm_retpoline_push:
// calll .Lpush_call_target
// .Lpush_capture_spec:
// pause
// lfence
// jmp .Lpush_capture_spec
// .align 16
// .Lpush_call_target:
// # Clear pause_loop return address.
// addl $4, %esp
// # Top of stack words are: Callee, RA. Exchange Callee and RA.
// pushl 4(%esp) # Push callee
// pushl 4(%esp) # Push RA
// popl 8(%esp) # Pop RA to final RA
// popl (%esp) # Pop callee to next top of stack
// retl # Ret to callee
// __llvm_retpoline_edi:
// ... # Same setup
// movl %edi, (%esp)
// retl
if (MF.getName() == EAXThunkName)
populateThunk(MF, X86::EAX);
else if (MF.getName() == ECXThunkName)
populateThunk(MF, X86::ECX);
else if (MF.getName() == EDXThunkName)
populateThunk(MF, X86::EDX);
else if (MF.getName() == PushThunkName)
populateThunk(MF);
else if (MF.getName() == EDIThunkName)
populateThunk(MF, X86::EDI);
else
llvm_unreachable("Invalid thunk name on x86-32!");
}
@ -240,31 +224,6 @@ void X86RetpolineThunks::insertRegReturnAddrClobber(MachineBasicBlock &MBB,
.addReg(Reg);
}
void X86RetpolineThunks::insert32BitPushReturnAddrClobber(
MachineBasicBlock &MBB) {
// The instruction sequence we use to replace the return address without
// a scratch register is somewhat complicated:
// # Clear capture_spec from return address.
// addl $4, %esp
// # Top of stack words are: Callee, RA. Exchange Callee and RA.
// pushl 4(%esp) # Push callee
// pushl 4(%esp) # Push RA
// popl 8(%esp) # Pop RA to final RA
// popl (%esp) # Pop callee to next top of stack
// retl # Ret to callee
BuildMI(&MBB, DebugLoc(), TII->get(X86::ADD32ri), X86::ESP)
.addReg(X86::ESP)
.addImm(4);
addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::PUSH32rmm)), X86::ESP,
false, 4);
addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::PUSH32rmm)), X86::ESP,
false, 4);
addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::POP32rmm)), X86::ESP,
false, 8);
addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::POP32rmm)), X86::ESP,
false, 0);
}
void X86RetpolineThunks::populateThunk(MachineFunction &MF,
Optional<unsigned> Reg) {
// Set MF properties. We never use vregs...
@ -301,11 +260,6 @@ void X86RetpolineThunks::populateThunk(MachineFunction &MF,
CaptureSpec->addSuccessor(CaptureSpec);
CallTarget->setAlignment(4);
if (Reg) {
insertRegReturnAddrClobber(*CallTarget, *Reg);
} else {
assert(!Is64Bit && "We only support non-reg thunks on 32-bit x86!");
insert32BitPushReturnAddrClobber(*CallTarget);
}
insertRegReturnAddrClobber(*CallTarget, *Reg);
BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
}

View File

@ -3264,6 +3264,18 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
}
case Intrinsic::amdgcn_cvt_pknorm_i16:
case Intrinsic::amdgcn_cvt_pknorm_u16:
case Intrinsic::amdgcn_cvt_pk_i16:
case Intrinsic::amdgcn_cvt_pk_u16: {
Value *Src0 = II->getArgOperand(0);
Value *Src1 = II->getArgOperand(1);
if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
break;
}
case Intrinsic::amdgcn_ubfe:
case Intrinsic::amdgcn_sbfe: {
// Decompose simple cases into standard shifts.

View File

@ -836,6 +836,10 @@ class LLVM_ALIGNAS(/*alignof(uint64_t)*/ 8) Decl {
void setLexicalDeclContext(DeclContext *DC);
/// Determine whether this declaration is a templated entity (whether it is
/// within the scope of a template parameter).
bool isTemplated() const;
/// isDefinedOutsideFunctionOrMethod - This predicate returns true if this
/// scoped decl is defined outside the current function or method. This is
/// roughly global variables and functions, but also handles enums (which

View File

@ -2145,7 +2145,7 @@ static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
if (FieldSize != UnionSize)
return false;
}
return true;
return !RD->field_empty();
}
static bool isStructEmpty(QualType Ty) {
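
A sketch of the behavioral consequence (C++17):

```cpp
#include <type_traits>

union Empty {};          // no fields: representation is not pinned down
union OneInt { int x; };

static_assert(!std::has_unique_object_representations_v<Empty>);
static_assert(std::has_unique_object_representations_v<OneInt>);
```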

View File

@ -236,10 +236,23 @@ TemplateDecl *Decl::getDescribedTemplate() const {
return RD->getDescribedClassTemplate();
else if (auto *VD = dyn_cast<VarDecl>(this))
return VD->getDescribedVarTemplate();
else if (auto *AD = dyn_cast<TypeAliasDecl>(this))
return AD->getDescribedAliasTemplate();
return nullptr;
}
bool Decl::isTemplated() const {
// A declaration is dependent if it is a template or a template pattern, or
// is within (lexically for a friend, semantically otherwise) a dependent
// context.
// FIXME: Should local extern declarations be treated like friends?
if (auto *AsDC = dyn_cast<DeclContext>(this))
return AsDC->isDependentContext();
auto *DC = getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext();
return DC->isDependentContext() || isTemplateDecl() || getDescribedTemplate();
}
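
Some entities the new predicate classifies as templated (a sketch); with the CodeGenModule change later in this commit, all of them are now skipped uniformly by EmitTopLevelDecl:

```cpp
template <typename T> struct S {
  friend void f(S) {}  // friend pattern: lexically in a dependent context
  static int member;   // member of a template: dependent context
};
template <typename T> using Alias = S<T>;  // via getDescribedAliasTemplate
```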
const DeclContext *Decl::getParentFunctionOrMethod() const {
for (const DeclContext *DC = getDeclContext();
DC && !DC->isTranslationUnit() && !DC->isNamespace();

View File

@ -950,11 +950,10 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
}
}
// <postfix> ::= <unqualified-name> [<postfix>]
// ::= <substitution> [<postfix>]
void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
// <postfix> ::= <unqualified-name> [<postfix>]
// ::= <substitution> [<postfix>]
const DeclContext *DC = getEffectiveDeclContext(ND);
while (!DC->isTranslationUnit()) {
if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
unsigned Disc;
@ -2140,6 +2139,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
case CC_X86StdCall: Out << 'G'; break;
case CC_X86FastCall: Out << 'I'; break;
case CC_X86VectorCall: Out << 'Q'; break;
case CC_Swift: Out << 'S'; break;
case CC_X86RegCall: Out << 'w'; break;
}
}

View File

@ -4000,18 +4000,13 @@ void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Ignore dependent declarations.
if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
if (D->isTemplated())
return;
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
case Decl::Function:
// Skip function templates
if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
EmitGlobal(cast<FunctionDecl>(D));
// Always provide some coverage mapping
// even for the functions that aren't emitted.
@ -4024,10 +4019,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Var:
case Decl::Decomposition:
// Skip variable templates
if (cast<VarDecl>(D)->getDescribedVarTemplate())
return;
LLVM_FALLTHROUGH;
case Decl::VarTemplateSpecialization:
EmitGlobal(cast<VarDecl>(D));
if (auto *DD = dyn_cast<DecompositionDecl>(D))
@ -4086,16 +4077,9 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
return;
case Decl::CXXConstructor:
// Skip function templates
if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
break;
case Decl::CXXDestructor:
if (cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
break;

View File

@ -2761,6 +2761,11 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
// N.B. We must always emit the RTTI data ourselves if there exists a key
// function.
bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
// Don't import the RTTI but emit it locally.
if (CGM.getTriple().isWindowsGNUEnvironment() && IsDLLImport)
return false;
if (CGM.getVTables().isVTableExternal(RD))
return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
? false

View File

@ -1723,15 +1723,18 @@ void TokenAnnotator::setCommentLineLevels(
}
}
if (NextNonCommentLine && CommentLine) {
// If the comment is currently aligned with the line immediately following
// it, that's probably intentional and we should keep it.
bool AlignedWithNextLine =
NextNonCommentLine->First->NewlinesBefore <= 1 &&
NextNonCommentLine->First->OriginalColumn ==
(*I)->First->OriginalColumn;
if (AlignedWithNextLine)
(*I)->Level = NextNonCommentLine->Level;
// If the comment is currently aligned with the line immediately following
// it, that's probably intentional and we should keep it.
if (NextNonCommentLine && CommentLine &&
NextNonCommentLine->First->NewlinesBefore <= 1 &&
NextNonCommentLine->First->OriginalColumn ==
(*I)->First->OriginalColumn) {
// Align comments for preprocessor lines with the # in column 0.
// Otherwise, align with the next line.
(*I)->Level = (NextNonCommentLine->Type == LT_PreprocessorDirective ||
NextNonCommentLine->Type == LT_ImportStatement)
? 0
: NextNonCommentLine->Level;
} else {
NextNonCommentLine = (*I)->First->isNot(tok::r_brace) ? (*I) : nullptr;
}
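
A sketch of the formatting effect:

```cpp
void f() {
  int i = 0;
// this comment now lines up with the '#' in column 0, not with `int i`,
// because the line immediately following it is a preprocessor directive
#ifdef DEBUG
  int j = i;
  (void)j;
#endif
}
```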

View File

@ -234,14 +234,17 @@ UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
CurrentLines(&Lines), Style(Style), Keywords(Keywords),
CommentPragmasRegex(Style.CommentPragmas), Tokens(nullptr),
Callback(Callback), AllTokens(Tokens), PPBranchLevel(-1),
IfNdefCondition(nullptr), FoundIncludeGuardStart(false),
IncludeGuardRejected(false), FirstStartColumn(FirstStartColumn) {}
IncludeGuard(Style.IndentPPDirectives == FormatStyle::PPDIS_None
? IG_Rejected
: IG_Inited),
IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn) {}
void UnwrappedLineParser::reset() {
PPBranchLevel = -1;
IfNdefCondition = nullptr;
FoundIncludeGuardStart = false;
IncludeGuardRejected = false;
IncludeGuard = Style.IndentPPDirectives == FormatStyle::PPDIS_None
? IG_Rejected
: IG_Inited;
IncludeGuardToken = nullptr;
Line.reset(new UnwrappedLine);
CommentsBeforeNextToken.clear();
FormatTok = nullptr;
@ -264,6 +267,14 @@ void UnwrappedLineParser::parse() {
readToken();
parseFile();
// If we found an include guard then all preprocessor directives (other than
// the guard) are over-indented by one.
if (IncludeGuard == IG_Found)
for (auto &Line : Lines)
if (Line.InPPDirective && Line.Level > 0)
--Line.Level;
// Create line with eof token.
pushToken(FormatTok);
addUnwrappedLine();
@ -712,26 +723,27 @@ void UnwrappedLineParser::parsePPIf(bool IfDef) {
// If there's a #ifndef on the first line, and the only lines before it are
// comments, it could be an include guard.
bool MaybeIncludeGuard = IfNDef;
if (!IncludeGuardRejected && !FoundIncludeGuardStart && MaybeIncludeGuard) {
if (IncludeGuard == IG_Inited && MaybeIncludeGuard)
for (auto &Line : Lines) {
if (!Line.Tokens.front().Tok->is(tok::comment)) {
MaybeIncludeGuard = false;
IncludeGuardRejected = true;
IncludeGuard = IG_Rejected;
break;
}
}
}
--PPBranchLevel;
parsePPUnknown();
++PPBranchLevel;
if (!IncludeGuardRejected && !FoundIncludeGuardStart && MaybeIncludeGuard)
IfNdefCondition = IfCondition;
if (IncludeGuard == IG_Inited && MaybeIncludeGuard) {
IncludeGuard = IG_IfNdefed;
IncludeGuardToken = IfCondition;
}
}
void UnwrappedLineParser::parsePPElse() {
// If a potential include guard has an #else, it's not an include guard.
if (FoundIncludeGuardStart && PPBranchLevel == 0)
FoundIncludeGuardStart = false;
if (IncludeGuard == IG_Defined && PPBranchLevel == 0)
IncludeGuard = IG_Rejected;
conditionalCompilationAlternative();
if (PPBranchLevel > -1)
--PPBranchLevel;
@ -745,34 +757,37 @@ void UnwrappedLineParser::parsePPEndIf() {
conditionalCompilationEnd();
parsePPUnknown();
// If the #endif of a potential include guard is the last thing in the file,
// then we count it as a real include guard and subtract one from every
// preprocessor indent.
// then we found an include guard.
unsigned TokenPosition = Tokens->getPosition();
FormatToken *PeekNext = AllTokens[TokenPosition];
if (FoundIncludeGuardStart && PPBranchLevel == -1 && PeekNext->is(tok::eof) &&
if (IncludeGuard == IG_Defined && PPBranchLevel == -1 &&
PeekNext->is(tok::eof) &&
Style.IndentPPDirectives != FormatStyle::PPDIS_None)
for (auto &Line : Lines)
if (Line.InPPDirective && Line.Level > 0)
--Line.Level;
IncludeGuard = IG_Found;
}
void UnwrappedLineParser::parsePPDefine() {
nextToken();
if (FormatTok->Tok.getKind() != tok::identifier) {
IncludeGuard = IG_Rejected;
IncludeGuardToken = nullptr;
parsePPUnknown();
return;
}
if (IfNdefCondition && IfNdefCondition->TokenText == FormatTok->TokenText) {
FoundIncludeGuardStart = true;
if (IncludeGuard == IG_IfNdefed &&
IncludeGuardToken->TokenText == FormatTok->TokenText) {
IncludeGuard = IG_Defined;
IncludeGuardToken = nullptr;
for (auto &Line : Lines) {
if (!Line.Tokens.front().Tok->isOneOf(tok::comment, tok::hash)) {
FoundIncludeGuardStart = false;
IncludeGuard = IG_Rejected;
break;
}
}
}
IfNdefCondition = nullptr;
nextToken();
if (FormatTok->Tok.getKind() == tok::l_paren &&
FormatTok->WhitespaceRange.getBegin() ==
@ -799,7 +814,6 @@ void UnwrappedLineParser::parsePPUnknown() {
if (Style.IndentPPDirectives == FormatStyle::PPDIS_AfterHash)
Line->Level += PPBranchLevel + 1;
addUnwrappedLine();
IfNdefCondition = nullptr;
}
// Here we blacklist certain tokens that are not usually the first token in an

View File

@ -248,10 +248,23 @@ class UnwrappedLineParser {
// sequence.
std::stack<int> PPChainBranchIndex;
// Contains the #ifndef condition for a potential include guard.
FormatToken *IfNdefCondition;
bool FoundIncludeGuardStart;
bool IncludeGuardRejected;
// Include guard search state. Used to fixup preprocessor indent levels
// so that include guards do not participate in indentation.
enum IncludeGuardState {
IG_Inited, // Search started, looking for #ifndef.
IG_IfNdefed, // #ifndef found, IncludeGuardToken points to condition.
IG_Defined, // Matching #define found, checking other requirements.
IG_Found, // All requirements met, need to fix indents.
IG_Rejected, // Search failed or never started.
};
// Current state of include guard search.
IncludeGuardState IncludeGuard;
// Points to the #ifndef condition for a potential include guard. Null unless
// IncludeGuardState == IG_IfNdefed.
FormatToken *IncludeGuardToken;
// Contains the first start column where the source begins. This is zero for
// normal source code and may be nonzero when formatting a code fragment that
// does not start at the beginning of the file.
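A minimal sketch of what this state machine buys (my own example, not from the commit), assuming IndentPPDirectives: AfterHash: once the #ifndef/#define pair is recognized as a guard, it contributes no preprocessor nesting level, so only conditionals inside the guard indent:

#ifndef GUARDED_H        // recognized guard: adds no indent level
#define GUARDED_H
#if defined(FEATURE)     // first real nesting level
#  define FEATURE_ON 1   // indented one step after the hash
#endif
#endif // GUARDED_H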

View File

@ -1854,13 +1854,15 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
{
return (__mmask64) (( __A & 0xFFFFFFFF) | ( __B << 32));
return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
(__mmask64) __B);
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS
_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
{
return (__mmask32) (( __A & 0xFFFF) | ( __B << 16));
return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
(__mmask32) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS

View File

@ -8787,7 +8787,7 @@ _mm512_kortestz (__mmask16 __A, __mmask16 __B)
static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) (( __A & 0xFF) | ( __B << 8));
return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS

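Both this hunk and the avx512bw one above replace hand-written shift/or sequences with the dedicated kunpck builtins (the hi/si/di suffixes are GCC machine modes for 16-, 32- and 64-bit results, not "high"/"low"). Besides letting the backend select the actual instruction, the builtin also settles the operand order: by my reading of Intel's KUNPCKBW pseudocode (DEST[7:0] = SRC2[7:0], DEST[15:8] = SRC1[7:0]; an assumption on my part, not text from this commit), the old inline expression produced the two halves swapped. A tiny self-checking sketch:

#include <cassert>
#include <cstdint>

// Scalar model of KUNPCKBW per Intel's pseudocode (assumption, see above):
//   DEST[7:0]  = SRC2[7:0], DEST[15:8] = SRC1[7:0]
static uint16_t kunpackb_model(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>(((a & 0xFFu) << 8) | (b & 0xFFu));
}

int main() {
  // The old inline form ((a & 0xFF) | (b << 8)) yields 0xBBAA here,
  // i.e. the halves swapped relative to the model:
  assert(kunpackb_model(0xAA, 0xBB) == 0xAABB);
  return 0;
}

The word and doubleword variants follow the same pattern at 16- and 32-bit granularity.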
View File

@ -738,15 +738,17 @@ void NumericLiteralParser::ParseDecimalOrOctalCommon(SourceLocation TokLoc){
s++;
radix = 10;
saw_exponent = true;
if (*s == '+' || *s == '-') s++; // sign
if (s != ThisTokEnd && (*s == '+' || *s == '-')) s++; // sign
const char *first_non_digit = SkipDigits(s);
if (containsDigits(s, first_non_digit)) {
checkSeparator(TokLoc, s, CSK_BeforeDigits);
s = first_non_digit;
} else {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
diag::err_exponent_has_no_digits);
hadError = true;
if (!hadError) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
diag::err_exponent_has_no_digits);
hadError = true;
}
return;
}
}
@ -787,10 +789,12 @@ void NumericLiteralParser::checkSeparator(SourceLocation TokLoc,
} else if (Pos == ThisTokEnd)
return;
if (isDigitSeparator(*Pos))
if (isDigitSeparator(*Pos)) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Pos - ThisTokBegin),
diag::err_digit_separator_not_between_digits)
<< IsAfterDigits;
hadError = true;
}
}
/// ParseNumberStartingWithZero - This method is called when the first character
@ -840,12 +844,14 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
const char *Exponent = s;
s++;
saw_exponent = true;
if (*s == '+' || *s == '-') s++; // sign
if (s != ThisTokEnd && (*s == '+' || *s == '-')) s++; // sign
const char *first_non_digit = SkipDigits(s);
if (!containsDigits(s, first_non_digit)) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
diag::err_exponent_has_no_digits);
hadError = true;
if (!hadError) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
diag::err_exponent_has_no_digits);
hadError = true;
}
return;
}
checkSeparator(TokLoc, s, CSK_BeforeDigits);
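Both exponent hunks fix the same two issues: the sign check could dereference s when it already equaled ThisTokEnd (a read one past the end of a buffer that is not necessarily NUL-terminated), and err_exponent_has_no_digits is now emitted only when no earlier error was recorded, avoiding a second diagnostic on an already-broken literal. A self-contained sketch of the corrected scan order (function and names are mine, not clang's):

#include <cassert>
#include <cctype>

// Mirror of the fixed idiom: test the cursor against the end pointer
// before every dereference, never after.
static const char *skipExponent(const char *s, const char *end) {
  if (s != end && (*s == 'e' || *s == 'E')) {
    ++s;
    if (s != end && (*s == '+' || *s == '-')) // guard precedes the deref
      ++s;
    while (s != end && std::isdigit(static_cast<unsigned char>(*s)))
      ++s;
  }
  return s;
}

int main() {
  const char tok[] = {'1', 'e'}; // deliberately not NUL-terminated
  const char *end = tok + sizeof(tok);
  assert(skipExponent(tok + 1, end) == end); // stops cleanly at end
  return 0;
}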

View File

@ -8972,6 +8972,16 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
LHS = LHS->IgnoreParenImpCasts();
RHS = RHS->IgnoreParenImpCasts();
if (!S.getLangOpts().CPlusPlus) {
// Avoid warning about comparison of integers with different signs when
// RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
// the type of `E`.
if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
}
// Check to see if one of the (unmodified) operands is of different
// signedness.
Expr *signedOperand, *unsignedOperand;
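For reference, the sugar being looked through (a hypothetical C-mode snippet of mine; the commit's own test case is not shown here):

__typeof__(1u) tu = 0; /* TypeOfExprType: 'typeof (1u)', sugar over 'unsigned int' */

With this change the signedness analysis inspects the expression the typeof was taken from (here 1u, after stripping parentheses and implicit casts) instead of the operand that merely carries the sugared type.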

View File

@ -352,6 +352,7 @@ class InitListChecker {
bool FillWithNoInit = false);
void FillInEmptyInitializations(const InitializedEntity &Entity,
InitListExpr *ILE, bool &RequiresSecondPass,
InitListExpr *OuterILE, unsigned OuterIndex,
bool FillWithNoInit = false);
bool CheckFlexibleArrayInit(const InitializedEntity &Entity,
Expr *InitExpr, FieldDecl *Field,
@ -517,12 +518,13 @@ void InitListChecker::FillInEmptyInitForBase(
ILE->setInit(Init, BaseInit.getAs<Expr>());
} else if (InitListExpr *InnerILE =
dyn_cast<InitListExpr>(ILE->getInit(Init))) {
FillInEmptyInitializations(BaseEntity, InnerILE,
RequiresSecondPass, FillWithNoInit);
FillInEmptyInitializations(BaseEntity, InnerILE, RequiresSecondPass,
ILE, Init, FillWithNoInit);
} else if (DesignatedInitUpdateExpr *InnerDIUE =
dyn_cast<DesignatedInitUpdateExpr>(ILE->getInit(Init))) {
FillInEmptyInitializations(BaseEntity, InnerDIUE->getUpdater(),
RequiresSecondPass, /*FillWithNoInit =*/true);
RequiresSecondPass, ILE, Init,
/*FillWithNoInit =*/true);
}
}
@ -605,24 +607,43 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
} else if (InitListExpr *InnerILE
= dyn_cast<InitListExpr>(ILE->getInit(Init)))
FillInEmptyInitializations(MemberEntity, InnerILE,
RequiresSecondPass, FillWithNoInit);
RequiresSecondPass, ILE, Init, FillWithNoInit);
else if (DesignatedInitUpdateExpr *InnerDIUE
= dyn_cast<DesignatedInitUpdateExpr>(ILE->getInit(Init)))
FillInEmptyInitializations(MemberEntity, InnerDIUE->getUpdater(),
RequiresSecondPass, /*FillWithNoInit =*/ true);
RequiresSecondPass, ILE, Init,
/*FillWithNoInit =*/true);
}
/// Recursively replaces NULL values within the given initializer list
/// with expressions that perform value-initialization of the
/// appropriate type.
/// appropriate type, and finishes off the InitListExpr formation.
void
InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
InitListExpr *ILE,
bool &RequiresSecondPass,
InitListExpr *OuterILE,
unsigned OuterIndex,
bool FillWithNoInit) {
assert((ILE->getType() != SemaRef.Context.VoidTy) &&
"Should not have void type");
// If this is a nested initializer list, we might have changed its contents
// (and therefore some of its properties, such as instantiation-dependence)
// while filling it in. Inform the outer initializer list so that its state
// can be updated to match.
// FIXME: We should fully build the inner initializers before constructing
// the outer InitListExpr instead of mutating AST nodes after they have
// been used as subexpressions of other nodes.
struct UpdateOuterILEWithUpdatedInit {
InitListExpr *Outer;
unsigned OuterIndex;
~UpdateOuterILEWithUpdatedInit() {
if (Outer)
Outer->setInit(OuterIndex, Outer->getInit(OuterIndex));
}
} UpdateOuterRAII = {OuterILE, OuterIndex};
// A transparent ILE is not performing aggregate initialization and should
// not be filled in.
if (ILE->isTransparent())
@ -769,11 +790,12 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
} else if (InitListExpr *InnerILE
= dyn_cast_or_null<InitListExpr>(InitExpr))
FillInEmptyInitializations(ElementEntity, InnerILE, RequiresSecondPass,
FillWithNoInit);
ILE, Init, FillWithNoInit);
else if (DesignatedInitUpdateExpr *InnerDIUE
= dyn_cast_or_null<DesignatedInitUpdateExpr>(InitExpr))
FillInEmptyInitializations(ElementEntity, InnerDIUE->getUpdater(),
RequiresSecondPass, /*FillWithNoInit =*/ true);
RequiresSecondPass, ILE, Init,
/*FillWithNoInit =*/true);
}
}
@ -795,10 +817,11 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
if (!hadError && !VerifyOnly) {
bool RequiresSecondPass = false;
FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass);
FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass,
/*OuterILE=*/nullptr, /*OuterIndex=*/0);
if (RequiresSecondPass && !hadError)
FillInEmptyInitializations(Entity, FullyStructuredList,
RequiresSecondPass);
RequiresSecondPass, nullptr, 0);
}
}
@ -1162,10 +1185,12 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
if (!hadError && !VerifyOnly) {
bool RequiresSecondPass = false;
FillInEmptyInitializations(Entity, InnerStructuredList,
RequiresSecondPass);
RequiresSecondPass, StructuredList,
StructuredIndex);
if (RequiresSecondPass && !hadError)
FillInEmptyInitializations(Entity, InnerStructuredList,
RequiresSecondPass);
RequiresSecondPass, StructuredList,
StructuredIndex);
}
++StructuredIndex;
++Index;
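The UpdateOuterILEWithUpdatedInit struct above is a scope-exit notifier: whatever path the function returns through, the outer list's setInit re-runs and re-derives cached properties (such as instantiation-dependence) from the possibly-mutated child. A standalone sketch of the same idiom (container and field names are mine, not Sema's):

#include <cassert>
#include <vector>

// Toy aggregate that caches a property derived from its children.
struct Node {
  std::vector<Node *> Kids;
  bool AnyDirty = false;
  void setKid(unsigned I, Node *N) {
    Kids[I] = N;
    AnyDirty = AnyDirty || N->AnyDirty; // re-derive the cached property
  }
};

static void fillIn(Node *N, Node *Outer, unsigned OuterIndex) {
  struct Notify { // plays the role of UpdateOuterILEWithUpdatedInit
    Node *Outer;
    unsigned Index;
    ~Notify() {
      if (Outer)
        Outer->setKid(Index, Outer->Kids[Index]); // runs on every return path
    }
  } NotifyRAII = {Outer, OuterIndex};

  N->AnyDirty = true; // stand-in for filling in empty initializers
  for (unsigned I = 0; I != N->Kids.size(); ++I)
    if (N->Kids[I])
      fillIn(N->Kids[I], N, I); // recurse, propagating updates upward
}

int main() {
  Node Inner, Outer;
  Outer.Kids.push_back(&Inner);
  fillIn(&Outer, /*Outer=*/nullptr, /*OuterIndex=*/0);
  assert(Outer.AnyDirty && Inner.AnyDirty);
  return 0;
}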

View File

@ -96,10 +96,11 @@ class PDBLinker {
/// If the object does not use a type server PDB (compiled with /Z7), we merge
/// all the type and item records from the .debug$T stream and fill in the
/// caller-provided ObjectIndexMap.
const CVIndexMap &mergeDebugT(ObjFile *File, CVIndexMap &ObjectIndexMap);
Expected<const CVIndexMap&> mergeDebugT(ObjFile *File,
CVIndexMap &ObjectIndexMap);
const CVIndexMap &maybeMergeTypeServerPDB(ObjFile *File,
TypeServer2Record &TS);
Expected<const CVIndexMap&> maybeMergeTypeServerPDB(ObjFile *File,
TypeServer2Record &TS);
/// Add the section map and section contributions to the PDB.
void addSections(ArrayRef<OutputSection *> OutputSections,
@ -140,6 +141,10 @@ class PDBLinker {
/// Type index mappings of type server PDBs that we've loaded so far.
std::map<GUID, CVIndexMap> TypeServerIndexMappings;
/// List of TypeServer PDBs which cannot be loaded.
/// Cached to prevent repeated load attempts.
std::set<GUID> MissingTypeServerPDBs;
};
}
@ -230,8 +235,8 @@ maybeReadTypeServerRecord(CVTypeArray &Types) {
return std::move(TS);
}
const CVIndexMap &PDBLinker::mergeDebugT(ObjFile *File,
CVIndexMap &ObjectIndexMap) {
Expected<const CVIndexMap&> PDBLinker::mergeDebugT(ObjFile *File,
CVIndexMap &ObjectIndexMap) {
ArrayRef<uint8_t> Data = getDebugSection(File, ".debug$T");
if (Data.empty())
return ObjectIndexMap;
@ -304,11 +309,19 @@ tryToLoadPDB(const GUID &GuidFromObj, StringRef TSPath) {
return std::move(NS);
}
const CVIndexMap &PDBLinker::maybeMergeTypeServerPDB(ObjFile *File,
TypeServer2Record &TS) {
// First, check if we already loaded a PDB with this GUID. Return the type
Expected<const CVIndexMap&> PDBLinker::maybeMergeTypeServerPDB(ObjFile *File,
TypeServer2Record &TS) {
const GUID& TSId = TS.getGuid();
StringRef TSPath = TS.getName();
// First, check if the PDB has previously failed to load.
if (MissingTypeServerPDBs.count(TSId))
return make_error<pdb::GenericError>(
pdb::generic_error_code::type_server_not_found, TSPath);
// Second, check if we already loaded a PDB with this GUID. Return the type
// index mapping if we have it.
auto Insertion = TypeServerIndexMappings.insert({TS.getGuid(), CVIndexMap()});
auto Insertion = TypeServerIndexMappings.insert({TSId, CVIndexMap()});
CVIndexMap &IndexMap = Insertion.first->second;
if (!Insertion.second)
return IndexMap;
@ -319,18 +332,21 @@ const CVIndexMap &PDBLinker::maybeMergeTypeServerPDB(ObjFile *File,
// Check for a PDB at:
// 1. The given file path
// 2. Next to the object file or archive file
auto ExpectedSession = tryToLoadPDB(TS.getGuid(), TS.getName());
auto ExpectedSession = tryToLoadPDB(TSId, TSPath);
if (!ExpectedSession) {
consumeError(ExpectedSession.takeError());
StringRef LocalPath =
!File->ParentName.empty() ? File->ParentName : File->getName();
SmallString<128> Path = sys::path::parent_path(LocalPath);
sys::path::append(
Path, sys::path::filename(TS.getName(), sys::path::Style::windows));
ExpectedSession = tryToLoadPDB(TS.getGuid(), Path);
Path, sys::path::filename(TSPath, sys::path::Style::windows));
ExpectedSession = tryToLoadPDB(TSId, Path);
}
if (auto E = ExpectedSession.takeError()) {
TypeServerIndexMappings.erase(TSId);
MissingTypeServerPDBs.emplace(TSId);
return std::move(E);
}
if (auto E = ExpectedSession.takeError())
fatal("Type server PDB was not found: " + toString(std::move(E)));
auto ExpectedTpi = (*ExpectedSession)->getPDBFile().getPDBTpiStream();
if (auto E = ExpectedTpi.takeError())
@ -707,7 +723,16 @@ void PDBLinker::addObjFile(ObjFile *File) {
// the PDB first, so that we can get the map from object file type and item
// indices to PDB type and item indices.
CVIndexMap ObjectIndexMap;
const CVIndexMap &IndexMap = mergeDebugT(File, ObjectIndexMap);
auto IndexMapResult = mergeDebugT(File, ObjectIndexMap);
// If the .debug$T sections fail to merge, assume there is no debug info.
if (!IndexMapResult) {
warn("Type server PDB for " + Name + " is invalid, ignoring debug info. " +
toString(IndexMapResult.takeError()));
return;
}
const CVIndexMap &IndexMap = *IndexMapResult;
// Now do all live .debug$S sections.
for (SectionChunk *DebugChunk : File->getDebugChunks()) {

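The control flow above combines three pieces: successful loads stay cached in TypeServerIndexMappings, failures are rolled back and remembered in MissingTypeServerPDBs so each missing PDB is reported once instead of re-attempted per object file, and every caller now receives an Expected it must check. A condensed sketch of the pattern (type and function names are stand-ins, not lld's):

#include "llvm/Support/Error.h"
#include <map>
#include <set>
#include <string>

using llvm::Expected;

struct IndexMap {};      // stand-in for CVIndexMap
using Key = std::string; // stand-in for the PDB GUID

static std::map<Key, IndexMap> Loaded; // successful loads
static std::set<Key> Missing;          // negative cache of failures

static Expected<IndexMap &> loadPDB(const Key &K) {
  // Placeholder loader standing in for tryToLoadPDB(); always fails here.
  return llvm::make_error<llvm::StringError>("cannot load " + K,
                                             llvm::inconvertibleErrorCode());
}

Expected<const IndexMap &> getOrLoad(const Key &K) {
  // Fail fast if this PDB already failed to load once.
  if (Missing.count(K))
    return llvm::make_error<llvm::StringError>(
        "type server previously not found: " + K,
        llvm::inconvertibleErrorCode());
  // Speculatively insert; a prior success returns the cached mapping.
  auto Insertion = Loaded.insert({K, IndexMap()});
  if (!Insertion.second)
    return Insertion.first->second;
  Expected<IndexMap &> R = loadPDB(K);
  if (!R) {
    Loaded.erase(Insertion.first); // roll back the speculative insertion
    Missing.insert(K);             // negative-cache the failure
    return R.takeError();
  }
  return Insertion.first->second; // freshly loaded mapping
}

int main() {
  auto R = getOrLoad("a.pdb");
  if (!R)
    llvm::consumeError(R.takeError()); // first miss: a load was attempted
  auto R2 = getOrLoad("a.pdb");        // second miss: served from the cache
  if (!R2)
    llvm::consumeError(R2.takeError());
  return 0;
}

On the lld side, addObjFile then downgrades a failed merge from fatal() to warn() and simply skips that object's debug info.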
View File

@ -638,7 +638,7 @@ void LinkerDriver::readConfigs(opt::InputArgList &Args) {
Config->Optimize = args::getInteger(Args, OPT_O, 1);
Config->OrphanHandling = getOrphanHandling(Args);
Config->OutputFile = Args.getLastArgValue(OPT_o);
Config->Pie = Args.hasFlag(OPT_pie, OPT_nopie, false);
Config->Pie = Args.hasFlag(OPT_pie, OPT_no_pie, false);
Config->PrintGcSections =
Args.hasFlag(OPT_print_gc_sections, OPT_no_print_gc_sections, false);
Config->Rpath = getRpath(Args);

View File

@ -856,6 +856,14 @@ template <class ELFT> void SharedFile<ELFT>::parseRest() {
continue;
}
if (Config->EMachine == EM_MIPS) {
// FIXME: MIPS BFD linker puts _gp_disp symbol into DSO files
// and incorrectly assigns VER_NDX_LOCAL to this section global
// symbol. Here is a workaround for this bug.
if (Versym && VersymIndex == VER_NDX_LOCAL && Name == "_gp_disp")
continue;
}
const Elf_Verdef *Ver = nullptr;
if (VersymIndex != VER_NDX_GLOBAL) {
if (VersymIndex >= Verdefs.size() || VersymIndex == VER_NDX_LOCAL) {

View File

@ -202,6 +202,8 @@ def no_gnu_unique: F<"no-gnu-unique">,
def no_merge_exidx_entries: F<"no-merge-exidx-entries">,
HelpText<"Disable merging .ARM.exidx entries">;
def no_pie: F<"no-pie">, HelpText<"Do not create a position independent executable">;
def no_threads: F<"no-threads">,
HelpText<"Do not run the linker multi-threaded">;
@ -211,8 +213,6 @@ def no_whole_archive: F<"no-whole-archive">,
def noinhibit_exec: F<"noinhibit-exec">,
HelpText<"Retain the executable output file whenever it is still usable">;
def nopie: F<"nopie">, HelpText<"Do not create a position independent executable">;
def no_omagic: Flag<["--"], "no-omagic">, MetaVarName<"<magic>">,
HelpText<"Do not set the text data sections to be writable">;

View File

@ -45,19 +45,9 @@ static uint32_t g_initialize_count = 0;
PlatformSP PlatformNetBSD::CreateInstance(bool force, const ArchSpec *arch) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PLATFORM));
if (log) {
const char *arch_name;
if (arch && arch->GetArchitectureName())
arch_name = arch->GetArchitectureName();
else
arch_name = "<null>";
const char *triple_cstr =
arch ? arch->GetTriple().getTriple().c_str() : "<null>";
log->Printf("PlatformNetBSD::%s(force=%s, arch={%s,%s})", __FUNCTION__,
force ? "true" : "false", arch_name, triple_cstr);
}
LLDB_LOG(log, "force = {0}, arch=({1}, {2})", force,
arch ? arch->GetArchitectureName() : "<null>",
arch ? arch->GetTriple().getTriple() : "<null>");
bool create = force;
if (create == false && arch && arch->IsValid()) {
@ -72,18 +62,10 @@ PlatformSP PlatformNetBSD::CreateInstance(bool force, const ArchSpec *arch) {
}
}
LLDB_LOG(log, "create = {0}", create);
if (create) {
if (log)
log->Printf("PlatformNetBSD::%s() creating remote-netbsd platform",
__FUNCTION__);
return PlatformSP(new PlatformNetBSD(false));
}
if (log)
log->Printf(
"PlatformNetBSD::%s() aborting creation of remote-netbsd platform",
__FUNCTION__);
return PlatformSP();
}
@ -258,19 +240,15 @@ bool PlatformNetBSD::CanDebugProcess() {
}
// For local debugging, NetBSD will override the debug logic to use llgs-launch
// rather than
// lldb-launch, llgs-attach. This differs from current lldb-launch,
// debugserver-attach
// approach on MacOSX.
lldb::ProcessSP PlatformNetBSD::DebugProcess(
ProcessLaunchInfo &launch_info, Debugger &debugger,
Target *target, // Can be NULL, if NULL create a new
// target, else use existing one
Status &error) {
// rather than lldb-launch, llgs-attach. This differs from current lldb-launch,
// debugserver-attach approach on MacOSX.
lldb::ProcessSP
PlatformNetBSD::DebugProcess(ProcessLaunchInfo &launch_info, Debugger &debugger,
Target *target, // Can be NULL, if NULL create a new
// target, else use existing one
Status &error) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PLATFORM));
if (log)
log->Printf("PlatformNetBSD::%s entered (target %p)", __FUNCTION__,
static_cast<void *>(target));
LLDB_LOG(log, "target {0}", target);
// If we're a remote host, use standard behavior from parent class.
if (!IsHost())
@ -293,61 +271,42 @@ lldb::ProcessSP PlatformNetBSD::DebugProcess(
// Ensure we have a target.
if (target == nullptr) {
if (log)
log->Printf("PlatformNetBSD::%s creating new target", __FUNCTION__);
LLDB_LOG(log, "creating new target");
TargetSP new_target_sp;
error = debugger.GetTargetList().CreateTarget(debugger, "", "", false,
nullptr, new_target_sp);
if (error.Fail()) {
if (log)
log->Printf("PlatformNetBSD::%s failed to create new target: %s",
__FUNCTION__, error.AsCString());
LLDB_LOG(log, "failed to create new target: {0}", error);
return process_sp;
}
target = new_target_sp.get();
if (!target) {
error.SetErrorString("CreateTarget() returned nullptr");
if (log)
log->Printf("PlatformNetBSD::%s failed: %s", __FUNCTION__,
error.AsCString());
LLDB_LOG(log, "error: {0}", error);
return process_sp;
}
} else {
if (log)
log->Printf("PlatformNetBSD::%s using provided target", __FUNCTION__);
}
// Mark target as currently selected target.
debugger.GetTargetList().SetSelectedTarget(target);
// Now create the gdb-remote process.
if (log)
log->Printf(
"PlatformNetBSD::%s having target create process with gdb-remote plugin",
__FUNCTION__);
LLDB_LOG(log, "having target create process with gdb-remote plugin");
process_sp = target->CreateProcess(
launch_info.GetListenerForProcess(debugger), "gdb-remote", nullptr);
if (!process_sp) {
error.SetErrorString("CreateProcess() failed for gdb-remote process");
if (log)
log->Printf("PlatformNetBSD::%s failed: %s", __FUNCTION__,
error.AsCString());
LLDB_LOG(log, "error: {0}", error);
return process_sp;
} else {
if (log)
log->Printf("PlatformNetBSD::%s successfully created process",
__FUNCTION__);
}
LLDB_LOG(log, "successfully created process");
// Adjust launch for a hijacker.
ListenerSP listener_sp;
if (!launch_info.GetHijackListener()) {
if (log)
log->Printf("PlatformNetBSD::%s setting up hijacker", __FUNCTION__);
LLDB_LOG(log, "setting up hijacker");
listener_sp =
Listener::MakeListener("lldb.PlatformNetBSD.DebugProcess.hijack");
launch_info.SetHijackListener(listener_sp);
@ -356,16 +315,13 @@ lldb::ProcessSP PlatformNetBSD::DebugProcess(
// Log file actions.
if (log) {
log->Printf(
"PlatformNetBSD::%s launching process with the following file actions:",
__FUNCTION__);
LLDB_LOG(log, "launching process with the following file actions:");
StreamString stream;
size_t i = 0;
const FileAction *file_action;
while ((file_action = launch_info.GetFileActionAtIndex(i++)) != nullptr) {
file_action->Dump(stream);
log->PutCString(stream.GetData());
LLDB_LOG(log, "{0}", stream.GetData());
stream.Clear();
}
}
@ -378,16 +334,7 @@ lldb::ProcessSP PlatformNetBSD::DebugProcess(
const StateType state = process_sp->WaitForProcessToStop(
llvm::None, NULL, false, listener_sp);
if (state == eStateStopped) {
if (log)
log->Printf("PlatformNetBSD::%s pid %" PRIu64 " state %s\n",
__FUNCTION__, process_sp->GetID(), StateAsCString(state));
} else {
if (log)
log->Printf("PlatformNetBSD::%s pid %" PRIu64
" state is not stopped - %s\n",
__FUNCTION__, process_sp->GetID(), StateAsCString(state));
}
LLDB_LOG(log, "pid {0} state {0}", process_sp->GetID(), state);
}
// Hook up process PTY if we have one (which we should for local debugging
@ -395,20 +342,11 @@ lldb::ProcessSP PlatformNetBSD::DebugProcess(
int pty_fd = launch_info.GetPTY().ReleaseMasterFileDescriptor();
if (pty_fd != PseudoTerminal::invalid_fd) {
process_sp->SetSTDIOFileDescriptor(pty_fd);
if (log)
log->Printf("PlatformNetBSD::%s pid %" PRIu64
" hooked up STDIO pty to process",
__FUNCTION__, process_sp->GetID());
} else {
if (log)
log->Printf("PlatformNetBSD::%s pid %" PRIu64
" not using process STDIO pty",
__FUNCTION__, process_sp->GetID());
}
LLDB_LOG(log, "hooked up STDIO pty to process");
} else
LLDB_LOG(log, "not using process STDIO pty");
} else {
if (log)
log->Printf("PlatformNetBSD::%s process launch failed: %s", __FUNCTION__,
error.AsCString());
LLDB_LOG(log, "process launch failed: {0}", error);
// FIXME figure out appropriate cleanup here. Do we delete the target? Do
// we delete the process? Does our caller do that?
}
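The conversion relies on two properties of the real LLDB_LOG macro (from lldb/Utility/Log.h): it null-checks the log pointer and stamps the function name itself, so call sites lose their if (log) guards and __FUNCTION__ boilerplate, and it formats with formatv-style {0} placeholders that know how to print objects such as Status. A rough stand-in showing the shape of the macro (simplified and printf-based; the real one also records file/line and uses llvm::formatv):

#include <cstdarg>
#include <cstdio>

struct Log {
  void printf(const char *Func, const char *Fmt, ...) {
    va_list Args;
    va_start(Args, Fmt);
    std::fprintf(stderr, "%s: ", Func);
    std::vfprintf(stderr, Fmt, Args);
    std::fputc('\n', stderr);
    va_end(Args);
  }
};

// Simplified stand-in for LLDB_LOG: the macro, not the caller, performs
// the null check and injects the function name.
#define MY_LOG(log, ...)                                                       \
  do {                                                                         \
    if (log)                                                                   \
      (log)->printf(__FUNCTION__, __VA_ARGS__);                                \
  } while (0)

int main() {
  Log L;
  Log *log = &L;
  MY_LOG(log, "pid %d state %s", 42, "stopped"); // "main: pid 42 state stopped"
  log = nullptr;
  MY_LOG(log, "never printed"); // safe: the guard lives inside the macro
  return 0;
}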

View File

@ -111,7 +111,7 @@ NativeProcessNetBSD::Factory::Launch(ProcessLaunchInfo &launch_info,
for (const auto &thread : process_up->m_threads)
static_cast<NativeThreadNetBSD &>(*thread).SetStoppedBySignal(SIGSTOP);
process_up->SetState(StateType::eStateStopped);
process_up->SetState(StateType::eStateStopped, false);
return std::move(process_up);
}
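The extra false argument is the notify flag: the initial stopped state is recorded without broadcasting a state-changed event, presumably because the process object is still being constructed inside the factory and no listener should observe the transition (my reading; the commit itself only shows the call). The idiom in isolation, with hypothetical names:

#include <functional>
#include <iostream>

// Sketch of a SetState(state, notify) pair: construction-time transitions
// pass notify=false so observers only see post-setup changes.
class Process {
public:
  std::function<void(int)> OnStateChanged;
  void SetState(int NewState, bool Notify = true) {
    State = NewState;
    if (Notify && OnStateChanged)
      OnStateChanged(State); // broadcast only when requested
  }

private:
  int State = 0;
};

int main() {
  Process P;
  P.OnStateChanged = [](int S) { std::cout << "state -> " << S << '\n'; };
  P.SetState(1, /*Notify=*/false); // silent: factory-internal transition
  P.SetState(2);                   // observed: prints "state -> 2"
  return 0;
}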

View File

@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "324090"
#define SVN_REVISION "325330"

View File

@ -4,5 +4,5 @@
#define LLD_VERSION_STRING "6.0.0"
#define LLD_VERSION_MAJOR 6
#define LLD_VERSION_MINOR 0
#define LLD_REVISION_STRING "324090"
#define LLD_REVISION_STRING "325330"
#define LLD_REPOSITORY_STRING "FreeBSD"

View File

@ -1,2 +1,2 @@
/* $FreeBSD$ */
#define LLVM_REVISION "svn-r324090"
#define LLVM_REVISION "svn-r325330"