Merge llvm, clang, compiler-rt, libc++, lld, and lldb release_80 branch

r354799, resolve conflicts, and bump version numbers.
Dimitry Andric 2019-02-25 19:17:20 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang800-import/; revision=344548
14 changed files with 196 additions and 120 deletions

View File

@ -13884,7 +13884,6 @@ static SDValue lowerVectorShuffleAsLanePermuteAndPermute(
int NumEltsPerLane = NumElts / NumLanes;
SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
for (int i = 0; i != NumElts; ++i) {
@ -13899,10 +13898,20 @@ static SDValue lowerVectorShuffleAsLanePermuteAndPermute(
return SDValue();
SrcLaneMask[DstLane] = SrcLane;
LaneMask[i] = (SrcLane * NumEltsPerLane) + (i % NumEltsPerLane);
PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
}
// Make sure we set all elements of the lane mask, to avoid undef propagation.
SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
int SrcLane = SrcLaneMask[DstLane];
if (0 <= SrcLane)
for (int j = 0; j != NumEltsPerLane; ++j) {
LaneMask[(DstLane * NumEltsPerLane) + j] =
(SrcLane * NumEltsPerLane) + j;
}
}
// If we're only shuffling a single lowest lane and the rest are identity,
// then don't bother.
// TODO - isShuffleMaskInputInPlace could be extended to something like this.
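A minimal standalone sketch of the fix above (a hypothetical buildLaneMask helper using std::vector in place of SmallVector, not LLVM API): once a destination lane is known to read from some source lane, every element slot in that lane gets an explicit in-lane identity index, so no sentinel (undef) entries are left to propagate.

#include <vector>

constexpr int kSentinelUndef = -1; // stand-in for SM_SentinelUndef

std::vector<int> buildLaneMask(const std::vector<int> &SrcLaneMask,
                               int NumEltsPerLane) {
  const int NumLanes = static_cast<int>(SrcLaneMask.size());
  std::vector<int> LaneMask(NumLanes * NumEltsPerLane, kSentinelUndef);
  for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
    const int SrcLane = SrcLaneMask[DstLane];
    if (SrcLane < 0)
      continue; // lane never referenced: leave it undef
    for (int j = 0; j != NumEltsPerLane; ++j)
      LaneMask[DstLane * NumEltsPerLane + j] = SrcLane * NumEltsPerLane + j;
  }
  return LaneMask;
}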

View File

@ -1376,7 +1376,8 @@ Instruction *InstCombiner::foldVectorBinop(BinaryOperator &Inst) {
if (match(LHS, m_ShuffleVector(m_Value(L0), m_Value(L1), m_Constant(Mask))) &&
match(RHS, m_ShuffleVector(m_Value(R0), m_Value(R1), m_Specific(Mask))) &&
LHS->hasOneUse() && RHS->hasOneUse() &&
cast<ShuffleVectorInst>(LHS)->isConcat()) {
cast<ShuffleVectorInst>(LHS)->isConcat() &&
cast<ShuffleVectorInst>(RHS)->isConcat()) {
// This transform does not have the speculative execution constraint as
// below because the shuffle is a concatenation. The new binops are
// operating on exactly the same elements as the existing binop.
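The added condition also requires the RHS shuffle to be a concatenation; previously only the LHS was checked. A hedged scalar illustration of why the rewrite is sound once both operands really are concats (hypothetical code, with std::array halves and + standing in for the binop):

#include <array>
#include <cassert>

int main() {
  // Two "half" vectors per side; the shuffles above concatenate them.
  std::array<int, 2> L0{1, 2}, L1{3, 4}, R0{10, 20}, R1{30, 40};
  std::array<int, 4> Wide{}, Narrow{};
  // binop(concat(L0, L1), concat(R0, R1)): the original, wide form.
  for (int i = 0; i != 4; ++i)
    Wide[i] = (i < 2 ? L0[i] : L1[i - 2]) + (i < 2 ? R0[i] : R1[i - 2]);
  // concat(binop(L0, R0), binop(L1, R1)): the narrowed form.
  for (int i = 0; i != 2; ++i) {
    Narrow[i] = L0[i] + R0[i];
    Narrow[i + 2] = L1[i] + R1[i];
  }
  assert(Wide == Narrow); // identical elements, in the same positions
  return 0;
}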

View File

@ -11,21 +11,37 @@
// later typically inlined as a chain of efficient hardware comparisons). This
// typically benefits c++ member or nonmember operator==().
//
// The basic idea is to replace a larger chain of integer comparisons loaded
// from contiguous memory locations into a smaller chain of such integer
// The basic idea is to replace a longer chain of integer comparisons loaded
// from contiguous memory locations into a shorter chain of larger integer
// comparisons. The benefits are twofold:
// - There are fewer jumps, and therefore fewer opportunities for
//   mispredictions and I-cache misses.
// - Code size is smaller, both because jumps are removed and because the
// encoding of a 2*n byte compare is smaller than that of two n-byte
// compares.
//
// Example:
//
// struct S {
// int a;
// char b;
// char c;
// uint16_t d;
// bool operator==(const S& o) const {
// return a == o.a && b == o.b && c == o.c && d == o.d;
// }
// };
//
// Is optimized as:
//
// bool S::operator==(const S& o) const {
// return memcmp(this, &o, 8) == 0;
// }
//
// Which will later be expanded (by ExpandMemCmp) into a single 8-byte icmp.
//
//===----------------------------------------------------------------------===//
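A quick sanity check of the example layout (a sketch assuming the usual alignment rules, under which S has no padding): the four fields cover exactly 8 contiguous bytes, which is what licenses the single memcmp.

#include <cstdint>

struct S {
  int a;           // bytes 0..3
  char b;          // byte 4
  char c;          // byte 5
  std::uint16_t d; // bytes 6..7
};
static_assert(sizeof(S) == 8, "fields are contiguous; one 8-byte "
                              "compare covers the whole object");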
#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@ -34,6 +50,10 @@
#include "llvm/Pass.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>
using namespace llvm;
@ -50,76 +70,95 @@ static bool isSimpleLoadOrStore(const Instruction *I) {
return false;
}
// A BCE atom.
// A BCE atom ("Binary Compare Expression" atom) represents an integer load
// that is a constant offset from a base value, e.g. `a` or `o.c` in the example
// at the top.
struct BCEAtom {
BCEAtom() : GEP(nullptr), LoadI(nullptr), Offset() {}
const Value *Base() const { return GEP ? GEP->getPointerOperand() : nullptr; }
BCEAtom() = default;
BCEAtom(GetElementPtrInst *GEP, LoadInst *LoadI, int BaseId, APInt Offset)
: GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(Offset) {}
// We want to order BCEAtoms by (Base, Offset). However we cannot use
// the pointer values for Base because these are non-deterministic.
// To make sure that the sort order is stable, we first assign to each atom
// base value an index based on its order of appearance in the chain of
// comparisons. We call this index `BaseOrdering`. For example, for:
// b[3] == c[2] && a[1] == d[1] && b[4] == c[3]
// | block 1 | | block 2 | | block 3 |
// b gets assigned index 0 and a index 1, because b appears as LHS in block 1,
// which is before block 2.
// We then sort by (BaseOrdering[LHS.Base()], LHS.Offset), which is stable.
bool operator<(const BCEAtom &O) const {
assert(Base() && "invalid atom");
assert(O.Base() && "invalid atom");
// Just ordering by (Base(), Offset) is sufficient. However because this
// means that the ordering will depend on the addresses of the base
// values, which are not reproducible from run to run. To guarantee
// stability, we use the names of the values if they exist; we sort by:
// (Base.getName(), Base(), Offset).
const int NameCmp = Base()->getName().compare(O.Base()->getName());
if (NameCmp == 0) {
if (Base() == O.Base()) {
return Offset.slt(O.Offset);
}
return Base() < O.Base();
}
return NameCmp < 0;
return BaseId != O.BaseId ? BaseId < O.BaseId : Offset.slt(O.Offset);
}
GetElementPtrInst *GEP;
LoadInst *LoadI;
GetElementPtrInst *GEP = nullptr;
LoadInst *LoadI = nullptr;
unsigned BaseId = 0;
APInt Offset;
};
// A class that assigns increasing ids to values in the order in which they are
// seen. See comment in `BCEAtom::operator<()`.
class BaseIdentifier {
public:
// Returns the id for value `Base`, after assigning one if `Base` has not been
// seen before.
int getBaseId(const Value *Base) {
assert(Base && "invalid base");
const auto Insertion = BaseToIndex.try_emplace(Base, Order);
if (Insertion.second)
++Order;
return Insertion.first->second;
}
private:
unsigned Order = 1;
DenseMap<const Value*, int> BaseToIndex;
};
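A self-contained sketch of this id scheme (a hypothetical BaseIds class, std::map standing in for llvm::DenseMap): ids follow order of first appearance, so the sort key no longer depends on run-to-run pointer values.

#include <cassert>
#include <map>

struct Value {}; // opaque stand-in for llvm::Value

class BaseIds {
public:
  int getBaseId(const Value *Base) {
    assert(Base && "invalid base");
    const auto Insertion = BaseToIndex.emplace(Base, Order);
    if (Insertion.second)
      ++Order; // first sighting of this base consumes the next id
    return Insertion.first->second;
  }

private:
  int Order = 1; // 0 stays free so BCEAtom can treat BaseId == 0 as invalid
  std::map<const Value *, int> BaseToIndex;
};

int main() {
  Value A, B;
  BaseIds Ids;
  assert(Ids.getBaseId(&B) == 1); // order of first appearance, not address
  assert(Ids.getBaseId(&A) == 2);
  assert(Ids.getBaseId(&B) == 1); // stable across repeated lookups
  return 0;
}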
// If this value is a load from a constant offset w.r.t. a base address, and
// there are no other users of the load or address, returns the base address and
// the offset.
BCEAtom visitICmpLoadOperand(Value *const Val) {
BCEAtom Result;
if (auto *const LoadI = dyn_cast<LoadInst>(Val)) {
LLVM_DEBUG(dbgs() << "load\n");
if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {
LLVM_DEBUG(dbgs() << "used outside of block\n");
return {};
}
// Do not optimize atomic loads to non-atomic memcmp
if (!LoadI->isSimple()) {
LLVM_DEBUG(dbgs() << "volatile or atomic\n");
return {};
}
Value *const Addr = LoadI->getOperand(0);
if (auto *const GEP = dyn_cast<GetElementPtrInst>(Addr)) {
LLVM_DEBUG(dbgs() << "GEP\n");
if (GEP->isUsedOutsideOfBlock(LoadI->getParent())) {
LLVM_DEBUG(dbgs() << "used outside of block\n");
return {};
}
const auto &DL = GEP->getModule()->getDataLayout();
if (!isDereferenceablePointer(GEP, DL)) {
LLVM_DEBUG(dbgs() << "not dereferenceable\n");
// We need to make sure that we can do comparisons in any order, so we
// require memory to be unconditionally dereferenceable.
return {};
}
Result.Offset = APInt(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
if (GEP->accumulateConstantOffset(DL, Result.Offset)) {
Result.GEP = GEP;
Result.LoadI = LoadI;
}
}
BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
auto *const LoadI = dyn_cast<LoadInst>(Val);
if (!LoadI)
return {};
LLVM_DEBUG(dbgs() << "load\n");
if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {
LLVM_DEBUG(dbgs() << "used outside of block\n");
return {};
}
return Result;
// Do not optimize atomic loads to non-atomic memcmp
if (!LoadI->isSimple()) {
LLVM_DEBUG(dbgs() << "volatile or atomic\n");
return {};
}
Value *const Addr = LoadI->getOperand(0);
auto *const GEP = dyn_cast<GetElementPtrInst>(Addr);
if (!GEP)
return {};
LLVM_DEBUG(dbgs() << "GEP\n");
if (GEP->isUsedOutsideOfBlock(LoadI->getParent())) {
LLVM_DEBUG(dbgs() << "used outside of block\n");
return {};
}
const auto &DL = GEP->getModule()->getDataLayout();
if (!isDereferenceablePointer(GEP, DL)) {
LLVM_DEBUG(dbgs() << "not dereferenceable\n");
// We need to make sure that we can do comparisons in any order, so we
// require memory to be unconditionally dereferenceable.
return {};
}
APInt Offset = APInt(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
if (!GEP->accumulateConstantOffset(DL, Offset))
return {};
return BCEAtom(GEP, LoadI, BaseId.getBaseId(GEP->getPointerOperand()),
Offset);
}
// A basic block with a comparison between two BCE atoms.
// A basic block with a comparison between two BCE atoms, e.g. `a == o.a` in the
// example at the top.
// The block might do extra work besides the atom comparison, in which case
// doesOtherWork() returns true. Under some conditions, the block can be
// split into the atom comparison part and the "other work" part
@ -137,9 +176,7 @@ class BCECmpBlock {
if (Rhs_ < Lhs_) std::swap(Rhs_, Lhs_);
}
bool IsValid() const {
return Lhs_.Base() != nullptr && Rhs_.Base() != nullptr;
}
bool IsValid() const { return Lhs_.BaseId != 0 && Rhs_.BaseId != 0; }
// Assert the block is consistent: If valid, it should also have
// non-null members besides Lhs_ and Rhs_.
@ -265,7 +302,8 @@ bool BCECmpBlock::doesOtherWork() const {
// Visit the given comparison. If this is a comparison between two valid
// BCE atoms, returns the comparison.
BCECmpBlock visitICmp(const ICmpInst *const CmpI,
const ICmpInst::Predicate ExpectedPredicate) {
const ICmpInst::Predicate ExpectedPredicate,
BaseIdentifier &BaseId) {
// The comparison can only be used once:
// - For intermediate blocks, as a branch condition.
// - For the final block, as an incoming value for the Phi.
@ -275,25 +313,27 @@ BCECmpBlock visitICmp(const ICmpInst *const CmpI,
LLVM_DEBUG(dbgs() << "cmp has several uses\n");
return {};
}
if (CmpI->getPredicate() == ExpectedPredicate) {
LLVM_DEBUG(dbgs() << "cmp "
<< (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
<< "\n");
auto Lhs = visitICmpLoadOperand(CmpI->getOperand(0));
if (!Lhs.Base()) return {};
auto Rhs = visitICmpLoadOperand(CmpI->getOperand(1));
if (!Rhs.Base()) return {};
const auto &DL = CmpI->getModule()->getDataLayout();
return BCECmpBlock(std::move(Lhs), std::move(Rhs),
DL.getTypeSizeInBits(CmpI->getOperand(0)->getType()));
}
return {};
if (CmpI->getPredicate() != ExpectedPredicate)
return {};
LLVM_DEBUG(dbgs() << "cmp "
<< (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
<< "\n");
auto Lhs = visitICmpLoadOperand(CmpI->getOperand(0), BaseId);
if (!Lhs.BaseId)
return {};
auto Rhs = visitICmpLoadOperand(CmpI->getOperand(1), BaseId);
if (!Rhs.BaseId)
return {};
const auto &DL = CmpI->getModule()->getDataLayout();
return BCECmpBlock(std::move(Lhs), std::move(Rhs),
DL.getTypeSizeInBits(CmpI->getOperand(0)->getType()));
}
// Visit the given comparison block. If this is a comparison between two valid
// BCE atoms, returns the comparison.
BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
const BasicBlock *const PhiBlock) {
const BasicBlock *const PhiBlock,
BaseIdentifier &BaseId) {
if (Block->empty()) return {};
auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
if (!BranchI) return {};
@ -306,7 +346,7 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
auto *const CmpI = dyn_cast<ICmpInst>(Val);
if (!CmpI) return {};
LLVM_DEBUG(dbgs() << "icmp\n");
auto Result = visitICmp(CmpI, ICmpInst::ICMP_EQ);
auto Result = visitICmp(CmpI, ICmpInst::ICMP_EQ, BaseId);
Result.CmpI = CmpI;
Result.BranchI = BranchI;
return Result;
@ -323,7 +363,8 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
assert(BranchI->getNumSuccessors() == 2 && "expecting a cond branch");
BasicBlock *const FalseBlock = BranchI->getSuccessor(1);
auto Result = visitICmp(
CmpI, FalseBlock == PhiBlock ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE);
CmpI, FalseBlock == PhiBlock ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
BaseId);
Result.CmpI = CmpI;
Result.BranchI = BranchI;
return Result;
@ -335,9 +376,9 @@ static inline void enqueueBlock(std::vector<BCECmpBlock> &Comparisons,
BCECmpBlock &Comparison) {
LLVM_DEBUG(dbgs() << "Block '" << Comparison.BB->getName()
<< "': Found cmp of " << Comparison.SizeBits()
<< " bits between " << Comparison.Lhs().Base() << " + "
<< " bits between " << Comparison.Lhs().BaseId << " + "
<< Comparison.Lhs().Offset << " and "
<< Comparison.Rhs().Base() << " + "
<< Comparison.Rhs().BaseId << " + "
<< Comparison.Rhs().Offset << "\n");
LLVM_DEBUG(dbgs() << "\n");
Comparisons.push_back(Comparison);
@ -360,8 +401,8 @@ class BCECmpChain {
private:
static bool IsContiguous(const BCECmpBlock &First,
const BCECmpBlock &Second) {
return First.Lhs().Base() == Second.Lhs().Base() &&
First.Rhs().Base() == Second.Rhs().Base() &&
return First.Lhs().BaseId == Second.Lhs().BaseId &&
First.Rhs().BaseId == Second.Rhs().BaseId &&
First.Lhs().Offset + First.SizeBits() / 8 == Second.Lhs().Offset &&
First.Rhs().Offset + First.SizeBits() / 8 == Second.Rhs().Offset;
}
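A standalone rendering of the contiguity test (hypothetical CmpDesc struct, plain integers in place of APInt): two compares chain only when the second starts exactly where the first ends, on both the LHS and RHS sides.

#include <cassert>
#include <cstdint>

struct CmpDesc {
  int LhsBase, RhsBase; // base ids, as assigned above
  std::int64_t LhsOffset, RhsOffset;
  int SizeBits;
};

bool isContiguous(const CmpDesc &First, const CmpDesc &Second) {
  return First.LhsBase == Second.LhsBase && First.RhsBase == Second.RhsBase &&
         First.LhsOffset + First.SizeBits / 8 == Second.LhsOffset &&
         First.RhsOffset + First.SizeBits / 8 == Second.RhsOffset;
}

int main() {
  // `a == o.a` (4 bytes at offset 0) followed by `b == o.b` (1 byte at 4).
  CmpDesc C0{1, 2, 0, 0, 32};
  CmpDesc C1{1, 2, 4, 4, 8};
  assert(isContiguous(C0, C1));
  C1.LhsOffset = 5; // a one-byte hole on the left side breaks the chain
  assert(!isContiguous(C0, C1));
  return 0;
}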
@ -385,11 +426,12 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
assert(!Blocks.empty() && "a chain should have at least one block");
// Now look inside blocks to check for BCE comparisons.
std::vector<BCECmpBlock> Comparisons;
BaseIdentifier BaseId;
for (size_t BlockIdx = 0; BlockIdx < Blocks.size(); ++BlockIdx) {
BasicBlock *const Block = Blocks[BlockIdx];
assert(Block && "invalid block");
BCECmpBlock Comparison = visitCmpBlock(Phi.getIncomingValueForBlock(Block),
Block, Phi.getParent());
Block, Phi.getParent(), BaseId);
Comparison.BB = Block;
if (!Comparison.IsValid()) {
LLVM_DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n");
@ -466,9 +508,10 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
#endif // MERGEICMPS_DOT_ON
// Reorder blocks by LHS. We can do that without changing the
// semantics because we are only accessing dereferenceable memory.
llvm::sort(Comparisons_, [](const BCECmpBlock &a, const BCECmpBlock &b) {
return a.Lhs() < b.Lhs();
});
llvm::sort(Comparisons_,
[](const BCECmpBlock &LhsBlock, const BCECmpBlock &RhsBlock) {
return LhsBlock.Lhs() < RhsBlock.Lhs();
});
#ifdef MERGEICMPS_DOT_ON
errs() << "AFTER REORDERING:\n\n";
dump();

View File

@ -2577,6 +2577,11 @@ class CallExpr : public Expr {
NumArgs = NewNumArgs;
}
/// Bluntly set a new number of arguments without doing any checks whatsoever.
/// Only used during construction of a CallExpr in a few places in Sema.
/// FIXME: Find a way to remove it.
void setNumArgsUnsafe(unsigned NewNumArgs) { NumArgs = NewNumArgs; }
typedef ExprIterator arg_iterator;
typedef ConstExprIterator const_arg_iterator;
typedef llvm::iterator_range<arg_iterator> arg_range;

View File

@ -1620,8 +1620,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
bool capturedByInit =
Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
Address Loc =
capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
bool locIsByrefHeader = !capturedByInit;
const Address Loc =
locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
// Note: constexpr already initializes everything correctly.
LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
@ -1637,7 +1638,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
return;
// Only initialize a __block's storage: we always initialize the header.
if (emission.IsEscapingByRef)
if (emission.IsEscapingByRef && !locIsByrefHeader)
Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
CharUnits Size = getContext().getTypeSizeInChars(type);
@ -1745,10 +1746,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
}
llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
if (Loc.getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP);
emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
emitStoresForConstant(
CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
isVolatile, Builder, constant);
}
/// Emit an expression as an initializer for an object (variable, field, etc.)

View File

@ -838,7 +838,7 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
if (LangOpts.MSCompatibilityVersion &&
!LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2015))
OS << ' ';
OS << ": ";
OS << ':';
break;
}

View File

@ -5676,18 +5676,36 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
}
if (!getLangOpts().CPlusPlus) {
// Forget about the nulled arguments since typo correction
// does not handle them well.
TheCall->shrinkNumArgs(Args.size());
// C cannot always handle TypoExpr nodes in builtin calls and direct
// function calls, as their argument checking doesn't necessarily handle
// dependent types properly, so make sure any TypoExprs have been
// dealt with.
ExprResult Result = CorrectDelayedTyposInExpr(TheCall);
if (!Result.isUsable()) return ExprError();
CallExpr *TheOldCall = TheCall;
TheCall = dyn_cast<CallExpr>(Result.get());
bool CorrectedTypos = TheCall != TheOldCall;
if (!TheCall) return Result;
// TheCall at this point has max(Args.size(), NumParams) arguments,
// with extra arguments nulled. We don't want to introduce nulled
// arguments in Args and so we only take the first Args.size() arguments.
Args = llvm::makeArrayRef(TheCall->getArgs(), Args.size());
Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
// A new call expression node was created if some typos were corrected.
// However it may not have been constructed with enough storage. In this
// case, rebuild the node with enough storage. The waste of space is
// immaterial since this only happens when some typos were corrected.
if (CorrectedTypos && Args.size() < NumParams) {
if (Config)
TheCall = CUDAKernelCallExpr::Create(
Context, Fn, cast<CallExpr>(Config), Args, ResultTy, VK_RValue,
RParenLoc, NumParams);
else
TheCall = CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue,
RParenLoc, NumParams, UsesADL);
}
// We can now handle the nulled arguments that stand in for default arguments.
TheCall->setNumArgsUnsafe(std::max<unsigned>(Args.size(), NumParams));
}
// Bail out early if calling a builtin with custom type checking.

View File

@ -30,7 +30,7 @@ def Bstatic: F<"Bstatic">, HelpText<"Do not link against shared libraries">;
def build_id: F<"build-id">, HelpText<"Alias for --build-id=fast">;
def build_id_eq: J<"build-id=">, HelpText<"Generate build ID note">,
MetaVarName<"[fast,md5,sha,uuid,0x<hexstring>]">;
MetaVarName<"[fast,md5,sha1,uuid,0x<hexstring>]">;
defm check_sections: B<"check-sections",
"Check section addresses for overlaps (default)",

View File

@ -5,11 +5,6 @@ lld 8.0.0 Release Notes
.. contents::
:local:
.. warning::
These are in-progress notes for the upcoming LLVM 8.0.0 release.
Release notes for previous releases can be found on
`the Download Page <https://releases.llvm.org/download.html>`_.
Introduction
============
@ -18,7 +13,7 @@ Mach-O (macOS), MinGW and WebAssembly. lld is command-line-compatible with
GNU linkers and Microsoft link.exe and is significantly faster than the
system default linkers.
nlld 8.0.0 has lots of feature improvements and bug fixes.
lld 8.0.0 has lots of feature improvements and bug fixes.
Non-comprehensive list of changes in this release
=================================================

View File

@ -108,8 +108,14 @@ Status MainLoop::RunImpl::Poll() {
num_events = kevent(loop.m_kqueue, in_events.data(), in_events.size(),
out_events, llvm::array_lengthof(out_events), nullptr);
if (num_events < 0)
return Status("kevent() failed with error %d\n", num_events);
if (num_events < 0) {
if (errno == EINTR) {
// In case of EINTR, let the main loop run one iteration.
// We need to zero num_events to avoid failing assertions.
num_events = 0;
} else
return Status(errno, eErrorTypePOSIX);
}
return Status();
}
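The general pattern this fix applies, as a minimal sketch (a hypothetical helper, not lldb API): a signal interrupting the blocking wait is not a hard error; it simply means zero events for this iteration, and the caller's loop runs again.

#include <cerrno>

// Returns the event count, treating a signal interruption as "no events".
template <typename WaitFn> int waitIgnoringEINTR(WaitFn Wait) {
  const int NumEvents = Wait();
  if (NumEvents < 0 && errno == EINTR)
    return 0; // interrupted: let the caller's loop iterate once more
  return NumEvents; // a real error stays negative; otherwise the count
}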

View File

@ -68,8 +68,7 @@ GetOpenBSDProcessArgs(const ProcessInstanceInfoMatch *match_info_ptr,
cstr = data.GetCStr(&offset);
if (cstr) {
process_info.GetExecutableFile().SetFile(cstr, false,
FileSpec::Style::native);
process_info.GetExecutableFile().SetFile(cstr, FileSpec::Style::native);
if (!(match_info_ptr == NULL ||
NameMatches(

View File

@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "354130"
#define SVN_REVISION "354799"

View File

@ -7,4 +7,4 @@
#define LLD_REPOSITORY_STRING "FreeBSD"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
#define LLD_REVISION_STRING "354130-1300002"
#define LLD_REVISION_STRING "354799-1300002"

View File

@ -1,2 +1,2 @@
/* $FreeBSD$ */
#define LLVM_REVISION "svn-r354130"
#define LLVM_REVISION "svn-r354799"