Vendor import of clang trunk r302069:

https://llvm.org/svn/llvm-project/cfe/trunk@302069
Dimitry Andric 2017-05-03 20:26:23 +00:00
parent 570918821a
commit f0c0337bbf
56 changed files with 2835 additions and 2198 deletions

View File

@ -157,8 +157,6 @@ will need to:
``UBSAN_OPTIONS=print_stacktrace=1``.
#. Make sure ``llvm-symbolizer`` binary is in ``PATH``.
Stacktrace printing for UBSan issues is currently not supported on Darwin.
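
As a concrete illustration of the ``UBSAN_OPTIONS``/``llvm-symbolizer`` workflow described above (an illustrative sketch, not part of this commit), a minimal program whose report gains a symbolized stack trace once both pieces are in place:

    // Build:  clang++ -g -fsanitize=undefined overflow.cpp
    // Run:    UBSAN_OPTIONS=print_stacktrace=1 ./a.out
    //         (llvm-symbolizer must be reachable via PATH)
    #include <climits>
    #include <cstdio>

    int main() {
      int x = INT_MAX;
      std::printf("%d\n", x + 1);  // signed integer overflow -> UBSan report
    }
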
Issue Suppression
=================

View File

@ -178,12 +178,7 @@ public:
private:
unsigned char AllExtensionsSilenced; // Used by __extension__
bool IgnoreAllWarnings; // Ignore all warnings: -w
bool WarningsAsErrors; // Treat warnings like errors.
bool EnableAllWarnings; // Enable all warnings.
bool ErrorsAsFatal; // Treat errors like fatal errors.
bool FatalsAsError; // Treat fatal errors like errors.
bool SuppressSystemWarnings; // Suppress warnings in system headers.
bool SuppressAfterFatalError; // Suppress diagnostics after a fatal error?
bool SuppressAllDiagnostics; // Suppress all diagnostics.
bool ElideType; // Elide common types of templates.
bool PrintTemplateTree; // Print a tree when comparing templates.
@ -194,7 +189,6 @@ private:
// 0 -> no limit.
unsigned ConstexprBacktraceLimit; // Cap on depth of constexpr evaluation
// backtrace stack, 0 -> no limit.
diag::Severity ExtBehavior; // Map extensions to warnings or errors?
IntrusiveRefCntPtr<DiagnosticIDs> Diags;
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
DiagnosticConsumer *Client;
@ -216,6 +210,19 @@ private:
llvm::DenseMap<unsigned, DiagnosticMapping> DiagMap;
public:
// "Global" configuration state that can actually vary between modules.
unsigned IgnoreAllWarnings : 1; // Ignore all warnings: -w
unsigned EnableAllWarnings : 1; // Enable all warnings.
unsigned WarningsAsErrors : 1; // Treat warnings like errors.
unsigned ErrorsAsFatal : 1; // Treat errors like fatal errors.
unsigned SuppressSystemWarnings : 1; // Suppress warnings in system headers.
diag::Severity ExtBehavior; // Map extensions to warnings or errors?
DiagState()
: IgnoreAllWarnings(false), EnableAllWarnings(false),
WarningsAsErrors(false), ErrorsAsFatal(false),
SuppressSystemWarnings(false), ExtBehavior(diag::Severity::Ignored) {}
typedef llvm::DenseMap<unsigned, DiagnosticMapping>::iterator iterator;
typedef llvm::DenseMap<unsigned, DiagnosticMapping>::const_iterator
const_iterator;
@ -493,33 +500,47 @@ public:
/// \brief When set to true, any unmapped warnings are ignored.
///
/// If this and WarningsAsErrors are both set, then this one wins.
void setIgnoreAllWarnings(bool Val) { IgnoreAllWarnings = Val; }
bool getIgnoreAllWarnings() const { return IgnoreAllWarnings; }
void setIgnoreAllWarnings(bool Val) {
GetCurDiagState()->IgnoreAllWarnings = Val;
}
bool getIgnoreAllWarnings() const {
return GetCurDiagState()->IgnoreAllWarnings;
}
/// \brief When set to true, any unmapped ignored warnings are no longer
/// ignored.
///
/// If this and IgnoreAllWarnings are both set, then that one wins.
void setEnableAllWarnings(bool Val) { EnableAllWarnings = Val; }
bool getEnableAllWarnings() const { return EnableAllWarnings; }
void setEnableAllWarnings(bool Val) {
GetCurDiagState()->EnableAllWarnings = Val;
}
bool getEnableAllWarnings() const {
return GetCurDiagState()->EnableAllWarnings;
}
/// \brief When set to true, any warnings reported are issued as errors.
void setWarningsAsErrors(bool Val) { WarningsAsErrors = Val; }
bool getWarningsAsErrors() const { return WarningsAsErrors; }
void setWarningsAsErrors(bool Val) {
GetCurDiagState()->WarningsAsErrors = Val;
}
bool getWarningsAsErrors() const {
return GetCurDiagState()->WarningsAsErrors;
}
/// \brief When set to true, any error reported is made a fatal error.
void setErrorsAsFatal(bool Val) { ErrorsAsFatal = Val; }
bool getErrorsAsFatal() const { return ErrorsAsFatal; }
void setErrorsAsFatal(bool Val) { GetCurDiagState()->ErrorsAsFatal = Val; }
bool getErrorsAsFatal() const { return GetCurDiagState()->ErrorsAsFatal; }
/// \brief When set to true, any fatal error reported is made an error.
///
/// This setting takes precedence over the setErrorsAsFatal setting above.
void setFatalsAsError(bool Val) { FatalsAsError = Val; }
bool getFatalsAsError() const { return FatalsAsError; }
/// \brief When set to true (the default), suppress further diagnostics after
/// a fatal error.
void setSuppressAfterFatalError(bool Val) { SuppressAfterFatalError = Val; }
/// \brief When set to true mask warnings that come from system headers.
void setSuppressSystemWarnings(bool Val) { SuppressSystemWarnings = Val; }
bool getSuppressSystemWarnings() const { return SuppressSystemWarnings; }
void setSuppressSystemWarnings(bool Val) {
GetCurDiagState()->SuppressSystemWarnings = Val;
}
bool getSuppressSystemWarnings() const {
return GetCurDiagState()->SuppressSystemWarnings;
}
/// \brief Suppress all diagnostics, to silence the front end when we
/// know that we don't want any more diagnostics to be passed along to the
@ -571,11 +592,15 @@ public:
}
/// \brief Controls whether otherwise-unmapped extension diagnostics are
/// mapped onto ignore/warning/error.
/// mapped onto ignore/warning/error.
///
/// This corresponds to the GCC -pedantic and -pedantic-errors option.
void setExtensionHandlingBehavior(diag::Severity H) { ExtBehavior = H; }
diag::Severity getExtensionHandlingBehavior() const { return ExtBehavior; }
void setExtensionHandlingBehavior(diag::Severity H) {
GetCurDiagState()->ExtBehavior = H;
}
diag::Severity getExtensionHandlingBehavior() const {
return GetCurDiagState()->ExtBehavior;
}
/// \brief Counter bumped when an __extension__ block is encountered.
///

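The net effect of the Diagnostic.h hunks above is that the -w / -Weverything / -Werror / -Wfatal-errors style flags move from DiagnosticsEngine members into the per-module DiagState, with the public accessors now delegating to GetCurDiagState(). A minimal standalone sketch of that shape (simplified names, not clang's actual classes):

    #include <cassert>

    enum class Severity { Ignored, Warning, Error, Fatal };

    // Per-state flag storage, mirroring DiagState above.
    struct State {
      unsigned IgnoreAllWarnings : 1;
      unsigned WarningsAsErrors : 1;
      Severity ExtBehavior;
      State()
          : IgnoreAllWarnings(false), WarningsAsErrors(false),
            ExtBehavior(Severity::Ignored) {}
    };

    // Accessors delegate to whichever state is current, so the settings can
    // differ between modules (clang keeps a collection of states; one is
    // enough for the sketch).
    class Engine {
      State Cur;
      State *GetCurState() { return &Cur; }
      const State *GetCurState() const { return &Cur; }
    public:
      void setWarningsAsErrors(bool V) { GetCurState()->WarningsAsErrors = V; }
      bool getWarningsAsErrors() const { return GetCurState()->WarningsAsErrors; }
    };

    int main() {
      Engine E;
      E.setWarningsAsErrors(true);
      assert(E.getWarningsAsErrors());
    }
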
View File

@ -154,14 +154,12 @@ def note_constexpr_baa_insufficient_alignment : Note<
def note_constexpr_baa_value_insufficient_alignment : Note<
"value of the aligned pointer (%0) is not a multiple of the asserted %1 "
"%plural{1:byte|:bytes}1">;
def note_constexpr_array_unknown_bound_arithmetic : Note<
"cannot perform pointer arithmetic on pointer to array without constant bound">;
def warn_integer_constant_overflow : Warning<
"overflow in expression; result is %0 with type %1">,
InGroup<DiagGroup<"integer-overflow">>;
// This is a temporary diagnostic, and shall be removed once our
// This is a temporary diagnostic, and shall be removed once our
// implementation is complete, and like the preceding constexpr notes belongs
// in Sema.
def note_unimplemented_constexpr_lambda_feature_ast : Note<

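For context on the warn_integer_constant_overflow entry kept above, the warning fires on constant arithmetic such as the following (illustrative example, not from the commit):

    // clang (with -Winteger-overflow, on by default) reports roughly:
    //   overflow in expression; result is -2147483648 with type 'int'
    int overflowing() {
      return 2147483647 + 1;  // INT_MAX + 1 overflows 'int'
    }
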
View File

@ -122,15 +122,21 @@ public:
bool wasUpgradedFromWarning() const { return WasUpgradedFromWarning; }
void setUpgradedFromWarning(bool Value) { WasUpgradedFromWarning = Value; }
/// Serialize the bits that aren't based on context.
unsigned serializeBits() const {
return (WasUpgradedFromWarning << 3) | Severity;
/// Serialize this mapping as a raw integer.
unsigned serialize() const {
return (IsUser << 7) | (IsPragma << 6) | (HasNoWarningAsError << 5) |
(HasNoErrorAsFatal << 4) | (WasUpgradedFromWarning << 3) | Severity;
}
static diag::Severity deserializeSeverity(unsigned Bits) {
return (diag::Severity)(Bits & 0x7);
}
static bool deserializeUpgradedFromWarning(unsigned Bits) {
return Bits >> 3;
/// Deserialize a mapping.
static DiagnosticMapping deserialize(unsigned Bits) {
DiagnosticMapping Result;
Result.IsUser = (Bits >> 7) & 1;
Result.IsPragma = (Bits >> 6) & 1;
Result.HasNoWarningAsError = (Bits >> 5) & 1;
Result.HasNoErrorAsFatal = (Bits >> 4) & 1;
Result.WasUpgradedFromWarning = (Bits >> 3) & 1;
Result.Severity = Bits & 0x7;
return Result;
}
};

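The widened serialize()/deserialize() pair above packs the entire mapping into a single byte: the low three bits hold the severity and bits 3-7 hold the flags. A self-contained round-trip sketch of that layout (field names follow the diff; this is not clang's actual class):

    #include <cassert>

    struct Mapping {
      unsigned Severity : 3;
      unsigned IsUser : 1;
      unsigned IsPragma : 1;
      unsigned HasNoWarningAsError : 1;
      unsigned HasNoErrorAsFatal : 1;
      unsigned WasUpgradedFromWarning : 1;

      unsigned serialize() const {
        return (IsUser << 7) | (IsPragma << 6) | (HasNoWarningAsError << 5) |
               (HasNoErrorAsFatal << 4) | (WasUpgradedFromWarning << 3) | Severity;
      }
      static Mapping deserialize(unsigned Bits) {
        Mapping M{};
        M.IsUser = (Bits >> 7) & 1;
        M.IsPragma = (Bits >> 6) & 1;
        M.HasNoWarningAsError = (Bits >> 5) & 1;
        M.HasNoErrorAsFatal = (Bits >> 4) & 1;
        M.WasUpgradedFromWarning = (Bits >> 3) & 1;
        M.Severity = Bits & 0x7;
        return M;
      }
    };

    int main() {
      Mapping M{};
      M.Severity = 2;            // e.g. "warning"
      M.IsPragma = 1;
      M.HasNoWarningAsError = 1;
      Mapping R = Mapping::deserialize(M.serialize());
      assert(R.Severity == 2 && R.IsPragma && R.HasNoWarningAsError);
    }
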
View File

@ -2820,9 +2820,6 @@ def warn_cconv_structors : Warning<
def err_regparm_mismatch : Error<"function declared with regparm(%0) "
"attribute was previously declared "
"%plural{0:without the regparm|:with the regparm(%1)}1 attribute">;
def err_returns_retained_mismatch : Error<
"function declared with the ns_returns_retained attribute "
"was previously declared without the ns_returns_retained attribute">;
def err_function_attribute_mismatch : Error<
"function declared with %0 attribute "
"was previously declared without the %0 attribute">;
@ -8048,7 +8045,7 @@ def err_64_bit_builtin_32_bit_tgt : Error<
"this builtin is only available on 64-bit targets">;
def err_ppc_builtin_only_on_pwr7 : Error<
"this builtin is only valid on POWER7 or later CPUs">;
def err_x86_builtin_32_bit_tgt : Error<
def err_x86_builtin_64_only : Error<
"this builtin is only available on x86-64 targets">;
def err_x86_builtin_invalid_rounding : Error<
"invalid rounding argument">;

View File

@ -146,7 +146,10 @@ def err_module_odr_violation_mismatch_decl_diff : Error<
"method %4 is %select{not static|static}5|"
"method %4 is %select{not volatile|volatile}5|"
"method %4 is %select{not const|const}5|"
"method %4 is %select{not inline|inline}5}3">;
"method %4 is %select{not inline|inline}5|"
"method %4 that has %5 parameter%s5|"
"method %4 with %ordinal5 parameter of type %6%select{| decayed from %8}7|"
"method %4 with %ordinal5 parameter named %6}3">;
def note_module_odr_violation_mismatch_decl_diff : Note<"but in '%0' found "
"%select{"
@ -166,7 +169,10 @@ def note_module_odr_violation_mismatch_decl_diff : Note<"but in '%0' found "
"method %2 is %select{not static|static}3|"
"method %2 is %select{not volatile|volatile}3|"
"method %2 is %select{not const|const}3|"
"method %2 is %select{not inline|inline}3}1">;
"method %2 is %select{not inline|inline}3|"
"method %2 that has %3 parameter%s3|"
"method %2 with %ordinal3 parameter of type %4%select{| decayed from %6}5|"
"method %2 with %ordinal3 parameter named %4}1">;
def warn_module_uses_date_time : Warning<
"%select{precompiled header|module}0 uses __DATE__ or __TIME__">,

View File

@ -10068,9 +10068,7 @@ private:
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartImpl(CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinMSVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);

View File

@ -17,6 +17,7 @@ namespace clang {
extern const char * const CoreFoundationObjectiveC;
extern const char * const LogicError;
extern const char * const MemoryCoreFoundationObjectiveC;
extern const char * const MemoryError;
extern const char * const UnixAPI;
}
}

View File

@ -148,8 +148,7 @@ namespace {
static unsigned
findMostDerivedSubobject(ASTContext &Ctx, APValue::LValueBase Base,
ArrayRef<APValue::LValuePathEntry> Path,
uint64_t &ArraySize, QualType &Type, bool &IsArray,
bool &IsUnsizedArray) {
uint64_t &ArraySize, QualType &Type, bool &IsArray) {
// This only accepts LValueBases from APValues, and APValues don't support
// arrays that lack size info.
assert(!isBaseAnAllocSizeCall(Base) &&
@ -158,34 +157,28 @@ namespace {
Type = getType(Base);
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
if (auto AT = Ctx.getAsArrayType(Type)) {
if (Type->isArrayType()) {
const ConstantArrayType *CAT =
cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
Type = CAT->getElementType();
ArraySize = CAT->getSize().getZExtValue();
MostDerivedLength = I + 1;
IsArray = true;
if (auto CAT = Ctx.getAsConstantArrayType(Type))
ArraySize = CAT->getSize().getZExtValue();
else {
ArraySize = 0;
IsUnsizedArray = true;
}
Type = AT->getElementType();
} else if (Type->isAnyComplexType()) {
const ComplexType *CT = Type->castAs<ComplexType>();
Type = CT->getElementType();
ArraySize = 2;
MostDerivedLength = I + 1;
IsArray = true;
IsUnsizedArray = false;
} else if (const FieldDecl *FD = getAsField(Path[I])) {
Type = FD->getType();
ArraySize = 0;
MostDerivedLength = I + 1;
IsArray = false;
IsUnsizedArray = false;
} else {
// Path[I] describes a base class.
ArraySize = 0;
IsArray = false;
IsUnsizedArray = false;
}
}
return MostDerivedLength;
@ -207,9 +200,8 @@ namespace {
/// Is this a pointer one past the end of an object?
unsigned IsOnePastTheEnd : 1;
/// Indicator of whether the most-derived object is an unsized array (e.g.
/// of unknown bound).
unsigned MostDerivedIsAnUnsizedArray : 1;
/// Indicator of whether the first entry is an unsized array.
unsigned FirstEntryIsAnUnsizedArray : 1;
/// Indicator of whether the most-derived object is an array element.
unsigned MostDerivedIsArrayElement : 1;
@ -239,28 +231,25 @@ namespace {
explicit SubobjectDesignator(QualType T)
: Invalid(false), IsOnePastTheEnd(false),
MostDerivedIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
MostDerivedPathLength(0), MostDerivedArraySize(0),
MostDerivedType(T) {}
SubobjectDesignator(ASTContext &Ctx, const APValue &V)
: Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
MostDerivedIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
MostDerivedPathLength(0), MostDerivedArraySize(0) {
assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
if (!Invalid) {
IsOnePastTheEnd = V.isLValueOnePastTheEnd();
ArrayRef<PathEntry> VEntries = V.getLValuePath();
Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
if (auto Base = V.getLValueBase()) {
if (auto Decl = Base.dyn_cast<ValueDecl const*>())
Base = cast<ValueDecl>(Decl->getMostRecentDecl());
bool IsArray = false, IsUnsizedArray = false;
if (V.getLValueBase()) {
bool IsArray = false;
MostDerivedPathLength = findMostDerivedSubobject(
Ctx, Base, V.getLValuePath(), MostDerivedArraySize,
MostDerivedType, IsArray, IsUnsizedArray);
MostDerivedIsArrayElement = IsArray;
MostDerivedIsAnUnsizedArray = IsUnsizedArray;
Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
MostDerivedType, IsArray);
MostDerivedIsArrayElement = IsArray;
}
}
}
@ -274,7 +263,7 @@ namespace {
/// known bound.
bool isMostDerivedAnUnsizedArray() const {
assert(!Invalid && "Calling this makes no sense on invalid designators");
return MostDerivedIsAnUnsizedArray;
return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
}
/// Determine what the most derived array's size is. Results in an assertion
@ -314,7 +303,6 @@ namespace {
// This is a most-derived object.
MostDerivedType = CAT->getElementType();
MostDerivedIsArrayElement = true;
MostDerivedIsAnUnsizedArray = false;
MostDerivedArraySize = CAT->getSize().getZExtValue();
MostDerivedPathLength = Entries.size();
}
@ -327,7 +315,6 @@ namespace {
MostDerivedType = ElemTy;
MostDerivedIsArrayElement = true;
MostDerivedIsAnUnsizedArray = true;
// The value in MostDerivedArraySize is undefined in this case. So, set it
// to an arbitrary value that's likely to loudly break things if it's
// used.
@ -346,7 +333,6 @@ namespace {
if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
MostDerivedType = FD->getType();
MostDerivedIsArrayElement = false;
MostDerivedIsAnUnsizedArray = false;
MostDerivedArraySize = 0;
MostDerivedPathLength = Entries.size();
}
@ -361,14 +347,53 @@ namespace {
// is unlikely to matter.
MostDerivedType = EltTy;
MostDerivedIsArrayElement = true;
MostDerivedIsAnUnsizedArray = false;
MostDerivedArraySize = 2;
MostDerivedPathLength = Entries.size();
}
void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
const APSInt &N);
/// Add N to the address of this subobject.
void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N);
void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N) {
if (Invalid || !N) return;
uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
if (isMostDerivedAnUnsizedArray()) {
// Can't verify -- trust that the user is doing the right thing (or if
// not, trust that the caller will catch the bad behavior).
// FIXME: Should we reject if this overflows, at least?
Entries.back().ArrayIndex += TruncatedN;
return;
}
// [expr.add]p4: For the purposes of these operators, a pointer to a
// nonarray object behaves the same as a pointer to the first element of
// an array of length one with the type of the object as its element type.
bool IsArray = MostDerivedPathLength == Entries.size() &&
MostDerivedIsArrayElement;
uint64_t ArrayIndex =
IsArray ? Entries.back().ArrayIndex : (uint64_t)IsOnePastTheEnd;
uint64_t ArraySize =
IsArray ? getMostDerivedArraySize() : (uint64_t)1;
if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
// Calculate the actual index in a wide enough type, so we can include
// it in the note.
N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
(llvm::APInt&)N += ArrayIndex;
assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
diagnosePointerArithmetic(Info, E, N);
setInvalid();
return;
}
ArrayIndex += TruncatedN;
assert(ArrayIndex <= ArraySize &&
"bounds check succeeded for out-of-bounds index");
if (IsArray)
Entries.back().ArrayIndex = ArrayIndex;
else
IsOnePastTheEnd = (ArrayIndex != 0);
}
};
/// A stack frame in the constexpr call stack.
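
The adjustIndex() body moved inline above enforces [expr.add]p4, under which a pointer to a non-array object is treated as a pointer into an array of length one. In source terms (illustrative only):

    constexpr int One = 1;
    constexpr const int *PastEnd = &One + 1;   // OK: one past the end
    // constexpr const int *TooFar = &One + 2; // error: not a constant
                                               //   expression (out of bounds)

    constexpr int Arr[3] = {1, 2, 3};
    constexpr const int *ArrEnd = Arr + 3;     // OK: one past the end of Arr
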
@ -470,7 +495,7 @@ namespace {
// FIXME: Force the precision of the source value down so we don't
// print digits which are usually useless (we don't really care here if
// we truncate a digit by accident in edge cases). Ideally,
// APFloat::toString would automatically print the shortest
// APFloat::toString would automatically print the shortest
// representation which rounds to the correct value, but it's a bit
// tricky to implement.
unsigned precision =
@ -695,7 +720,7 @@ namespace {
private:
OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId,
unsigned ExtraNotes, bool IsCCEDiag) {
if (EvalStatus.Diag) {
// If we have a prior diagnostic, it will be noting that the expression
// isn't a constant expression. This diagnostic is more important,
@ -748,7 +773,7 @@ namespace {
unsigned ExtraNotes = 0) {
return Diag(Loc, DiagId, ExtraNotes, false);
}
OptionalDiagnostic FFDiag(const Expr *E, diag::kind DiagId
= diag::note_invalid_subexpr_in_const_expr,
unsigned ExtraNotes = 0) {
@ -1061,53 +1086,6 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
setInvalid();
}
void SubobjectDesignator::adjustIndex(EvalInfo &Info, const Expr *E, APSInt N) {
if (Invalid || !N) return;
uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
if (isMostDerivedAnUnsizedArray()) {
// If we're dealing with an array without constant bound, the expression is
// not a constant expression.
if (!Info.checkingPotentialConstantExpression())
Info.CCEDiag(E, diag::note_constexpr_array_unknown_bound_arithmetic);
// Can't verify -- trust that the user is doing the right thing (or if
// not, trust that the caller will catch the bad behavior).
// FIXME: Should we reject if this overflows, at least?
Entries.back().ArrayIndex += TruncatedN;
return;
}
// [expr.add]p4: For the purposes of these operators, a pointer to a
// nonarray object behaves the same as a pointer to the first element of
// an array of length one with the type of the object as its element type.
bool IsArray = MostDerivedPathLength == Entries.size() &&
MostDerivedIsArrayElement;
uint64_t ArrayIndex =
IsArray ? Entries.back().ArrayIndex : (uint64_t)IsOnePastTheEnd;
uint64_t ArraySize =
IsArray ? getMostDerivedArraySize() : (uint64_t)1;
if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
// Calculate the actual index in a wide enough type, so we can include
// it in the note.
N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
(llvm::APInt&)N += ArrayIndex;
assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
diagnosePointerArithmetic(Info, E, N);
setInvalid();
return;
}
ArrayIndex += TruncatedN;
assert(ArrayIndex <= ArraySize &&
"bounds check succeeded for out-of-bounds index");
if (IsArray)
Entries.back().ArrayIndex = ArrayIndex;
else
IsOnePastTheEnd = (ArrayIndex != 0);
}
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
APValue *Arguments)
@ -1236,6 +1214,8 @@ namespace {
IsNullPtr);
else {
assert(!InvalidBase && "APValues can't handle invalid LValue bases");
assert(!Designator.FirstEntryIsAnUnsizedArray &&
"Unsized array with a valid base?");
V = APValue(Base, Offset, Designator.Entries,
Designator.IsOnePastTheEnd, CallIndex, IsNullPtr);
}
@ -1300,9 +1280,12 @@ namespace {
if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
Designator.addDeclUnchecked(D, Virtual);
}
void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) {
if (checkSubobject(Info, E, CSK_ArrayToPointer))
Designator.addUnsizedArrayUnchecked(ElemTy);
void addUnsizedArray(EvalInfo &Info, QualType ElemTy) {
assert(Designator.Entries.empty() && getType(Base)->isPointerType());
assert(isBaseAnAllocSizeCall(Base) &&
"Only alloc_size bases can have unsized arrays");
Designator.FirstEntryIsAnUnsizedArray = true;
Designator.addUnsizedArrayUnchecked(ElemTy);
}
void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
if (checkSubobject(Info, E, CSK_ArrayToPointer))
@ -3033,15 +3016,6 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal))
return CompleteObject();
// The complete object can be an array of unknown bound, in which case we
// have to find the most recent declaration and adjust the type accordingly.
if (Info.Ctx.getAsIncompleteArrayType(BaseType)) {
QualType MostRecentType =
cast<ValueDecl const>(D->getMostRecentDecl())->getType();
if (Info.Ctx.getAsConstantArrayType(MostRecentType))
BaseType = MostRecentType;
}
} else {
const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
@ -4124,13 +4098,13 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
if (Info.getLangOpts().CPlusPlus11) {
const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
// If this function is not constexpr because it is an inherited
// non-constexpr constructor, diagnose that directly.
auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
if (CD && CD->isInheritingConstructor()) {
auto *Inherited = CD->getInheritedConstructor().getConstructor();
if (!Inherited->isConstexpr())
if (!Inherited->isConstexpr())
DiagDecl = CD = Inherited;
}
@ -4667,7 +4641,7 @@ public:
return false;
This = &ThisVal;
Args = Args.slice(1);
} else if (MD && MD->isLambdaStaticInvoker()) {
} else if (MD && MD->isLambdaStaticInvoker()) {
// Map the static invoker for the lambda back to the call operator.
// Conveniently, we don't have to slice out the 'this' argument (as is
// being done for the non-static case), since a static member function
@ -4702,7 +4676,7 @@ public:
FD = LambdaCallOp;
}
} else
return Error(E);
@ -5462,7 +5436,7 @@ static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
Result.setInvalid(E);
QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType();
Result.addUnsizedArray(Info, E, Pointee);
Result.addUnsizedArray(Info, Pointee);
return true;
}
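
The addUnsizedArray()/FirstEntryIsAnUnsizedArray changes above confine "unsized array" designators to pointers returned by alloc_size functions, which is how the constant evaluator services __builtin_object_size for such calls. A rough example of the user-level behavior being modeled (an assumed illustration, not taken from the commit's tests):

    #include <cstddef>

    // The callee has no visible definition; only the attribute tells the
    // evaluator how many bytes the returned pointer refers to.
    void *my_alloc(std::size_t n) __attribute__((alloc_size(1)));

    std::size_t known_size() {
      // clang can typically fold this to 32 purely from the alloc_size
      // attribute, by treating the result as a pointer into an array whose
      // bound comes from the call site.
      return __builtin_object_size(static_cast<char *>(my_alloc(32)), 0);
    }
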
@ -5541,7 +5515,7 @@ public:
// Update 'Result' to refer to the data member/field of the closure object
// that represents the '*this' capture.
if (!HandleLValueMember(Info, E, Result,
Info.CurrentCall->LambdaThisCaptureField))
Info.CurrentCall->LambdaThisCaptureField))
return false;
// If we captured '*this' by reference, replace the field with its referent.
if (Info.CurrentCall->LambdaThisCaptureField->getType()
@ -5682,18 +5656,12 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
Info, Result, SubExpr))
return false;
}
// The result is a pointer to the first element of the array.
if (const ConstantArrayType *CAT
= Info.Ctx.getAsConstantArrayType(SubExpr->getType()))
Result.addArray(Info, E, CAT);
// If the array hasn't been given a bound yet, add it as an unsized one.
else {
auto AT = Info.Ctx.getAsArrayType(SubExpr->getType());
assert(AT && "Array to pointer decay on non-array object?");
Result.addUnsizedArray(Info, E, AT->getElementType());
}
else
Result.Designator.setInvalid();
return true;
case CK_FunctionToPointerDecay:
@ -5761,7 +5729,7 @@ bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
Result.setInvalid(E);
QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType();
Result.addUnsizedArray(Info, E, PointeeTy);
Result.addUnsizedArray(Info, PointeeTy);
return true;
}
@ -6395,7 +6363,7 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
if (ClosureClass->isInvalidDecl()) return false;
if (Info.checkingPotentialConstantExpression()) return true;
const size_t NumFields =
std::distance(ClosureClass->field_begin(), ClosureClass->field_end());
@ -6414,7 +6382,7 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
assert(CaptureInitIt != E->capture_init_end());
// Get the initializer for this field
Expr *const CurFieldInit = *CaptureInitIt++;
// If there is no initializer, either this is a VLA or an error has
// occurred.
if (!CurFieldInit)
@ -6615,18 +6583,18 @@ VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// The number of initializers can be less than the number of
// vector elements. For OpenCL, this can be due to nested vector
// initialization. For GCC compatibility, missing trailing elements
// initialization. For GCC compatibility, missing trailing elements
// should be initialized with zeroes.
unsigned CountInits = 0, CountElts = 0;
while (CountElts < NumElements) {
// Handle nested vector initialization.
if (CountInits < NumInits
if (CountInits < NumInits
&& E->getInit(CountInits)->getType()->isVectorType()) {
APValue v;
if (!EvaluateVector(E->getInit(CountInits), v, Info))
return Error(E);
unsigned vlen = v.getVectorLength();
for (unsigned j = 0; j < vlen; j++)
for (unsigned j = 0; j < vlen; j++)
Elements.push_back(v.getVectorElt(j));
CountElts += vlen;
} else if (EltTy->isIntegerType()) {
@ -6902,7 +6870,7 @@ public:
}
bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
"Invalid evaluation result.");
@ -6916,7 +6884,7 @@ public:
}
bool Success(uint64_t Value, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
return true;
@ -6992,7 +6960,7 @@ public:
}
return Success(Info.ArrayInitIndex, E);
}
// Note, GNU defines __null as an integer, not a pointer.
bool VisitGNUNullExpr(const GNUNullExpr *E) {
return ZeroInitialization(E);
@ -7356,8 +7324,10 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
unsigned I = 0;
QualType BaseType = getType(Base);
// If this is an alloc_size base, we should ignore the initial array index
if (isBaseAnAllocSizeCall(Base)) {
if (LVal.Designator.FirstEntryIsAnUnsizedArray) {
assert(isBaseAnAllocSizeCall(Base) &&
"Unsized array in non-alloc_size call?");
// If this is an alloc_size base, we should ignore the initial array index
++I;
BaseType = BaseType->castAs<PointerType>()->getPointeeType();
}
@ -8144,12 +8114,12 @@ bool DataRecursiveIntBinOpEvaluator::
Result = RHSResult.Val;
return true;
}
if (E->isLogicalOp()) {
bool lhsResult, rhsResult;
bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult);
bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult);
if (LHSIsOK) {
if (RHSIsOK) {
if (E->getOpcode() == BO_LOr)
@ -8165,26 +8135,26 @@ bool DataRecursiveIntBinOpEvaluator::
return Success(rhsResult, E, Result);
}
}
return false;
}
assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
E->getRHS()->getType()->isIntegralOrEnumerationType());
if (LHSResult.Failed || RHSResult.Failed)
return false;
const APValue &LHSVal = LHSResult.Val;
const APValue &RHSVal = RHSResult.Val;
// Handle cases like (unsigned long)&a + 4.
if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
Result = LHSVal;
addOrSubLValueAsInteger(Result, RHSVal.getInt(), E->getOpcode() == BO_Sub);
return true;
}
// Handle cases like 4 + (unsigned long)&a
if (E->getOpcode() == BO_Add &&
RHSVal.isLValue() && LHSVal.isInt()) {
@ -8192,7 +8162,7 @@ bool DataRecursiveIntBinOpEvaluator::
addOrSubLValueAsInteger(Result, LHSVal.getInt(), /*IsSub*/false);
return true;
}
if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
// Handle (intptr_t)&&A - (intptr_t)&&B.
if (!LHSVal.getLValueOffset().isZero() ||
@ -8231,7 +8201,7 @@ bool DataRecursiveIntBinOpEvaluator::
void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
Job &job = Queue.back();
switch (job.Kind) {
case Job::AnyExprKind: {
if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
@ -8241,12 +8211,12 @@ void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
return;
}
}
EvaluateExpr(job.E, Result);
Queue.pop_back();
return;
}
case Job::BinOpKind: {
const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
bool SuppressRHSDiags = false;
@ -8261,7 +8231,7 @@ void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
enqueue(Bop->getRHS());
return;
}
case Job::BinOpVisitedLHSKind: {
const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
EvalResult RHS;
@ -8271,7 +8241,7 @@ void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
return;
}
}
llvm_unreachable("Invalid Job::Kind!");
}
@ -8783,7 +8753,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
if (!BaseRT)
return Error(OOE);
// Add the offset to the base.
Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
break;
@ -9978,7 +9948,7 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
IsConst = false;
return true;
}
// FIXME: Evaluating values of large array and record types can cause
// performance problems. Only do so in C++11 for now.
if (Exp->isRValue() && (Exp->getType()->isArrayType() ||
@ -10000,7 +9970,7 @@ bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
bool IsConst;
if (FastEvaluateAsRValue(this, Result, Ctx, IsConst, false))
return IsConst;
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
return ::EvaluateAsRValue(Info, this, Result.Val);
}

View File

@ -169,6 +169,11 @@ public:
Inherited::VisitValueDecl(D);
}
void VisitParmVarDecl(const ParmVarDecl *D) {
// TODO: Handle default arguments.
Inherited::VisitParmVarDecl(D);
}
void VisitAccessSpecDecl(const AccessSpecDecl *D) {
ID.AddInteger(D->getAccess());
Inherited::VisitAccessSpecDecl(D);
@ -202,6 +207,12 @@ public:
Hash.AddBoolean(D->isPure());
Hash.AddBoolean(D->isDeletedAsWritten());
ID.AddInteger(D->param_size());
for (auto *Param : D->parameters()) {
Hash.AddSubDecl(Param);
}
Inherited::VisitFunctionDecl(D);
}
@ -256,6 +267,11 @@ void ODRHash::AddSubDecl(const Decl *D) {
void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
assert(Record && Record->hasDefinition() &&
"Expected non-null record to be a definition.");
if (isa<ClassTemplateSpecializationDecl>(Record)) {
return;
}
AddDecl(Record);
// Filter out sub-Decls which will not be processed in order to get an
@ -315,6 +331,14 @@ public:
}
}
void AddQualType(QualType T) {
Hash.AddQualType(T);
}
void VisitQualifiers(Qualifiers Quals) {
ID.AddInteger(Quals.getAsOpaqueValue());
}
void Visit(const Type *T) {
ID.AddInteger(T->getTypeClass());
Inherited::Visit(T);
@ -322,11 +346,69 @@ public:
void VisitType(const Type *T) {}
void VisitAdjustedType(const AdjustedType *T) {
AddQualType(T->getOriginalType());
AddQualType(T->getAdjustedType());
VisitType(T);
}
void VisitDecayedType(const DecayedType *T) {
AddQualType(T->getDecayedType());
AddQualType(T->getPointeeType());
VisitAdjustedType(T);
}
void VisitArrayType(const ArrayType *T) {
AddQualType(T->getElementType());
ID.AddInteger(T->getSizeModifier());
VisitQualifiers(T->getIndexTypeQualifiers());
VisitType(T);
}
void VisitConstantArrayType(const ConstantArrayType *T) {
T->getSize().Profile(ID);
VisitArrayType(T);
}
void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
AddStmt(T->getSizeExpr());
VisitArrayType(T);
}
void VisitIncompleteArrayType(const IncompleteArrayType *T) {
VisitArrayType(T);
}
void VisitVariableArrayType(const VariableArrayType *T) {
AddStmt(T->getSizeExpr());
VisitArrayType(T);
}
void VisitBuiltinType(const BuiltinType *T) {
ID.AddInteger(T->getKind());
VisitType(T);
}
void VisitFunctionType(const FunctionType *T) {
AddQualType(T->getReturnType());
T->getExtInfo().Profile(ID);
Hash.AddBoolean(T->isConst());
Hash.AddBoolean(T->isVolatile());
Hash.AddBoolean(T->isRestrict());
VisitType(T);
}
void VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
VisitFunctionType(T);
}
void VisitFunctionProtoType(const FunctionProtoType *T) {
ID.AddInteger(T->getNumParams());
for (auto ParamType : T->getParamTypes())
AddQualType(ParamType);
VisitFunctionType(T);
}
void VisitTypedefType(const TypedefType *T) {
AddDecl(T->getDecl());
Hash.AddQualType(T->getDecl()->getUnderlyingType());

View File

@ -67,18 +67,12 @@ DiagnosticsEngine::DiagnosticsEngine(IntrusiveRefCntPtr<DiagnosticIDs> diags,
ArgToStringCookie = nullptr;
AllExtensionsSilenced = 0;
IgnoreAllWarnings = false;
WarningsAsErrors = false;
EnableAllWarnings = false;
ErrorsAsFatal = false;
FatalsAsError = false;
SuppressSystemWarnings = false;
SuppressAfterFatalError = true;
SuppressAllDiagnostics = false;
ElideType = true;
PrintTemplateTree = false;
ShowColors = false;
ShowOverloads = Ovl_All;
ExtBehavior = diag::Severity::Ignored;
ErrorLimit = 0;
TemplateBacktraceLimit = 0;
@ -343,8 +337,8 @@ bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group,
return setSeverityForGroup(diag::Flavor::WarningOrError, Group,
diag::Severity::Fatal);
// Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
// potentially downgrade anything already mapped to be an error.
// Otherwise, we want to set the diagnostic mapping's "no Wfatal-errors" bit,
// and potentially downgrade anything already mapped to be a fatal error.
// Get the diagnostics in this group.
SmallVector<diag::kind, 8> GroupDiags;

View File

@ -420,7 +420,7 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
Result = Mapping.getSeverity();
// Upgrade ignored diagnostics if -Weverything is enabled.
if (Diag.EnableAllWarnings && Result == diag::Severity::Ignored &&
if (State->EnableAllWarnings && Result == diag::Severity::Ignored &&
!Mapping.isUser() && getBuiltinDiagClass(DiagID) != CLASS_REMARK)
Result = diag::Severity::Warning;
@ -435,7 +435,7 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
// For extension diagnostics that haven't been explicitly mapped, check if we
// should upgrade the diagnostic.
if (IsExtensionDiag && !Mapping.isUser())
Result = std::max(Result, Diag.ExtBehavior);
Result = std::max(Result, State->ExtBehavior);
// At this point, ignored errors can no longer be upgraded.
if (Result == diag::Severity::Ignored)
@ -443,28 +443,24 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
// Honor -w, which is lower in priority than pedantic-errors, but higher than
// -Werror.
if (Result == diag::Severity::Warning && Diag.IgnoreAllWarnings)
// FIXME: Under GCC, this also suppresses warnings that have been mapped to
// errors by -W flags and #pragma diagnostic.
if (Result == diag::Severity::Warning && State->IgnoreAllWarnings)
return diag::Severity::Ignored;
// If -Werror is enabled, map warnings to errors unless explicitly disabled.
if (Result == diag::Severity::Warning) {
if (Diag.WarningsAsErrors && !Mapping.hasNoWarningAsError())
if (State->WarningsAsErrors && !Mapping.hasNoWarningAsError())
Result = diag::Severity::Error;
}
// If -Wfatal-errors is enabled, map errors to fatal unless explicitly
// disabled.
if (Result == diag::Severity::Error) {
if (Diag.ErrorsAsFatal && !Mapping.hasNoErrorAsFatal())
if (State->ErrorsAsFatal && !Mapping.hasNoErrorAsFatal())
Result = diag::Severity::Fatal;
}
// If explicitly requested, map fatal errors to errors.
if (Result == diag::Severity::Fatal) {
if (Diag.FatalsAsError)
Result = diag::Severity::Error;
}
// Custom diagnostics always are emitted in system headers.
bool ShowInSystemHeader =
!GetDiagInfo(DiagID) || GetDiagInfo(DiagID)->WarnShowInSystemHeader;
@ -472,7 +468,7 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
// If we are in a system header, we ignore it. We look at the diagnostic class
// because we also want to ignore extensions and warnings in -Werror and
// -pedantic-errors modes, which *map* warnings/extensions to errors.
if (Diag.SuppressSystemWarnings && !ShowInSystemHeader && Loc.isValid() &&
if (State->SuppressSystemWarnings && !ShowInSystemHeader && Loc.isValid() &&
Diag.getSourceManager().isInSystemHeader(
Diag.getSourceManager().getExpansionLoc(Loc)))
return diag::Severity::Ignored;
@ -632,7 +628,7 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
// If a fatal error has already been emitted, silence all subsequent
// diagnostics.
if (Diag.FatalErrorOccurred) {
if (Diag.FatalErrorOccurred && Diag.SuppressAfterFatalError) {
if (DiagLevel >= DiagnosticIDs::Error &&
Diag.Client->IncludeInDiagnosticCounts()) {
++Diag.NumErrors;

View File

@ -1756,9 +1756,7 @@ void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
ConstructDefaultFnAttrList(F.getName(),
F.hasFnAttribute(llvm::Attribute::OptimizeNone),
/* AttrOnCallsite = */ false, FuncAttrs);
llvm::AttributeList AS = llvm::AttributeList::get(
getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
F.addAttributes(llvm::AttributeList::FunctionIndex, AS);
F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}
void CodeGenModule::ConstructAttributeList(

View File

@ -51,6 +51,64 @@ struct BinOpInfo {
BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
FPOptions FPFeatures;
const Expr *E; // Entire expr, for error unsupported. May not be binop.
/// Check if the binop can result in integer overflow.
bool mayHaveIntegerOverflow() const {
// Without constant input, we can't rule out overflow.
const auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
const auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
if (!LHSCI || !RHSCI)
return true;
// Assume overflow is possible, unless we can prove otherwise.
bool Overflow = true;
const auto &LHSAP = LHSCI->getValue();
const auto &RHSAP = RHSCI->getValue();
if (Opcode == BO_Add) {
if (Ty->hasSignedIntegerRepresentation())
(void)LHSAP.sadd_ov(RHSAP, Overflow);
else
(void)LHSAP.uadd_ov(RHSAP, Overflow);
} else if (Opcode == BO_Sub) {
if (Ty->hasSignedIntegerRepresentation())
(void)LHSAP.ssub_ov(RHSAP, Overflow);
else
(void)LHSAP.usub_ov(RHSAP, Overflow);
} else if (Opcode == BO_Mul) {
if (Ty->hasSignedIntegerRepresentation())
(void)LHSAP.smul_ov(RHSAP, Overflow);
else
(void)LHSAP.umul_ov(RHSAP, Overflow);
} else if (Opcode == BO_Div || Opcode == BO_Rem) {
if (Ty->hasSignedIntegerRepresentation() && !RHSCI->isZero())
(void)LHSAP.sdiv_ov(RHSAP, Overflow);
else
return false;
}
return Overflow;
}
/// Check if the binop computes a division or a remainder.
bool isDivisionLikeOperation() const {
return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
Opcode == BO_RemAssign;
}
/// Check if the binop can result in an integer division by zero.
bool mayHaveIntegerDivisionByZero() const {
if (isDivisionLikeOperation())
if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
return CI->isZero();
return true;
}
/// Check if the binop can result in a float division by zero.
bool mayHaveFloatDivisionByZero() const {
if (isDivisionLikeOperation())
if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
return CFP->isZero();
return true;
}
};
static bool MustVisitNullValue(const Expr *E) {
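
The new BinOpInfo helpers above let CanElideOverflowCheck() drop UBSan instrumentation when both operands are constants that provably cannot overflow or divide by zero. A standalone sketch of that decision using the checked-arithmetic builtins (an illustrative substitute for the llvm::APInt sadd_ov/uadd_ov family used in the diff):

    #include <cassert>
    #include <climits>

    // True if LHS + RHS would overflow a signed int; with constant inputs the
    // answer is itself a constant, which is what allows the emitter to skip
    // the -fsanitize=signed-integer-overflow check.
    static bool signedAddMayOverflow(int LHS, int RHS) {
      int Ignored;
      return __builtin_add_overflow(LHS, RHS, &Ignored);
    }

    int main() {
      assert(!signedAddMayOverflow(1, 2));       // check can be elided
      assert(signedAddMayOverflow(INT_MAX, 1));  // check must stay
    }
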
@ -85,9 +143,17 @@ static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
"Expected a unary or binary operator");
// If the binop has constant inputs and we can prove there is no overflow,
// we can elide the overflow check.
if (!Op.mayHaveIntegerOverflow())
return true;
// If a unary op has a widened operand, the op cannot overflow.
if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
return IsWidenedIntegerOp(Ctx, UO->getSubExpr());
// We usually don't need overflow checks for binops with widened operands.
// Multiplication with promoted unsigned operands is a special case.
const auto *BO = cast<BinaryOperator>(Op.E);
auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
if (!OptionalLHSTy)
@ -100,14 +166,14 @@ static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
QualType LHSTy = *OptionalLHSTy;
QualType RHSTy = *OptionalRHSTy;
// We usually don't need overflow checks for binary operations with widened
// operands. Multiplication with promoted unsigned operands is a special case.
// This is the simple case: binops without unsigned multiplication, and with
// widened operands. No overflow check is needed here.
if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
!LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
return true;
// The overflow check can be skipped if either one of the unpromoted types
// are less than half the size of the promoted type.
// For unsigned multiplication the overflow check can be elided if either one
// of the unpromoted types are less than half the size of the promoted type.
unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
(2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
@ -2377,7 +2443,8 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
const auto *BO = cast<BinaryOperator>(Ops.E);
if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
Ops.Ty->hasSignedIntegerRepresentation() &&
!IsWidenedIntegerOp(CGF.getContext(), BO->getLHS())) {
!IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
Ops.mayHaveIntegerOverflow()) {
llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
llvm::Value *IntMin =
@ -2400,11 +2467,13 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
Ops.Ty->isIntegerType()) {
Ops.Ty->isIntegerType() &&
(Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
} else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
Ops.Ty->isRealFloatingType()) {
Ops.Ty->isRealFloatingType() &&
Ops.mayHaveFloatDivisionByZero()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
@ -2439,7 +2508,8 @@ Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
Ops.Ty->isIntegerType()) {
Ops.Ty->isIntegerType() &&
(Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);

View File

@ -663,7 +663,7 @@ class CGObjCGNUstep : public CGObjCGNU {
}
// The lookup function is guaranteed not to capture the receiver pointer.
LookupFn->setDoesNotCapture(1);
LookupFn->addParamAttr(0, llvm::Attribute::NoCapture);
llvm::Value *args[] = {
EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),

View File

@ -892,10 +892,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
B.addAttribute(llvm::Attribute::NoInline);
F->addAttributes(
llvm::AttributeList::FunctionIndex,
llvm::AttributeList::get(F->getContext(),
llvm::AttributeList::FunctionIndex, B));
F->addAttributes(llvm::AttributeList::FunctionIndex, B);
return;
}
@ -961,9 +958,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
B.addAttribute(llvm::Attribute::MinSize);
}
F->addAttributes(llvm::AttributeList::FunctionIndex,
llvm::AttributeList::get(
F->getContext(), llvm::AttributeList::FunctionIndex, B));
F->addAttributes(llvm::AttributeList::FunctionIndex, B);
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
if (alignment)
@ -2029,9 +2024,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
if (ExtraAttrs.hasAttributes(llvm::AttributeList::FunctionIndex)) {
llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeList::FunctionIndex);
F->addAttributes(llvm::AttributeList::FunctionIndex,
llvm::AttributeList::get(
VMContext, llvm::AttributeList::FunctionIndex, B));
F->addAttributes(llvm::AttributeList::FunctionIndex, B);
}
if (!DontDefer) {

View File

@ -1901,10 +1901,7 @@ void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
// Now add the 'alignstack' attribute with a value of 16.
llvm::AttrBuilder B;
B.addStackAlignmentAttr(16);
Fn->addAttributes(
llvm::AttributeList::FunctionIndex,
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex, B));
Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
}
if (FD->hasAttr<AnyX86InterruptAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
@ -5449,10 +5446,7 @@ public:
// the backend to perform a realignment as part of the function prologue.
llvm::AttrBuilder B;
B.addStackAlignmentAttr(8);
Fn->addAttributes(
llvm::AttributeList::FunctionIndex,
llvm::AttributeList::get(CGM.getLLVMContext(),
llvm::AttributeList::FunctionIndex, B));
Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
}
};

View File

@ -511,7 +511,6 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< "-fsanitize-coverage=edge";
// Basic block tracing and 8-bit counters require some type of coverage
// enabled.
int CoverageTypes = CoverageFunc | CoverageBB | CoverageEdge;
if (CoverageFeatures & CoverageTraceBB)
D.Diag(clang::diag::warn_drv_deprecated_arg)
<< "-fsanitize-coverage=trace-bb"
@ -520,9 +519,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
D.Diag(clang::diag::warn_drv_deprecated_arg)
<< "-fsanitize-coverage=8bit-counters"
<< "-fsanitize-coverage=trace-pc-guard";
int InsertionPointTypes = CoverageFunc | CoverageBB | CoverageEdge;
if ((CoverageFeatures & InsertionPointTypes) &&
!(CoverageFeatures &(CoverageTracePC | CoverageTracePCGuard))) {
D.Diag(clang::diag::warn_drv_deprecated_arg)
<< "-fsanitize-coverage=[func|bb|edge]"
<< "-fsanitize-coverage=[func|bb|edge],[trace-pc-guard|trace-pc]";
}
// trace-pc w/o func/bb/edge implies edge.
if ((CoverageFeatures & (CoverageTracePC | CoverageTracePCGuard)) &&
!(CoverageFeatures & CoverageTypes))
!(CoverageFeatures & InsertionPointTypes))
CoverageFeatures |= CoverageEdge;
if (AllAddedKinds & Address) {

View File

@ -1677,6 +1677,30 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
ConsumeToken();
ConsumeToken();
Data.MapType =
IsMapClauseModifierToken(Tok)
? static_cast<OpenMPMapClauseKind>(
getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)))
: OMPC_MAP_unknown;
if (Data.MapType == OMPC_MAP_unknown ||
Data.MapType == OMPC_MAP_always)
Diag(Tok, diag::err_omp_unknown_map_type);
ConsumeToken();
} else {
Data.MapType = OMPC_MAP_tofrom;
Data.IsMapTypeImplicit = true;
}
} else if (IsMapClauseModifierToken(PP.LookAhead(0))) {
if (PP.LookAhead(1).is(tok::colon)) {
Data.MapTypeModifier = Data.MapType;
if (Data.MapTypeModifier != OMPC_MAP_always) {
Diag(Tok, diag::err_omp_unknown_map_type_modifier);
Data.MapTypeModifier = OMPC_MAP_unknown;
} else
MapTypeModifierSpecified = true;
ConsumeToken();
Data.MapType =
IsMapClauseModifierToken(Tok)
? static_cast<OpenMPMapClauseKind>(

View File
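For reference, the syntax being parsed here separates an optional map-type-modifier ('always') from the map type itself; the accepted form looks like the following, and malformed modifier/type combinations are what the err_omp_unknown_map_type and err_omp_unknown_map_type_modifier diagnostics above report (sketch, assuming compilation with -fopenmp):

    void bump(int *a, int n) {
    #pragma omp target map(always, tofrom : a[0:n])
      {
        for (int i = 0; i < n; ++i)
          a[i] += 1;
      }
    }
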

@ -759,7 +759,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
if (SemaBuiltinVAStart(TheCall))
if (SemaBuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
case Builtin::BI__va_start: {
@ -770,7 +770,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
default:
if (SemaBuiltinVAStart(TheCall))
if (SemaBuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
}
@ -2090,7 +2090,7 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return SemaBuiltinCpuSupports(*this, TheCall);
if (BuiltinID == X86::BI__builtin_ms_va_start)
return SemaBuiltinMSVAStart(TheCall);
return SemaBuiltinVAStart(BuiltinID, TheCall);
// If the intrinsic has rounding or SAE make sure its valid.
if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
@ -3611,11 +3611,81 @@ ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
return Result;
}
/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
bool IsWindows = TT.isOSWindows();
bool IsMSVAStart = BuiltinID == X86::BI__builtin_ms_va_start;
if (IsX64) {
clang::CallingConv CC = CC_C;
if (const FunctionDecl *FD = S.getCurFunctionDecl())
CC = FD->getType()->getAs<FunctionType>()->getCallConv();
if (IsMSVAStart) {
// Don't allow this in System V ABI functions.
if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_X86_64Win64))
return S.Diag(Fn->getLocStart(),
diag::err_ms_va_start_used_in_sysv_function);
} else {
// On x86-64 Unix, don't allow this in Win64 ABI functions.
// On x64 Windows, don't allow this in System V ABI functions.
// (Yes, that means there's no corresponding way to support variadic
// System V ABI functions on Windows.)
if ((IsWindows && CC == CC_X86_64SysV) ||
(!IsWindows && CC == CC_X86_64Win64))
return S.Diag(Fn->getLocStart(),
diag::err_va_start_used_in_wrong_abi_function)
<< !IsWindows;
}
return false;
}
if (IsMSVAStart)
return S.Diag(Fn->getLocStart(), diag::err_x86_builtin_64_only);
return false;
}
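
A sketch of the calling-convention rules checkVAStartABI() enforces on x86-64, assuming a Linux (SysV) host; the declarations are illustrative, not the commit's own tests:

    #include <cstdarg>

    // In a normal SysV variadic function, va_start is correct;
    // __builtin_ms_va_start here would hit err_ms_va_start_used_in_sysv_function.
    int sum_sysv(int n, ...) {
      va_list ap;
      va_start(ap, n);
      int total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, int);
      va_end(ap);
      return total;
    }

    // In a Win64-ABI function, __builtin_ms_va_start is required; plain
    // va_start here would hit err_va_start_used_in_wrong_abi_function.
    __attribute__((ms_abi)) int sum_ms(int n, ...) {
      __builtin_ms_va_list ap;
      __builtin_ms_va_start(ap, n);
      int total = 0;
      for (int i = 0; i < n; ++i)
        total += __builtin_va_arg(ap, int);
      __builtin_ms_va_end(ap);
      return total;
    }
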
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
ParmVarDecl **LastParam = nullptr) {
// Determine whether the current function, block, or obj-c method is variadic
// and get its parameter list.
bool IsVariadic = false;
ArrayRef<ParmVarDecl *> Params;
if (BlockScopeInfo *CurBlock = S.getCurBlock()) {
IsVariadic = CurBlock->TheDecl->isVariadic();
Params = CurBlock->TheDecl->parameters();
} else if (FunctionDecl *FD = S.getCurFunctionDecl()) {
IsVariadic = FD->isVariadic();
Params = FD->parameters();
} else if (ObjCMethodDecl *MD = S.getCurMethodDecl()) {
IsVariadic = MD->isVariadic();
// FIXME: This isn't correct for methods (results in bogus warning).
Params = MD->parameters();
} else {
llvm_unreachable("unknown va_start context");
}
if (!IsVariadic) {
S.Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
return true;
}
if (LastParam)
*LastParam = Params.empty() ? nullptr : Params.back();
return false;
}
/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity. Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
Expr *Fn = TheCall->getCallee();
if (checkVAStartABI(*this, BuiltinID, Fn))
return true;
if (TheCall->getNumArgs() > 2) {
Diag(TheCall->getArg(2)->getLocStart(),
diag::err_typecheck_call_too_many_args)
@ -3636,20 +3706,10 @@ bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
if (checkBuiltinArgument(*this, TheCall, 0))
return true;
// Determine whether the current function is variadic or not.
BlockScopeInfo *CurBlock = getCurBlock();
bool isVariadic;
if (CurBlock)
isVariadic = CurBlock->TheDecl->isVariadic();
else if (FunctionDecl *FD = getCurFunctionDecl())
isVariadic = FD->isVariadic();
else
isVariadic = getCurMethodDecl()->isVariadic();
if (!isVariadic) {
Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
// Check that the current function is variadic, and get its last parameter.
ParmVarDecl *LastParam;
if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
return true;
}
// Verify that the second argument to the builtin is the last argument of the
// current function or method.
@ -3664,16 +3724,7 @@ bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
// FIXME: This isn't correct for methods (results in bogus warning).
// Get the last formal in the current function.
const ParmVarDecl *LastArg;
if (CurBlock)
LastArg = CurBlock->TheDecl->parameters().back();
else if (FunctionDecl *FD = getCurFunctionDecl())
LastArg = FD->parameters().back();
else
LastArg = getCurMethodDecl()->parameters().back();
SecondArgIsLastNamedArgument = PV == LastArg;
SecondArgIsLastNamedArgument = PV == LastParam;
Type = PV->getType();
ParamLoc = PV->getLocation();
@ -3708,48 +3759,6 @@ bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
return false;
}
/// Check the arguments to '__builtin_va_start' for validity, and that
/// it was called from a function of the native ABI.
/// Emit an error and return true on failure; return false on success.
bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
// On x86-64 Unix, don't allow this in Win64 ABI functions.
// On x64 Windows, don't allow this in System V ABI functions.
// (Yes, that means there's no corresponding way to support variadic
// System V ABI functions on Windows.)
if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86_64) {
unsigned OS = Context.getTargetInfo().getTriple().getOS();
clang::CallingConv CC = CC_C;
if (const FunctionDecl *FD = getCurFunctionDecl())
CC = FD->getType()->getAs<FunctionType>()->getCallConv();
if ((OS == llvm::Triple::Win32 && CC == CC_X86_64SysV) ||
(OS != llvm::Triple::Win32 && CC == CC_X86_64Win64))
return Diag(TheCall->getCallee()->getLocStart(),
diag::err_va_start_used_in_wrong_abi_function)
<< (OS != llvm::Triple::Win32);
}
return SemaBuiltinVAStartImpl(TheCall);
}
/// Check the arguments to '__builtin_ms_va_start' for validity, and that
/// it was called from a Win64 ABI function.
/// Emit an error and return true on failure; return false on success.
bool Sema::SemaBuiltinMSVAStart(CallExpr *TheCall) {
// This only makes sense for x86-64.
const llvm::Triple &TT = Context.getTargetInfo().getTriple();
Expr *Callee = TheCall->getCallee();
if (TT.getArch() != llvm::Triple::x86_64)
return Diag(Callee->getLocStart(), diag::err_x86_builtin_32_bit_tgt);
// Don't allow this in System V ABI functions.
clang::CallingConv CC = CC_C;
if (const FunctionDecl *FD = getCurFunctionDecl())
CC = FD->getType()->getAs<FunctionType>()->getCallConv();
if (CC == CC_X86_64SysV ||
(TT.getOS() != llvm::Triple::Win32 && CC != CC_X86_64Win64))
return Diag(Callee->getLocStart(),
diag::err_ms_va_start_used_in_sysv_function);
return SemaBuiltinVAStartImpl(TheCall);
}
bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) {
// void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
// const char *named_addr);
@ -3761,26 +3770,14 @@ bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) {
diag::err_typecheck_call_too_few_args_at_least)
<< 0 /*function call*/ << 3 << Call->getNumArgs();
// Determine whether the current function is variadic or not.
bool IsVariadic;
if (BlockScopeInfo *CurBlock = getCurBlock())
IsVariadic = CurBlock->TheDecl->isVariadic();
else if (FunctionDecl *FD = getCurFunctionDecl())
IsVariadic = FD->isVariadic();
else if (ObjCMethodDecl *MD = getCurMethodDecl())
IsVariadic = MD->isVariadic();
else
llvm_unreachable("unexpected statement type");
if (!IsVariadic) {
Diag(Func->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
return true;
}
// Type-check the first argument normally.
if (checkBuiltinArgument(*this, Call, 0))
return true;
// Check that the current function is variadic.
if (checkVAStartIsInVariadicFunction(*this, Func))
return true;
const struct {
unsigned ArgNo;
QualType Type;

View File

@ -2951,7 +2951,8 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// Merge ns_returns_retained attribute.
if (OldTypeInfo.getProducesResult() != NewTypeInfo.getProducesResult()) {
if (NewTypeInfo.getProducesResult()) {
Diag(New->getLocation(), diag::err_returns_retained_mismatch);
Diag(New->getLocation(), diag::err_function_attribute_mismatch)
<< "'ns_returns_retained'";
Diag(OldLocation, diag::note_previous_declaration);
return true;
}
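For illustration (hypothetical Objective-C ARC declarations, not from this patch), the hunk above now phrases the diagnostic through err_function_attribute_mismatch when redeclarations disagree on ns_returns_retained:
// Sketch: the second declaration adds ns_returns_retained, so
// MergeFunctionDecl reports the attribute mismatch and points at the
// previous declaration.
id make_object(void);
__attribute__((ns_returns_retained)) id make_object(void); // error + note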

View File

@ -15372,7 +15372,7 @@ static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
}
/// Check for operands with placeholder types and complain if found.
/// Returns true if there was an error and no recovery was possible.
/// Returns ExprError() if there was an error and no recovery was possible.
ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
if (!getLangOpts().CPlusPlus) {
// C cannot handle TypoExpr nodes on either side of a binop because it

View File

@ -901,17 +901,36 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
// capturing lambda's call operator.
//
// The issue is that we cannot rely entirely on the FunctionScopeInfo stack
// since ScopeInfos are pushed on during parsing and treetransforming. But
// since a generic lambda's call operator can be instantiated anywhere (even
// end of the TU) we need to be able to examine its enclosing lambdas and so
// we use the DeclContext to get a hold of the closure-class and query it for
// capture information. The reason we don't just resort to always using the
// DeclContext chain is that it is only mature for lambda expressions
// enclosing generic lambda's call operators that are being instantiated.
// Since the FunctionScopeInfo stack is representative of the lexical
// nesting of the lambda expressions during initial parsing (and is the best
// place for querying information about captures of lambdas that are
// partially processed) and perhaps during instantiation of function templates
// that contain lambda expressions that need to be transformed BUT not
// necessarily during instantiation of a nested generic lambda's function call
// operator (which might even be instantiated at the end of the TU) - at which
// time the DeclContext tree is mature enough to query capture information
// reliably - we use a two pronged approach to walk through all the lexically
// enclosing lambda expressions:
//
// 1) Climb down the FunctionScopeInfo stack as long as each item represents
// a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
// enclosed by the call-operator of the LSI below it on the stack (while
// tracking the enclosing DC for step 2 if needed). Note the topmost LSI on
// the stack represents the innermost lambda.
//
// 2) If we run out of enclosing LSI's, check if the enclosing DeclContext
// represents a lambda's call operator. If it does, we must be instantiating
// a generic lambda's call operator (represented by the Current LSI, and
// should be the only scenario where an inconsistency between the LSI and the
// DeclContext should occur), so climb out the DeclContexts if they
// represent lambdas, while querying the corresponding closure types
// regarding capture information.
// 1) Climb down the function scope info stack.
for (int I = FunctionScopes.size();
I-- && isa<LambdaScopeInfo>(FunctionScopes[I]);
I-- && isa<LambdaScopeInfo>(FunctionScopes[I]) &&
(!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
cast<LambdaScopeInfo>(FunctionScopes[I])->CallOperator);
CurDC = getLambdaAwareParentOfDeclContext(CurDC)) {
CurLSI = cast<LambdaScopeInfo>(FunctionScopes[I]);
@ -927,11 +946,17 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
return ASTCtx.getPointerType(ClassType);
}
}
// We've run out of ScopeInfos but check if CurDC is a lambda (which can
// happen during instantiation of generic lambdas)
// 2) We've run out of ScopeInfos but check if CurDC is a lambda (which can
// happen during instantiation of its nested generic lambda call operator)
if (isLambdaCallOperator(CurDC)) {
assert(CurLSI);
assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator));
assert(CurLSI && "While computing 'this' capture-type for a generic "
"lambda, we must have a corresponding LambdaScopeInfo");
assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator) &&
"While computing 'this' capture-type for a generic lambda, when we "
"run out of enclosing LSI's, yet the enclosing DC is a "
"lambda-call-operator we must be (i.e. Current LSI) in a generic "
"lambda call oeprator");
assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));
auto IsThisCaptured =
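As a purely illustrative sketch (names invented), the scenario the rewritten comment describes is a generic lambda nested inside another generic lambda, where the inner call operator may only be instantiated well after parsing:
// Sketch: computing the adjusted type of 'this' inside the innermost generic
// lambda needs the enclosing lambdas' capture information; once the scope
// info stack runs out, the DeclContext walk (step 2 above) supplies it.
struct Widget {
  int value = 0;
  auto make() {
    return [this](auto x) {            // outer generic lambda captures 'this'
      auto inner = [this](auto y) {    // nested generic lambda
        return this->value + y;        // 'this' type adjusted here
      };
      return inner(x);                 // inner call operator instantiated here
    };
  }
};
int use() { return Widget{}.make()(1); }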

View File

@ -5531,14 +5531,8 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
"Invalid data, not enough diag/map pairs");
while (Size--) {
unsigned DiagID = Record[Idx++];
unsigned SeverityAndUpgradedFromWarning = Record[Idx++];
bool WasUpgradedFromWarning =
DiagnosticMapping::deserializeUpgradedFromWarning(
SeverityAndUpgradedFromWarning);
DiagnosticMapping NewMapping =
Diag.makeUserMapping(DiagnosticMapping::deserializeSeverity(
SeverityAndUpgradedFromWarning),
Loc);
DiagnosticMapping::deserialize(Record[Idx++]);
if (!NewMapping.isPragma() && !IncludeNonPragmaStates)
continue;
@ -5547,14 +5541,12 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
// If this mapping was specified as a warning but the severity was
// upgraded due to diagnostic settings, simulate the current diagnostic
// settings (and use a warning).
if (WasUpgradedFromWarning && !Mapping.isErrorOrFatal()) {
Mapping = Diag.makeUserMapping(diag::Severity::Warning, Loc);
continue;
if (NewMapping.wasUpgradedFromWarning() && !Mapping.isErrorOrFatal()) {
NewMapping.setSeverity(diag::Severity::Warning);
NewMapping.setUpgradedFromWarning(false);
}
// Use the deserialized mapping verbatim.
Mapping = NewMapping;
Mapping.setUpgradedFromWarning(WasUpgradedFromWarning);
}
return NewState;
};
@ -5569,22 +5561,36 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
DiagStates.push_back(FirstState);
// Skip the initial diagnostic state from the serialized module.
assert(Record[0] == 0 &&
assert(Record[1] == 0 &&
"Invalid data, unexpected backref in initial state");
Idx = 2 + Record[1] * 2;
Idx = 3 + Record[2] * 2;
assert(Idx < Record.size() &&
"Invalid data, not enough state change pairs in initial state");
} else {
FirstState = ReadDiagState(
F.isModule() ? DiagState() : *Diag.DiagStatesByLoc.CurDiagState,
SourceLocation(), F.isModule());
} else if (F.isModule()) {
// For an explicit module, preserve the flags from the module build
// command line (-w, -Weverything, -Werror, ...) along with any explicit
// -Wblah flags.
unsigned Flags = Record[Idx++];
DiagState Initial;
Initial.SuppressSystemWarnings = Flags & 1; Flags >>= 1;
Initial.ErrorsAsFatal = Flags & 1; Flags >>= 1;
Initial.WarningsAsErrors = Flags & 1; Flags >>= 1;
Initial.EnableAllWarnings = Flags & 1; Flags >>= 1;
Initial.IgnoreAllWarnings = Flags & 1; Flags >>= 1;
Initial.ExtBehavior = (diag::Severity)Flags;
FirstState = ReadDiagState(Initial, SourceLocation(), true);
// For an explicit module, set up the root buffer of the module to start
// with the initial diagnostic state of the module itself, to cover files
// that contain no explicit transitions.
if (F.isModule())
Diag.DiagStatesByLoc.Files[F.OriginalSourceFileID]
.StateTransitions.push_back({FirstState, 0});
// Set up the root buffer of the module to start with the initial
// diagnostic state of the module itself, to cover files that contain no
// explicit transitions (for which we did not serialize anything).
Diag.DiagStatesByLoc.Files[F.OriginalSourceFileID]
.StateTransitions.push_back({FirstState, 0});
} else {
// For prefix ASTs, start with whatever the user configured on the
// command line.
Idx++; // Skip flags.
FirstState = ReadDiagState(*Diag.DiagStatesByLoc.CurDiagState,
SourceLocation(), false);
}
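The flag word consumed above (and produced by EncodeDiagStateFlags in the writer) packs the boolean flags on top of the ExtBehavior severity; a stand-alone sketch of the assumed layout, with invented helper names, is:
#include <initializer_list>
// Assumed bit layout: bit 0 SuppressSystemWarnings, bit 1 ErrorsAsFatal,
// bit 2 WarningsAsErrors, bit 3 EnableAllWarnings, bit 4 IgnoreAllWarnings,
// remaining bits ExtBehavior.
struct FlagWordView {                  // hypothetical helper, not clang API
  bool IgnoreAllWarnings, EnableAllWarnings, WarningsAsErrors, ErrorsAsFatal,
       SuppressSystemWarnings;
  unsigned ExtBehavior;
};
unsigned encodeFlagWord(const FlagWordView &DS) {
  unsigned Result = DS.ExtBehavior;
  for (bool Val : {DS.IgnoreAllWarnings, DS.EnableAllWarnings,
                   DS.WarningsAsErrors, DS.ErrorsAsFatal,
                   DS.SuppressSystemWarnings})
    Result = (Result << 1) | (Val ? 1u : 0u);
  return Result;
}
FlagWordView decodeFlagWord(unsigned Flags) {
  FlagWordView V;
  V.SuppressSystemWarnings = Flags & 1; Flags >>= 1;
  V.ErrorsAsFatal          = Flags & 1; Flags >>= 1;
  V.WarningsAsErrors       = Flags & 1; Flags >>= 1;
  V.EnableAllWarnings      = Flags & 1; Flags >>= 1;
  V.IgnoreAllWarnings      = Flags & 1; Flags >>= 1;
  V.ExtBehavior            = Flags;
  return V;
}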
// Read the state transitions.
@ -9316,6 +9322,9 @@ void ASTReader::diagnoseOdrViolations() {
MethodVolatile,
MethodConst,
MethodInline,
MethodNumberParameters,
MethodParameterType,
MethodParameterName,
};
// These lambdas have the common portions of the ODR diagnostics. This
@ -9346,6 +9355,12 @@ void ASTReader::diagnoseOdrViolations() {
return Hash.CalculateHash();
};
auto ComputeQualTypeODRHash = [&Hash](QualType Ty) {
Hash.clear();
Hash.AddQualType(Ty);
return Hash.CalculateHash();
};
switch (FirstDiffType) {
case Other:
case EndOfClass:
@ -9640,6 +9655,76 @@ void ASTReader::diagnoseOdrViolations() {
break;
}
const unsigned FirstNumParameters = FirstMethod->param_size();
const unsigned SecondNumParameters = SecondMethod->param_size();
if (FirstNumParameters != SecondNumParameters) {
ODRDiagError(FirstMethod->getLocation(),
FirstMethod->getSourceRange(), MethodNumberParameters)
<< FirstName << FirstNumParameters;
ODRDiagNote(SecondMethod->getLocation(),
SecondMethod->getSourceRange(), MethodNumberParameters)
<< SecondName << SecondNumParameters;
Diagnosed = true;
break;
}
// Need this status boolean to know when to break out of the switch.
bool ParameterMismatch = false;
for (unsigned I = 0; I < FirstNumParameters; ++I) {
const ParmVarDecl *FirstParam = FirstMethod->getParamDecl(I);
const ParmVarDecl *SecondParam = SecondMethod->getParamDecl(I);
QualType FirstParamType = FirstParam->getType();
QualType SecondParamType = SecondParam->getType();
if (FirstParamType != SecondParamType &&
ComputeQualTypeODRHash(FirstParamType) !=
ComputeQualTypeODRHash(SecondParamType)) {
if (const DecayedType *ParamDecayedType =
FirstParamType->getAs<DecayedType>()) {
ODRDiagError(FirstMethod->getLocation(),
FirstMethod->getSourceRange(), MethodParameterType)
<< FirstName << (I + 1) << FirstParamType << true
<< ParamDecayedType->getOriginalType();
} else {
ODRDiagError(FirstMethod->getLocation(),
FirstMethod->getSourceRange(), MethodParameterType)
<< FirstName << (I + 1) << FirstParamType << false;
}
if (const DecayedType *ParamDecayedType =
SecondParamType->getAs<DecayedType>()) {
ODRDiagNote(SecondMethod->getLocation(),
SecondMethod->getSourceRange(), MethodParameterType)
<< SecondName << (I + 1) << SecondParamType << true
<< ParamDecayedType->getOriginalType();
} else {
ODRDiagNote(SecondMethod->getLocation(),
SecondMethod->getSourceRange(), MethodParameterType)
<< SecondName << (I + 1) << SecondParamType << false;
}
ParameterMismatch = true;
break;
}
DeclarationName FirstParamName = FirstParam->getDeclName();
DeclarationName SecondParamName = SecondParam->getDeclName();
if (FirstParamName != SecondParamName) {
ODRDiagError(FirstMethod->getLocation(),
FirstMethod->getSourceRange(), MethodParameterName)
<< FirstName << (I + 1) << FirstParamName;
ODRDiagNote(SecondMethod->getLocation(),
SecondMethod->getSourceRange(), MethodParameterName)
<< SecondName << (I + 1) << SecondParamName;
ParameterMismatch = true;
break;
}
}
if (ParameterMismatch) {
Diagnosed = true;
break;
}
break;
}
}
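For illustration, in the style of the ODR-hash module tests touched later in this commit (struct and parameter names invented, diagnostic wording approximate), the new MethodNumberParameters/MethodParameterType/MethodParameterName checks fire on definitions such as:
#if defined(FIRST)
struct M1 { void get(int count); };
#elif defined(SECOND)
struct M1 { void get(int n); };   // same type, different parameter name
#else
M1 m1;
// expected: error that 'get' in 'FirstModule' has 1st parameter named 'count',
// with a note that 'SecondModule' names it 'n' (MethodParameterName)
#endif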

View File

@ -2868,8 +2868,27 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
unsigned CurrID = 0;
RecordData Record;
auto EncodeDiagStateFlags =
[](const DiagnosticsEngine::DiagState *DS) -> unsigned {
unsigned Result = (unsigned)DS->ExtBehavior;
for (unsigned Val :
{(unsigned)DS->IgnoreAllWarnings, (unsigned)DS->EnableAllWarnings,
(unsigned)DS->WarningsAsErrors, (unsigned)DS->ErrorsAsFatal,
(unsigned)DS->SuppressSystemWarnings})
Result = (Result << 1) | Val;
return Result;
};
unsigned Flags = EncodeDiagStateFlags(Diag.DiagStatesByLoc.FirstDiagState);
Record.push_back(Flags);
auto AddDiagState = [&](const DiagnosticsEngine::DiagState *State,
bool IncludeNonPragmaStates) {
// Ensure that the diagnostic state wasn't modified since it was created.
// We will not correctly round-trip this information otherwise.
assert(Flags == EncodeDiagStateFlags(State) &&
"diag state flags vary in single AST file");
unsigned &DiagStateID = DiagStateIDMap[State];
Record.push_back(DiagStateID);
@ -2882,7 +2901,7 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
for (const auto &I : *State) {
if (I.second.isPragma() || IncludeNonPragmaStates) {
Record.push_back(I.first);
Record.push_back(I.second.serializeBits());
Record.push_back(I.second.serialize());
}
}
// Update the placeholder.

View File

@ -19,6 +19,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@ -1753,8 +1754,8 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_BadFree[*CheckKind])
BT_BadFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Bad free", "Memory Error"));
BT_BadFree[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Bad free", categories::MemoryError));
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
@ -1798,8 +1799,8 @@ void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_FreeAlloca[*CheckKind])
BT_FreeAlloca[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Free alloca()", "Memory Error"));
BT_FreeAlloca[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Free alloca()", categories::MemoryError));
auto R = llvm::make_unique<BugReport>(
*BT_FreeAlloca[*CheckKind],
@ -1824,7 +1825,7 @@ void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
if (!BT_MismatchedDealloc)
BT_MismatchedDealloc.reset(
new BugType(CheckNames[CK_MismatchedDeallocatorChecker],
"Bad deallocator", "Memory Error"));
"Bad deallocator", categories::MemoryError));
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
@ -1884,8 +1885,8 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
return;
if (!BT_OffsetFree[*CheckKind])
BT_OffsetFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Offset free", "Memory Error"));
BT_OffsetFree[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Offset free", categories::MemoryError));
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
@ -1936,7 +1937,7 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_UseFree[*CheckKind])
BT_UseFree[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Use-after-free", "Memory Error"));
CheckNames[*CheckKind], "Use-after-free", categories::MemoryError));
auto R = llvm::make_unique<BugReport>(*BT_UseFree[*CheckKind],
"Use of memory after it is freed", N);
@ -1962,8 +1963,8 @@ void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_DoubleFree[*CheckKind])
BT_DoubleFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Double free", "Memory Error"));
BT_DoubleFree[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Double free", categories::MemoryError));
auto R = llvm::make_unique<BugReport>(
*BT_DoubleFree[*CheckKind],
@ -1991,7 +1992,8 @@ void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_DoubleDelete)
BT_DoubleDelete.reset(new BugType(CheckNames[CK_NewDeleteChecker],
"Double delete", "Memory Error"));
"Double delete",
categories::MemoryError));
auto R = llvm::make_unique<BugReport>(
*BT_DoubleDelete, "Attempt to delete released memory", N);
@ -2017,8 +2019,9 @@ void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_UseZerroAllocated[*CheckKind])
BT_UseZerroAllocated[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Use of zero allocated", "Memory Error"));
BT_UseZerroAllocated[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Use of zero allocated",
categories::MemoryError));
auto R = llvm::make_unique<BugReport>(*BT_UseZerroAllocated[*CheckKind],
"Use of zero-allocated memory", N);
@ -2253,8 +2256,8 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
assert(N);
if (!BT_Leak[*CheckKind]) {
BT_Leak[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Memory leak", "Memory Error"));
BT_Leak[*CheckKind].reset(new BugType(CheckNames[*CheckKind], "Memory leak",
categories::MemoryError));
// Leaks should not be reported if they are post-dominated by a sink:
// (1) Sinks are higher importance bugs.
// (2) NoReturnFunctionChecker uses sink nodes to represent paths ending

View File

@ -178,7 +178,7 @@ private:
const MemRegion *Region, BugReporter &BR,
const Stmt *ValueExpr = nullptr) const {
if (!BT)
BT.reset(new BugType(this, "Nullability", "Memory error"));
BT.reset(new BugType(this, "Nullability", categories::MemoryError));
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
if (Region) {

View File

@ -256,7 +256,7 @@ void ValistChecker::reportUninitializedAccess(const MemRegion *VAList,
if (!BT_uninitaccess)
BT_uninitaccess.reset(new BugType(CheckNames[CK_Uninitialized],
"Uninitialized va_list",
"Memory Error"));
categories::MemoryError));
auto R = llvm::make_unique<BugReport>(*BT_uninitaccess, Msg, N);
R->markInteresting(VAList);
R->addVisitor(llvm::make_unique<ValistBugVisitor>(VAList));
@ -274,7 +274,8 @@ void ValistChecker::reportLeakedVALists(const RegionVector &LeakedVALists,
for (auto Reg : LeakedVALists) {
if (!BT_leakedvalist) {
BT_leakedvalist.reset(new BugType(CheckNames[CK_Unterminated],
"Leaked va_list", "Memory Error"));
"Leaked va_list",
categories::MemoryError));
BT_leakedvalist->setSuppressOnSink(true);
}

View File

@ -16,5 +16,6 @@ const char * const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
const char * const LogicError = "Logic error";
const char * const MemoryCoreFoundationObjectiveC =
"Memory (Core Foundation/Objective-C)";
const char * const MemoryError = "Memory error";
const char * const UnixAPI = "Unix API";
}}}

View File

@ -287,7 +287,7 @@ void test() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Memory allocated by &apos;new[]&apos; should be deallocated by &apos;delete[]&apos;, not &apos;delete&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Bad deallocator</string>
// CHECK-NEXT: <key>check_name</key><string>unix.MismatchedDeallocator</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->

View File

@ -257,7 +257,7 @@ void test(Odd *odd) {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Attempt to free released memory</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Double free</string>
// CHECK-NEXT: <key>check_name</key><string>cplusplus.NewDelete</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -475,7 +475,7 @@ void test(Odd *odd) {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Attempt to free released memory</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Double free</string>
// CHECK-NEXT: <key>check_name</key><string>cplusplus.NewDelete</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->

View File

@ -945,7 +945,7 @@ void callInMacroArg() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Memory allocated by &apos;new[]&apos; should be deallocated by &apos;delete[]&apos;, not &apos;delete&apos; (within a call to &apos;~auto_ptr&apos;)</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Bad deallocator</string>
// CHECK-NEXT: <key>check_name</key><string>unix.MismatchedDeallocator</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->

View File

@ -20042,7 +20042,7 @@ namespace rdar14960554 {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;buf&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -20284,7 +20284,7 @@ namespace rdar14960554 {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Memory allocated by &apos;new[]&apos; should be deallocated by &apos;delete[]&apos;, not &apos;delete&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Bad deallocator</string>
// CHECK-NEXT: <key>check_name</key><string>unix.MismatchedDeallocator</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->

View File

@ -421,7 +421,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;p&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -586,7 +586,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;A&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -974,7 +974,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;buf&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -1376,7 +1376,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;buf&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -1962,7 +1962,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Use of memory after it is freed</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Use-after-free</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -2524,7 +2524,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;buf&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -2795,7 +2795,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;v&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -3144,7 +3144,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Use of memory after it is freed</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Use-after-free</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -3309,7 +3309,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;m&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -3517,7 +3517,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;x&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -3725,7 +3725,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;x&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -4030,7 +4030,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;x&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -4335,7 +4335,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;x&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -4543,7 +4543,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;x&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -4751,7 +4751,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;x&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -4988,7 +4988,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential memory leak</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -5225,7 +5225,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential memory leak</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -5496,7 +5496,7 @@ void testMyMalloc() {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential memory leak</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->

View File

@ -218,7 +218,7 @@ void test2(int *p) {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Memory allocated by malloc() should be deallocated by free(), not &apos;delete&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Bad deallocator</string>
// CHECK-NEXT: <key>check_name</key><string>unix.MismatchedDeallocator</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->
@ -315,7 +315,7 @@ void test2(int *p) {
// CHECK-NEXT: </dict>
// CHECK-NEXT: </array>
// CHECK-NEXT: <key>description</key><string>Potential leak of memory pointed to by &apos;x&apos;</string>
// CHECK-NEXT: <key>category</key><string>Memory Error</string>
// CHECK-NEXT: <key>category</key><string>Memory error</string>
// CHECK-NEXT: <key>type</key><string>Memory leak</string>
// CHECK-NEXT: <key>check_name</key><string>unix.Malloc</string>
// CHECK-NEXT: <!-- This hash is experimental and going to change! -->

61
test/CodeGen/PR32874.c Normal file
View File

@ -0,0 +1,61 @@
// RUN: %clang_cc1 -x c -S -emit-llvm -o - -triple x86_64-apple-darwin10 %s \
// RUN: -w -fsanitize=signed-integer-overflow,unsigned-integer-overflow,integer-divide-by-zero,float-divide-by-zero \
// RUN: | FileCheck %s
// CHECK-LABEL: define void @foo
// CHECK-NOT: !nosanitize
void foo(const int *p) {
// __builtin_prefetch expects its optional arguments to be constant integers.
// Check that ubsan does not instrument any safe arithmetic performed in
// operands to __builtin_prefetch. (A clang frontend check should reject
// unsafe arithmetic in these operands.)
__builtin_prefetch(p, 0 + 1, 0 + 3);
__builtin_prefetch(p, 1 - 0, 3 - 0);
__builtin_prefetch(p, 1 * 1, 1 * 3);
__builtin_prefetch(p, 1 / 1, 3 / 1);
__builtin_prefetch(p, 3 % 2, 3 % 1);
__builtin_prefetch(p, 0U + 1U, 0U + 3U);
__builtin_prefetch(p, 1U - 0U, 3U - 0U);
__builtin_prefetch(p, 1U * 1U, 1U * 3U);
__builtin_prefetch(p, 1U / 1U, 3U / 1U);
__builtin_prefetch(p, 3U % 2U, 3U % 1U);
}
// CHECK-LABEL: define void @ub_constant_arithmetic
void ub_constant_arithmetic() {
// Check that we still instrument unsafe arithmetic, even if it is known to
// be unsafe at compile time.
int INT_MIN = 0xffffffff;
int INT_MAX = 0x7fffffff;
// CHECK: call void @__ubsan_handle_add_overflow
// CHECK: call void @__ubsan_handle_add_overflow
INT_MAX + 1;
INT_MAX + -1;
// CHECK: call void @__ubsan_handle_negate_overflow
// CHECK: call void @__ubsan_handle_sub_overflow
-INT_MIN;
-INT_MAX - 2;
// CHECK: call void @__ubsan_handle_mul_overflow
// CHECK: call void @__ubsan_handle_mul_overflow
INT_MAX * INT_MAX;
INT_MIN * INT_MIN;
// CHECK: call void @__ubsan_handle_divrem_overflow
// CHECK: call void @__ubsan_handle_divrem_overflow
1 / 0;
INT_MIN / -1;
// CHECK: call void @__ubsan_handle_divrem_overflow
// CHECK: call void @__ubsan_handle_divrem_overflow
1 % 0;
INT_MIN % -1;
// CHECK: call void @__ubsan_handle_divrem_overflow
1.0 / 0.0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -59,7 +59,7 @@ float64x1_t test_vmul_n_f64(float64x1_t a, float64_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %b to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
// CHECK: [[VGET_LANE:%.*]] = extractelement <2 x float> [[TMP1]], i32 1
// CHECK: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float [[VGET_LANE]]) #2
// CHECK: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float [[VGET_LANE]])
// CHECK: ret float [[VMULXS_F32_I]]
float32_t test_vmulxs_lane_f32(float32_t a, float32x2_t b) {
return vmulxs_lane_f32(a, b, 1);
@ -69,7 +69,7 @@ float32_t test_vmulxs_lane_f32(float32_t a, float32x2_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %b to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> [[TMP1]], i32 3
// CHECK: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float [[VGETQ_LANE]]) #2
// CHECK: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float [[VGETQ_LANE]])
// CHECK: ret float [[VMULXS_F32_I]]
float32_t test_vmulxs_laneq_f32(float32_t a, float32x4_t b) {
return vmulxs_laneq_f32(a, b, 3);
@ -79,7 +79,7 @@ float32_t test_vmulxs_laneq_f32(float32_t a, float32x4_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <1 x double> %b to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
// CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> [[TMP1]], i32 0
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double [[VGET_LANE]]) #2
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double [[VGET_LANE]])
// CHECK: ret double [[VMULXD_F64_I]]
float64_t test_vmulxd_lane_f64(float64_t a, float64x1_t b) {
return vmulxd_lane_f64(a, b, 0);
@ -89,7 +89,7 @@ float64_t test_vmulxd_lane_f64(float64_t a, float64x1_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %b to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double [[VGETQ_LANE]]) #2
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double [[VGETQ_LANE]])
// CHECK: ret double [[VMULXD_F64_I]]
float64_t test_vmulxd_laneq_f64(float64_t a, float64x2_t b) {
return vmulxd_laneq_f64(a, b, 1);
@ -102,7 +102,7 @@ float64_t test_vmulxd_laneq_f64(float64_t a, float64x2_t b) {
// CHECK: [[TMP2:%.*]] = bitcast <1 x double> %b to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
// CHECK: [[VGET_LANE6:%.*]] = extractelement <1 x double> [[TMP3]], i32 0
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGET_LANE6]]) #2
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGET_LANE6]])
// CHECK: [[TMP4:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double>
// CHECK: [[VSET_LANE:%.*]] = insertelement <1 x double> [[TMP5]], double [[VMULXD_F64_I]], i32 0
@ -119,7 +119,7 @@ float64x1_t test_vmulx_lane_f64(float64x1_t a, float64x1_t b) {
// CHECK: [[TMP2:%.*]] = bitcast <2 x double> %b to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> [[TMP3]], i32 0
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]]) #2
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]])
// CHECK: [[TMP4:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double>
// CHECK: [[VSET_LANE:%.*]] = insertelement <1 x double> [[TMP5]], double [[VMULXD_F64_I]], i32 0
@ -135,7 +135,7 @@ float64x1_t test_vmulx_laneq_f64_0(float64x1_t a, float64x2_t b) {
// CHECK: [[TMP2:%.*]] = bitcast <2 x double> %b to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> [[TMP3]], i32 1
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]]) #2
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]])
// CHECK: [[TMP4:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double>
// CHECK: [[VSET_LANE:%.*]] = insertelement <1 x double> [[TMP5]], double [[VMULXD_F64_I]], i32 0
@ -252,7 +252,7 @@ float64x1_t test_vfms_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {
// CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGET_LANE]], i64 0
// CHECK: [[VQDMULLH_S16_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]]) #2
// CHECK: [[VQDMULLH_S16_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]])
// CHECK: [[TMP4:%.*]] = extractelement <4 x i32> [[VQDMULLH_S16_I]], i64 0
// CHECK: ret i32 [[TMP4]]
int32_t test_vqdmullh_lane_s16(int16_t a, int16x4_t b) {
@ -263,7 +263,7 @@ int32_t test_vqdmullh_lane_s16(int16_t a, int16x4_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
// CHECK: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 [[VGET_LANE]]) #2
// CHECK: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 [[VGET_LANE]])
// CHECK: ret i64 [[VQDMULLS_S32_I]]
int64_t test_vqdmulls_lane_s32(int32_t a, int32x2_t b) {
return vqdmulls_lane_s32(a, b, 1);
@ -275,7 +275,7 @@ int64_t test_vqdmulls_lane_s32(int32_t a, int32x2_t b) {
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0
// CHECK: [[VQDMULLH_S16_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]]) #2
// CHECK: [[VQDMULLH_S16_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]])
// CHECK: [[TMP4:%.*]] = extractelement <4 x i32> [[VQDMULLH_S16_I]], i64 0
// CHECK: ret i32 [[TMP4]]
int32_t test_vqdmullh_laneq_s16(int16_t a, int16x8_t b) {
@ -286,7 +286,7 @@ int32_t test_vqdmullh_laneq_s16(int16_t a, int16x8_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
// CHECK: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 [[VGETQ_LANE]]) #2
// CHECK: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 [[VGETQ_LANE]])
// CHECK: ret i64 [[VQDMULLS_S32_I]]
int64_t test_vqdmulls_laneq_s32(int32_t a, int32x4_t b) {
return vqdmulls_laneq_s32(a, b, 3);
@ -298,7 +298,7 @@ int64_t test_vqdmulls_laneq_s32(int32_t a, int32x4_t b) {
// CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGET_LANE]], i64 0
// CHECK: [[VQDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]]) #2
// CHECK: [[VQDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]])
// CHECK: [[TMP4:%.*]] = extractelement <4 x i16> [[VQDMULHH_S16_I]], i64 0
// CHECK: ret i16 [[TMP4]]
int16_t test_vqdmulhh_lane_s16(int16_t a, int16x4_t b) {
@ -309,7 +309,7 @@ int16_t test_vqdmulhh_lane_s16(int16_t a, int16x4_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
// CHECK: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 [[VGET_LANE]]) #2
// CHECK: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 [[VGET_LANE]])
// CHECK: ret i32 [[VQDMULHS_S32_I]]
int32_t test_vqdmulhs_lane_s32(int32_t a, int32x2_t b) {
return vqdmulhs_lane_s32(a, b, 1);
@ -322,7 +322,7 @@ int32_t test_vqdmulhs_lane_s32(int32_t a, int32x2_t b) {
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0
// CHECK: [[VQDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]]) #2
// CHECK: [[VQDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]])
// CHECK: [[TMP4:%.*]] = extractelement <4 x i16> [[VQDMULHH_S16_I]], i64 0
// CHECK: ret i16 [[TMP4]]
int16_t test_vqdmulhh_laneq_s16(int16_t a, int16x8_t b) {
@ -334,7 +334,7 @@ int16_t test_vqdmulhh_laneq_s16(int16_t a, int16x8_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
// CHECK: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 [[VGETQ_LANE]]) #2
// CHECK: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 [[VGETQ_LANE]])
// CHECK: ret i32 [[VQDMULHS_S32_I]]
int32_t test_vqdmulhs_laneq_s32(int32_t a, int32x4_t b) {
return vqdmulhs_laneq_s32(a, b, 3);
@ -346,7 +346,7 @@ int32_t test_vqdmulhs_laneq_s32(int32_t a, int32x4_t b) {
// CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGET_LANE]], i64 0
// CHECK: [[VQRDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]]) #2
// CHECK: [[VQRDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]])
// CHECK: [[TMP4:%.*]] = extractelement <4 x i16> [[VQRDMULHH_S16_I]], i64 0
// CHECK: ret i16 [[TMP4]]
int16_t test_vqrdmulhh_lane_s16(int16_t a, int16x4_t b) {
@ -357,7 +357,7 @@ int16_t test_vqrdmulhh_lane_s16(int16_t a, int16x4_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
// CHECK: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 [[VGET_LANE]]) #2
// CHECK: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 [[VGET_LANE]])
// CHECK: ret i32 [[VQRDMULHS_S32_I]]
int32_t test_vqrdmulhs_lane_s32(int32_t a, int32x2_t b) {
return vqrdmulhs_lane_s32(a, b, 1);
@ -370,7 +370,7 @@ int32_t test_vqrdmulhs_lane_s32(int32_t a, int32x2_t b) {
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0
// CHECK: [[VQRDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]]) #2
// CHECK: [[VQRDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]])
// CHECK: [[TMP4:%.*]] = extractelement <4 x i16> [[VQRDMULHH_S16_I]], i64 0
// CHECK: ret i16 [[TMP4]]
int16_t test_vqrdmulhh_laneq_s16(int16_t a, int16x8_t b) {
@ -382,7 +382,7 @@ int16_t test_vqrdmulhh_laneq_s16(int16_t a, int16x8_t b) {
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
// CHECK: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 [[VGETQ_LANE]]) #2
// CHECK: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 [[VGETQ_LANE]])
// CHECK: ret i32 [[VQRDMULHS_S32_I]]
int32_t test_vqrdmulhs_laneq_s32(int32_t a, int32x4_t b) {
return vqrdmulhs_laneq_s32(a, b, 3);
@ -497,7 +497,7 @@ int64_t test_vqdmlsls_laneq_s32(int64_t a, int32_t b, int32x4_t c) {
// CHECK: [[TMP4:%.*]] = bitcast <1 x double> [[TMP1]] to <8 x i8>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double>
// CHECK: [[VGET_LANE7:%.*]] = extractelement <1 x double> [[TMP5]], i32 0
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGET_LANE7]]) #2
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGET_LANE7]])
// CHECK: [[TMP6:%.*]] = bitcast <1 x double> [[TMP0]] to <8 x i8>
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double>
// CHECK: [[VSET_LANE:%.*]] = insertelement <1 x double> [[TMP7]], double [[VMULXD_F64_I]], i32 0
@ -523,7 +523,7 @@ float64x1_t test_vmulx_lane_f64_0() {
// CHECK: [[TMP4:%.*]] = bitcast <2 x double> [[SHUFFLE_I]] to <16 x i8>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double>
// CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> [[TMP5]], i32 1
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]]) #2
// CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]])
// CHECK: [[TMP6:%.*]] = bitcast <1 x double> [[TMP0]] to <8 x i8>
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double>
// CHECK: [[VSET_LANE:%.*]] = insertelement <1 x double> [[TMP7]], double [[VMULXD_F64_I]], i32 0

File diff suppressed because it is too large

View File

@ -124,7 +124,7 @@ void test_builtins(double d, float f, long double ld) {
// CHECK-YES-NOT: declare float @logf(float) [[NUW_RN]]
}
// CHECK-YES: attributes [[NUW_RN]] = { nounwind readnone }
// CHECK-YES: attributes [[NUW_RN]] = { nounwind readnone speculatable }
// CHECK-NO: attributes [[NUW_RN]] = { nounwind readnone{{.*}} }
// CHECK-NO: attributes [[NUW_RNI]] = { nounwind readnone }
// CHECK-NO: attributes [[NUW_RNI]] = { nounwind readnone speculatable }

View File

@ -81,5 +81,5 @@
// RPATH-X86_64: "-rpath" "[[RESDIR]]{{(/|\\\\)lib(/|\\\\)linux(/|\\\\)x86_64}}"
// LIBPATH-AArch64: -L[[RESDIR]]{{(/|\\\\)lib(/|\\\\)linux(/|\\\\)aarch64}}
// RPATH-AArch64: "-rpath" "[[RESDIR]]{{(/|\\\\)lib(/|\\\\)linux(/|\\\\)aarch64}}"
// NO-LIBPATH-NOT: -L{{.*Inputs(/|\\\\)resource_dir}}
// NO-LIBPATH-NOT: "-L{{[^"]*Inputs(/|\\\\)resource_dir}}"
// NO-RPATH-NOT: "-rpath" {{.*(/|\\\\)Inputs(/|\\\\)resource_dir}}

View File

@ -4,12 +4,13 @@
// CHECK-SANITIZE-COVERAGE-0-NOT: fsanitize-coverage-type
// CHECK-SANITIZE-COVERAGE-0: -fsanitize=address
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=memory -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=leak -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=bool -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=dataflow -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=memory -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=leak -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=bool -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize=dataflow -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// CHECK-SANITIZE-COVERAGE-FUNC: fsanitize-coverage-type=1
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-BB
@ -25,13 +26,10 @@
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-1
// CHECK-SANITIZE-COVERAGE-1: warning: argument '-fsanitize-coverage=1' is deprecated, use '-fsanitize-coverage=trace-pc-guard' instead
// RUN: %clang -target x86_64-linux-gnu -fsanitize=thread -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-UNUSED
// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// CHECK-SANITIZE-COVERAGE-UNUSED: argument unused during compilation: '-fsanitize-coverage=func'
// CHECK-SANITIZE-COVERAGE-UNUSED-NOT: -fsanitize-coverage-type=1
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func -fno-sanitize=address %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-SAN-DISABLED
// CHECK-SANITIZE-COVERAGE-SAN-DISABLED-NOT: argument unused
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
// CHECK_FUNC_BB_EDGE_DEPRECATED: warning: argument '-fsanitize-coverage=[func|bb|edge]' is deprecated, use '-fsanitize-coverage=[func|bb|edge],[trace-pc-guard|trace-pc]' instead
// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge,indirect-calls,trace-pc,trace-cmp,trace-div,trace-gep %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FEATURES
// CHECK-SANITIZE-COVERAGE-FEATURES: -fsanitize-coverage-type=3
@ -82,7 +80,7 @@
// CHECK-EXTEND-LEGACY: -fsanitize-coverage-type=1
// CHECK-EXTEND-LEGACY: -fsanitize-coverage-trace-cmp
// RUN: %clang_cl --target=i386-pc-win32 -fsanitize=address -fsanitize-coverage=func -c -### -- %s 2>&1 | FileCheck %s -check-prefix=CLANG-CL-COVERAGE
// RUN: %clang_cl --target=i386-pc-win32 -fsanitize=address -fsanitize-coverage=func,trace-pc-guard -c -### -- %s 2>&1 | FileCheck %s -check-prefix=CLANG-CL-COVERAGE
// CLANG-CL-COVERAGE-NOT: error:
// CLANG-CL-COVERAGE-NOT: warning:
// CLANG-CL-COVERAGE-NOT: argument unused

View File

@ -25,5 +25,5 @@ class C : public A<float> { };
// CHECK: C++ base class specifier=A<float>:4:7 [access=public isVirtual=false] [type=A<float>] [typekind=Unexposed] [templateargs/1= [type=float] [typekind=Float]] [canonicaltype=A<float>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=float] [typekind=Float]] [isPOD=0] [nbFields=1]
// CHECK: TemplateRef=A:4:7 [type=] [typekind=Invalid] [isPOD=0]
// CHECK-DIAG: keep-going.cpp:1:10: error: 'missing1.h' file not found
// CHECK-DIAG: keep-going.cpp:8:10: error: 'missing2.h' file not found
// CHECK-DIAG: keep-going.cpp:1:10: fatal error: 'missing1.h' file not found
// CHECK-DIAG: keep-going.cpp:8:10: fatal error: 'missing2.h' file not found

View File

@ -1,20 +1,42 @@
// RUN: rm -rf %t
//
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fmodules -fimplicit-module-maps -emit-module -fmodules-cache-path=%t -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DIMPLICIT_FLAG -Werror=padded
// For an implicit module, all that matters are the warning flags in the module user.
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -emit-module -fmodules-cache-path=%t -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DWARNING -Wpadded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DERROR -Wpadded -Werror
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DERROR -Werror=padded
//
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fmodules -fimplicit-module-maps -emit-module -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts -o %t/explicit.pcm -Werror=string-plus-int -Wpadded
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DEXPLICIT_FLAG -fmodule-file=%t/explicit.pcm
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DEXPLICIT_FLAG -fmodule-file=%t/explicit.pcm -Werror=padded
// For an explicit module, all that matters are the warning flags in the module build.
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -emit-module -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts -o %t/nodiag.pcm
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -fmodule-file=%t/nodiag.pcm -Wpadded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -fmodule-file=%t/nodiag.pcm -Werror -Wpadded
//
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -emit-module -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts -o %t/warning.pcm -Wpadded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DWARNING -fmodule-file=%t/warning.pcm
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DWARNING -fmodule-file=%t/warning.pcm -Werror=padded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DWARNING -fmodule-file=%t/warning.pcm -Werror
//
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -emit-module -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts -o %t/werror-no-error.pcm -Werror -Wpadded -Wno-error=padded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DWARNING -fmodule-file=%t/werror-no-error.pcm
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DWARNING -fmodule-file=%t/werror-no-error.pcm -Wno-padded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DWARNING -fmodule-file=%t/werror-no-error.pcm -Werror=padded
//
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -emit-module -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts -o %t/error.pcm -Werror=padded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DERROR -fmodule-file=%t/error.pcm -Wno-padded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DERROR -fmodule-file=%t/error.pcm -Wno-error=padded
//
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -emit-module -fmodule-name=diag_flags -x c++ %S/Inputs/module.map -fmodules-ts -o %t/werror.pcm -Werror -Wpadded
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DERROR -fmodule-file=%t/werror.pcm -Wno-error
// RUN: %clang_cc1 -triple %itanium_abi_triple -fmodules -fimplicit-module-maps -verify -fmodules-cache-path=%t -I %S/Inputs %s -fmodules-ts -DERROR -fmodule-file=%t/werror.pcm -Wno-padded
import diag_flags;
// Diagnostic flags from the module user make no difference to diagnostics
// emitted within the module when using an explicitly-loaded module.
#ifdef IMPLICIT_FLAG
#if ERROR
// expected-error@diag_flags.h:14 {{padding struct}}
#elif defined(EXPLICIT_FLAG)
#elif WARNING
// expected-warning@diag_flags.h:14 {{padding struct}}
#else
// expected-no-diagnostics
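// The Inputs/diag_flags.h header imported by these RUN lines is not shown in
// this hunk. As a rough sketch only, any module header declaring a struct
// whose layout requires tail padding would trigger the -Wpadded diagnostic
// the directives above expect (hypothetical contents, not the actual header):
struct Padded {
  int i;  // 4 bytes, 4-byte alignment on common targets
  char c; // 1 byte; the struct is padded out to 8 bytes, so -Wpadded fires
};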

View File

@ -275,6 +275,33 @@ S11 s11;
// expected-note@first.h:* {{but in 'FirstModule' found field 'x' with a different initializer}}
#endif
#if defined(FIRST)
struct S12 {
unsigned x[5];
};
#elif defined(SECOND)
struct S12 {
unsigned x[7];
};
#else
S12 s12;
// expected-error@first.h:* {{'Field::S12::x' from module 'FirstModule' is not present in definition of 'Field::S12' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
#endif
#if defined(FIRST)
struct S13 {
unsigned x[7];
};
#elif defined(SECOND)
struct S13 {
double x[7];
};
#else
S13 s13;
// expected-error@first.h:* {{'Field::S13::x' from module 'FirstModule' is not present in definition of 'Field::S13' in module 'SecondModule'}}
// expected-note@second.h:* {{declaration of 'x' does not match}}
#endif
} // namespace Field
namespace Method {
@ -403,6 +430,91 @@ S8 s8;
// expected-note@first.h:* {{but in 'FirstModule' found method 'A' is const}}
#endif
#if defined(FIRST)
struct S9 {
void A(int x) {}
void A(int x, int y) {}
};
#elif defined(SECOND)
struct S9 {
void A(int x, int y) {}
void A(int x) {}
};
#else
S9 s9;
// expected-error@second.h:* {{'Method::S9' has different definitions in different modules; first difference is definition in module 'SecondModule' found method 'A' that has 2 parameters}}
// expected-note@first.h:* {{but in 'FirstModule' found method 'A' that has 1 parameter}}
#endif
#if defined(FIRST)
struct S10 {
void A(int x) {}
void A(float x) {}
};
#elif defined(SECOND)
struct S10 {
void A(float x) {}
void A(int x) {}
};
#else
S10 s10;
// expected-error@second.h:* {{'Method::S10' has different definitions in different modules; first difference is definition in module 'SecondModule' found method 'A' with 1st parameter of type 'float'}}
// expected-note@first.h:* {{but in 'FirstModule' found method 'A' with 1st parameter of type 'int'}}
#endif
#if defined(FIRST)
struct S11 {
void A(int x) {}
};
#elif defined(SECOND)
struct S11 {
void A(int y) {}
};
#else
S11 s11;
// expected-error@second.h:* {{'Method::S11' has different definitions in different modules; first difference is definition in module 'SecondModule' found method 'A' with 1st parameter named 'y'}}
// expected-note@first.h:* {{but in 'FirstModule' found method 'A' with 1st parameter named 'x'}}
#endif
#if defined(FIRST)
struct S12 {
void A(int x) {}
};
#elif defined(SECOND)
struct S12 {
void A(int x = 1) {}
};
#else
S12 s12;
// TODO: This should produce an error.
#endif
#if defined(FIRST)
struct S13 {
void A(int x = 1 + 0) {}
};
#elif defined(SECOND)
struct S13 {
void A(int x = 1) {}
};
#else
S13 s13;
// TODO: This should produce an error.
#endif
#if defined(FIRST)
struct S14 {
void A(int x[2]) {}
};
#elif defined(SECOND)
struct S14 {
void A(int x[3]) {}
};
#else
S14 s14;
// expected-error@second.h:* {{'Method::S14' has different definitions in different modules; first difference is definition in module 'SecondModule' found method 'A' with 1st parameter of type 'int *' decayed from 'int [3]'}}
// expected-note@first.h:* {{but in 'FirstModule' found method 'A' with 1st parameter of type 'int *' decayed from 'int [2]'}}
#endif
} // namespace Method
// Naive parsing of AST can lead to cycles in processing. Ensure
@ -526,37 +638,43 @@ S3 s3;
// Interesting cases that should not cause errors. struct S should not error
// while struct T should error at the access specifier mismatch at the end.
namespace AllDecls {
#define CREATE_ALL_DECL_STRUCT(NAME, ACCESS) \
typedef int INT; \
struct NAME { \
public: \
private: \
protected: \
static_assert(1 == 1, "Message"); \
static_assert(2 == 2); \
\
int x; \
double y; \
\
INT z; \
\
unsigned a : 1; \
unsigned b : 2 * 2 + 5 / 2; \
\
mutable int c = sizeof(x + y); \
\
void method() {} \
static void static_method() {} \
virtual void virtual_method() {} \
virtual void pure_virtual_method() = 0; \
inline void inline_method() {} \
void volatile_method() volatile {} \
void const_method() const {} \
\
typedef int typedef_int; \
using using_int = int; \
\
ACCESS: \
#define CREATE_ALL_DECL_STRUCT(NAME, ACCESS) \
typedef int INT; \
struct NAME { \
public: \
private: \
protected: \
static_assert(1 == 1, "Message"); \
static_assert(2 == 2); \
\
int x; \
double y; \
\
INT z; \
\
unsigned a : 1; \
unsigned b : 2 * 2 + 5 / 2; \
\
mutable int c = sizeof(x + y); \
\
void method() {} \
static void static_method() {} \
virtual void virtual_method() {} \
virtual void pure_virtual_method() = 0; \
inline void inline_method() {} \
void volatile_method() volatile {} \
void const_method() const {} \
\
typedef int typedef_int; \
using using_int = int; \
\
void method_one_arg(int x) {} \
void method_one_arg_default_argument(int x = 5 + 5) {} \
void method_decayed_type(int x[5]) {} \
\
int constant_arr[5]; \
\
ACCESS: \
};
#if defined(FIRST)
@ -933,6 +1051,34 @@ Alpha::Alpha() {}
#endif
}
namespace ParameterTest {
#if defined(FIRST)
class X {};
template <typename G>
class S {
public:
typedef G Type;
static inline G *Foo(const G *a, int * = nullptr);
};
template<typename G>
G* S<G>::Foo(const G* aaaa, int*) {}
#elif defined(SECOND)
template <typename G>
class S {
public:
typedef G Type;
static inline G *Foo(const G *a, int * = nullptr);
};
template<typename G>
G* S<G>::Foo(const G* asdf, int*) {}
#else
S<X> s;
#endif
}
// Keep macros contained to one file.
#ifdef FIRST
#undef FIRST

View File

@ -10,7 +10,7 @@ void foo() {}
template <typename T, int C>
T tmain(T argc, T *argv) {
T i, j, a[20];
T i, j, a[20], always;
#pragma omp target
foo();
#pragma omp target if (target:argc > 0)
@ -25,6 +25,12 @@ T tmain(T argc, T *argv) {
foo();
#pragma omp target map(always,alloc: i)
foo();
#pragma omp target map(always from: i)
foo();
#pragma omp target map(always)
{always++;}
#pragma omp target map(always,i)
{always++;i++;}
#pragma omp target nowait
foo();
#pragma omp target depend(in : argc, argv[i:argc], a[:])
@ -50,6 +56,17 @@ T tmain(T argc, T *argv) {
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(always,alloc: i)
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(always,from: i)
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(tofrom: always)
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: }
// CHECK-NEXT: #pragma omp target map(tofrom: always,i)
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: i++;
// CHECK-NEXT: }
// CHECK-NEXT: #pragma omp target nowait
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target depend(in : argc,argv[i:argc],a[:])
@ -72,6 +89,17 @@ T tmain(T argc, T *argv) {
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(always,alloc: i)
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(always,from: i)
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(tofrom: always)
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: }
// CHECK-NEXT: #pragma omp target map(tofrom: always,i)
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: i++;
// CHECK-NEXT: }
// CHECK-NEXT: #pragma omp target nowait
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target depend(in : argc,argv[i:argc],a[:])
@ -94,6 +122,17 @@ T tmain(T argc, T *argv) {
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(always,alloc: i)
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(always,from: i)
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target map(tofrom: always)
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: }
// CHECK-NEXT: #pragma omp target map(tofrom: always,i)
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: i++;
// CHECK-NEXT: }
// CHECK-NEXT: #pragma omp target nowait
// CHECK-NEXT: foo()
// CHECK-NEXT: #pragma omp target depend(in : argc,argv[i:argc],a[:])
@ -103,7 +142,7 @@ T tmain(T argc, T *argv) {
// CHECK-LABEL: int main(int argc, char **argv) {
int main (int argc, char **argv) {
int i, j, a[20];
int i, j, a[20], always;
// CHECK-NEXT: int i, j, a[20]
#pragma omp target
// CHECK-NEXT: #pragma omp target
@ -139,6 +178,26 @@ int main (int argc, char **argv) {
foo();
// CHECK-NEXT: foo();
#pragma omp target map(always from: i)
// CHECK-NEXT: #pragma omp target map(always,from: i)
foo();
// CHECK-NEXT: foo();
#pragma omp target map(always)
// CHECK-NEXT: #pragma omp target map(tofrom: always)
{always++;}
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: }
#pragma omp target map(always,i)
// CHECK-NEXT: #pragma omp target map(tofrom: always,i)
{always++;i++;}
// CHECK-NEXT: {
// CHECK-NEXT: always++;
// CHECK-NEXT: i++;
// CHECK-NEXT: }
#pragma omp target nowait
// CHECK-NEXT: #pragma omp target nowait
foo();

View File

@ -66,6 +66,8 @@ struct SA {
{}
#pragma omp target map(always, tofrom: c,f[:]) // expected-error {{section length is unspecified and cannot be inferred because subscripted value is not an array}}
{}
#pragma omp target map(always) // expected-error {{use of undeclared identifier 'always'}}
{}
return;
}
};

View File

@ -27,7 +27,7 @@ void f3(float a, ...) { // expected-note 2{{parameter of type 'float' is declare
}
// stdarg: PR3075
// stdarg: PR3075 and PR2531
void f4(const char *msg, ...) {
__builtin_va_list ap;
__builtin_stdarg_start((ap), (msg));

View File

@ -1,25 +0,0 @@
// RUN: %clang_cc1 %s -Wno-uninitialized -std=c++1z -fsyntax-only -verify
const extern int arr[];
constexpr auto p = arr; // ok
constexpr int f(int i) {return p[i];} // expected-note {{read of dereferenced one-past-the-end pointer}}
constexpr int arr[] {1, 2, 3};
constexpr auto p2 = arr + 2; // ok
constexpr int x = f(2); // ok
constexpr int y = f(3); // expected-error {{constant expression}}
// expected-note-re@-1 {{in call to 'f({{.*}})'}}
struct A {int m[];} a;
constexpr auto p3 = a.m; // ok
constexpr auto p4 = a.m + 1; // expected-error {{constant expression}} expected-note {{constant bound}}
void g(int i) {
int arr[i];
constexpr auto *p = arr + 2; // expected-error {{constant expression}} expected-note {{constant bound}}
// FIXME: Give a better diagnostic here. The issue is that computing
// sizeof(*arr2) within the array indexing fails due to the VLA.
int arr2[2][i];
constexpr int m = ((void)arr2[2], 0); // expected-error {{constant expression}}
}

View File

@ -3,229 +3,298 @@
// RUN: %clang_cc1 -std=c++1z -verify -fsyntax-only -fblocks -fms-extensions %s -DMS_EXTENSIONS
// RUN: %clang_cc1 -std=c++1z -verify -fsyntax-only -fblocks -fdelayed-template-parsing -fms-extensions %s -DMS_EXTENSIONS -DDELAYED_TEMPLATE_PARSING
template<class, class> constexpr bool is_same = false;
template<class T> constexpr bool is_same<T, T> = true;
template <class, class>
constexpr bool is_same = false;
template <class T>
constexpr bool is_same<T, T> = true;
namespace test_star_this {
namespace ns1 {
class A {
int x = 345;
auto foo() {
(void) [*this, this] { }; //expected-error{{'this' can appear only once}}
(void) [this] { ++x; };
(void) [*this] { ++x; }; //expected-error{{read-only variable}}
(void) [*this] () mutable { ++x; };
(void) [=] { return x; };
(void) [&, this] { return x; };
(void) [=, *this] { return x; };
(void) [&, *this] { return x; };
(void)[ *this, this ]{}; //expected-error{{'this' can appear only once}}
(void)[this] { ++x; };
(void)[*this] { ++x; }; //expected-error{{read-only variable}}
(void)[*this]() mutable { ++x; };
(void)[=] { return x; };
(void)[&, this ] { return x; };
(void)[ =, *this ] { return x; };
(void)[&, *this ] { return x; };
}
};
} // end ns1
} // namespace ns1
namespace ns2 {
class B {
B(const B&) = delete; //expected-note{{deleted here}}
int *x = (int *) 456;
void foo() {
(void)[this] { return x; };
(void)[*this] { return x; }; //expected-error{{call to deleted}}
}
};
} // end ns2
class B {
B(const B &) = delete; //expected-note{{deleted here}}
int *x = (int *)456;
void foo() {
(void)[this] { return x; };
(void)[*this] { return x; }; //expected-error{{call to deleted}}
}
};
} // namespace ns2
namespace ns3 {
class B {
B(const B&) = delete; //expected-note2{{deleted here}}
int *x = (int *) 456;
public:
template<class T = int>
void foo() {
(void)[this] { return x; };
(void)[*this] { return x; }; //expected-error2{{call to deleted}}
}
B() = default;
} b;
B *c = (b.foo(), nullptr); //expected-note{{in instantiation}}
} // end ns3
class B {
B(const B &) = delete; //expected-note2{{deleted here}}
int *x = (int *)456;
public:
template <class T = int>
void foo() {
(void)[this] { return x; };
(void)[*this] { return x; }; //expected-error2{{call to deleted}}
}
B() = default;
} b;
B *c = (b.foo(), nullptr); //expected-note{{in instantiation}}
} // namespace ns3
namespace ns4 {
template<class U>
template <class U>
class B {
B(const B&) = delete; //expected-note{{deleted here}}
B(const B &) = delete; //expected-note{{deleted here}}
double d = 3.14;
public:
template<class T = int>
public:
template <class T = int>
auto foo() {
const auto &L = [*this] (auto a) mutable { //expected-error{{call to deleted}}
d += a;
return [this] (auto b) { return d +=b; };
};
const auto &L = [*this](auto a) mutable { //expected-error{{call to deleted}}
d += a;
return [this](auto b) { return d += b; };
};
}
B() = default;
};
void main() {
B<int*> b;
B<int *> b;
b.foo(); //expected-note{{in instantiation}}
} // end main
} // end ns4
} // end main
} // namespace ns4
namespace ns5 {
struct X {
double d = 3.14;
X(const volatile X&);
X(const volatile X &);
void foo() {
}
void foo() const { //expected-note{{const}}
auto L = [*this] () mutable {
static_assert(is_same<decltype(this), X*>);
auto L = [*this]() mutable {
static_assert(is_same<decltype(this), X *>);
++d;
auto M = [this] {
static_assert(is_same<decltype(this), X*>);
auto M = [this] {
static_assert(is_same<decltype(this), X *>);
++d;
auto N = [] {
static_assert(is_same<decltype(this), X*>);
static_assert(is_same<decltype(this), X *>);
};
};
};
auto L1 = [*this] {
static_assert(is_same<decltype(this), const X*>);
auto M = [this] () mutable {
static_assert(is_same<decltype(this), const X*>);
auto L1 = [*this] {
static_assert(is_same<decltype(this), const X *>);
auto M = [this]() mutable {
static_assert(is_same<decltype(this), const X *>);
auto N = [] {
static_assert(is_same<decltype(this), const X*>);
static_assert(is_same<decltype(this), const X *>);
};
};
auto M2 = [*this] () mutable {
static_assert(is_same<decltype(this), X*>);
auto M2 = [*this]() mutable {
static_assert(is_same<decltype(this), X *>);
auto N = [] {
static_assert(is_same<decltype(this), X*>);
static_assert(is_same<decltype(this), X *>);
};
};
};
auto GL1 = [*this] (auto a) {
static_assert(is_same<decltype(this), const X*>);
auto M = [this] (auto b) mutable {
static_assert(is_same<decltype(this), const X*>);
auto N = [] (auto c) {
static_assert(is_same<decltype(this), const X*>);
auto GL1 = [*this](auto a) {
static_assert(is_same<decltype(this), const X *>);
auto M = [this](auto b) mutable {
static_assert(is_same<decltype(this), const X *>);
auto N = [](auto c) {
static_assert(is_same<decltype(this), const X *>);
};
return N;
};
auto M2 = [*this] (auto a) mutable {
static_assert(is_same<decltype(this), X*>);
auto N = [] (auto b) {
static_assert(is_same<decltype(this), X*>);
auto M2 = [*this](auto a) mutable {
static_assert(is_same<decltype(this), X *>);
auto N = [](auto b) {
static_assert(is_same<decltype(this), X *>);
};
return N;
};
return [=](auto a) mutable { M(a)(a); M2(a)(a); };
};
GL1("abc")("abc");
auto L2 = [this] () mutable {
static_assert(is_same<decltype(this), const X*>);
GL1("abc")
("abc");
auto L2 = [this]() mutable {
static_assert(is_same<decltype(this), const X *>);
++d; //expected-error{{cannot assign}}
};
auto GL = [*this] (auto a) mutable {
static_assert(is_same<decltype(this), X*>);
auto GL = [*this](auto a) mutable {
static_assert(is_same<decltype(this), X *>);
++d;
auto M = [this] (auto b) {
static_assert(is_same<decltype(this), X*>);
auto M = [this](auto b) {
static_assert(is_same<decltype(this), X *>);
++d;
auto N = [] (auto c) {
static_assert(is_same<decltype(this), X*>);
auto N = [](auto c) {
static_assert(is_same<decltype(this), X *>);
};
N(3.14);
};
M("abc");
};
GL(3.14);
}
void foo() volatile const {
auto L = [this] () {
static_assert(is_same<decltype(this), const volatile X*>);
auto M = [*this] () mutable {
static_assert(is_same<decltype(this), X*>);
auto L = [this]() {
static_assert(is_same<decltype(this), const volatile X *>);
auto M = [*this]() mutable {
static_assert(is_same<decltype(this), X *>);
auto N = [this] {
static_assert(is_same<decltype(this), X*>);
static_assert(is_same<decltype(this), X *>);
auto M = [] {
static_assert(is_same<decltype(this), X*>);
static_assert(is_same<decltype(this), X *>);
};
};
auto N2 = [*this] {
static_assert(is_same<decltype(this), const X*>);
static_assert(is_same<decltype(this), const X *>);
};
};
auto M2 = [*this] () {
static_assert(is_same<decltype(this), const X*>);
auto M2 = [*this]() {
static_assert(is_same<decltype(this), const X *>);
auto N = [this] {
static_assert(is_same<decltype(this), const X*>);
static_assert(is_same<decltype(this), const X *>);
};
};
};
}
};
} //end ns5
} // namespace ns5
namespace ns6 {
struct X {
double d;
auto foo() const {
auto L = [*this] () mutable {
auto M = [=] (auto a) {
auto L = [*this]() mutable {
auto M = [=](auto a) {
auto N = [this] {
++d;
static_assert(is_same<decltype(this), X*>);
static_assert(is_same<decltype(this), X *>);
auto O = [*this] {
static_assert(is_same<decltype(this), const X*>);
static_assert(is_same<decltype(this), const X *>);
};
};
N();
static_assert(is_same<decltype(this), X*>);
static_assert(is_same<decltype(this), X *>);
};
return M;
};
return L;
}
};
};
int main() {
auto L = X{}.foo();
auto M = L();
M(3.14);
}
} // end ns6
} // namespace ns6
namespace ns7 {
struct X {
double d;
X();
X(const X&);
X(X&) = delete;
X(const X &);
X(X &) = delete;
auto foo() const {
// OK - the object used to initialize our capture is const, so overload
// resolution selects the non-deleted X(const X &) constructor.
const auto &&L = [*this] { };
const auto &&L = [*this]{};
}
};
};
int main() {
X x;
x.foo();
}
} // end ns7
} // namespace ns7
} //end ns test_star_this
} // namespace test_star_this
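// The ns7 case above hinges on an overload-resolution detail: inside a const
// member function, the [*this] capture is copy-initialized from a const X, so
// only X(const X &) is viable and the deleted X(X &) is never considered. A
// minimal standalone sketch of the same rule, using a hypothetical type Y
// rather than the test's X:
struct Y {
  Y() = default;
  Y(const Y &) = default; // viable when the source object is const
  Y(Y &) = delete;        // preferred only for non-const lvalue sources
};

void demo(const Y &cy, Y &my) {
  auto ok = [copy = cy] {}; // OK: const source, copy uses Y(const Y &)
  // auto bad = [copy = my] {}; // ill-formed: non-const source selects the deleted Y(Y &)
  (void)ok;
  (void)my;
}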
namespace PR32831 {
// https://bugs.llvm.org/show_bug.cgi?id=32831
namespace ns1 {
template <typename Func>
void fun_template(Func func) {
(void)[&]() {
func(0);
};
}
class A {
void member_foo() {
(void)[this] {
(void)[this] {
fun_template(
[this](auto X) {
auto L = [this](auto Y) { member_foo(); };
L(5);
});
fun_template(
[this](auto) { member_foo(); });
};
};
}
};
} // namespace ns1
namespace ns2 {
struct B {
int data = 0;
template <class F>
void mem2(F f) {
(void)[&](auto f) {
(void)[&] { f(this->data); };
}
(f);
}
};
class A {
void member_foo() {
(void)[this] {
(void)[this] {
B{}.mem2(
[this](auto X) {
auto L = [this](auto Y) { member_foo(); };
L(5);
});
B{}.mem2(
[this](auto) { member_foo(); });
};
};
}
int data = 0;
auto m2() {
return [this] { return [] () -> decltype(data){ return 0; }; };
}
auto m3() {
return [] { return [] () -> decltype(data){ return 0; }; };
}
};
} // namespace ns2
} // namespace PR32831

View File

@ -1,6 +1,6 @@
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety %s
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety -std=c++98 %s
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety -std=c++11 %s
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety -std=c++11 %s -D CPP11
#define LOCKABLE __attribute__ ((lockable))
#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable))
@ -1513,3 +1513,15 @@ public:
} // end namespace FunctionAttributesInsideClass_ICE_Test
#ifdef CPP11
namespace CRASH_POST_R301735 {
class SomeClass {
public:
void foo() {
auto l = [this] { auto l = [] () EXCLUSIVE_LOCKS_REQUIRED(mu_) {}; };
}
Mutex mu_;
};
}
#endif

View File

@ -199,4 +199,4 @@ class rdar10142572 {
};
id rdar10142572::f() { return 0; } // okay: merged down
id __attribute__((ns_returns_retained)) rdar10142572::g() { return 0; } // expected-error{{function declared with the ns_returns_retained attribute was previously declared without the ns_returns_retained attribute}}
id __attribute__((ns_returns_retained)) rdar10142572::g() { return 0; } // expected-error{{function declared with 'ns_returns_retained' attribute was previously declared without the 'ns_returns_retained' attribute}}

View File

@ -3313,7 +3313,7 @@ clang_parseTranslationUnit_Impl(CXIndex CIdx, const char *source_filename,
Diags(CompilerInstance::createDiagnostics(new DiagnosticOptions));
if (options & CXTranslationUnit_KeepGoing)
Diags->setFatalsAsError(true);
Diags->setSuppressAfterFatalError(false);
// Recover resources if we crash before exiting this function.
llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,

View File

@ -46,27 +46,30 @@ TEST(DiagnosticTest, suppressAndTrap) {
EXPECT_FALSE(Diags.hasUnrecoverableErrorOccurred());
}
// Check that FatalsAsErrors works as intended
TEST(DiagnosticTest, fatalsAsErrors) {
DiagnosticsEngine Diags(new DiagnosticIDs(),
new DiagnosticOptions,
new IgnoringDiagConsumer());
Diags.setFatalsAsError(true);
// Check that SuppressAfterFatalError works as intended
TEST(DiagnosticTest, suppressAfterFatalError) {
for (unsigned Suppress = 0; Suppress != 2; ++Suppress) {
DiagnosticsEngine Diags(new DiagnosticIDs(),
new DiagnosticOptions,
new IgnoringDiagConsumer());
Diags.setSuppressAfterFatalError(Suppress);
// Diag that would set UncompilableErrorOccurred and ErrorOccurred.
Diags.Report(diag::err_target_unknown_triple) << "unknown";
// Diag that would set UnrecoverableErrorOccurred and ErrorOccurred.
Diags.Report(diag::err_cannot_open_file) << "file" << "error";
// Diag that would set UnrecoverableErrorOccurred and ErrorOccurred.
Diags.Report(diag::err_cannot_open_file) << "file" << "error";
// Diag that would set FatalErrorOccurred
// (via non-note following a fatal error).
Diags.Report(diag::warn_mt_message) << "warning";
// Diag that would set FatalErrorOccurred
// (via non-note following a fatal error).
Diags.Report(diag::warn_mt_message) << "warning";
EXPECT_TRUE(Diags.hasErrorOccurred());
EXPECT_TRUE(Diags.hasFatalErrorOccurred());
EXPECT_TRUE(Diags.hasUncompilableErrorOccurred());
EXPECT_TRUE(Diags.hasUnrecoverableErrorOccurred());
EXPECT_TRUE(Diags.hasErrorOccurred());
EXPECT_FALSE(Diags.hasFatalErrorOccurred());
EXPECT_TRUE(Diags.hasUncompilableErrorOccurred());
EXPECT_TRUE(Diags.hasUnrecoverableErrorOccurred());
// The warning should be emitted and counted only if we're not suppressing
// after fatal errors.
EXPECT_EQ(Diags.getNumWarnings(), Suppress ? 0u : 1u);
}
}
}
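The cindex change above and this unit test describe the same new knob: instead of demoting fatal errors to ordinary errors, a keep-going client now leaves them fatal but disables suppression of the diagnostics that follow. A minimal sketch of that configuration, assuming the same clang/Basic headers and common diagnostic IDs the test already uses:
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/DiagnosticOptions.h"

// Sketch only: mirrors the configuration the CXTranslationUnit_KeepGoing path
// above switches to; not part of this commit.
unsigned warningsAfterFatal() {
  clang::DiagnosticsEngine Diags(new clang::DiagnosticIDs,
                                 new clang::DiagnosticOptions,
                                 new clang::IgnoringDiagConsumer());
  // Keep the error fatal, but do not drop the diagnostics that follow it.
  Diags.setSuppressAfterFatalError(false);
  Diags.Report(clang::diag::err_cannot_open_file) << "file" << "error";
  Diags.Report(clang::diag::warn_mt_message) << "warning";
  return Diags.getNumWarnings(); // 1: the warning survives the fatal error
}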