//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      CurFn(nullptr), ReturnValue(Address::invalid()),
      CapturedStmtInfo(nullptr), SanOpts(CGM.getLangOpts().Sanitize),
      IsSanitizerScope(false), CurFuncIsThunk(false), AutoreleaseResult(false),
      SawAsmBlock(false), IsOutlinedSEHHelper(false), BlockInfo(nullptr),
      BlockPointer(nullptr), LambdaThisCaptureField(nullptr),
      NormalCleanupDest(nullptr), NextCleanupDestIndex(1),
      FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
      EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
      DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
      PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
      CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
      NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
      CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
                                                     AlignmentSource *Source) {
  return getNaturalTypeAlignment(T->getPointeeType(), Source,
                                 /*forPointee*/ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   AlignmentSource *Source,
                                                   bool forPointeeType) {
  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (Source) *Source = AlignmentSource::AttributedType;
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (Source) *Source = AlignmentSource::Type;

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  AlignmentSource AlignSource;
  CharUnits Alignment = getNaturalTypeAlignment(T, &AlignSource);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), AlignSource,
                          CGM.getTBAAInfo(T));
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  AlignmentSource AlignSource;
  CharUnits Align = getNaturalTypeAlignment(T, &AlignSource, /*pointee*/ true);
  return MakeAddrLValue(Address(V, Align), T, AlignSource);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
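
// Illustrative note (not part of the original source): under this
// classification, 'int' or 'float *' is TEK_Scalar, '_Complex double' is
// TEK_Complex, and a struct, C array, or Objective-C object is TEK_Aggregate;
// an '_Atomic(int)' is classified by its underlying 'int'.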

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  auto NL = ApplyDebugLocation::CreateArtificial(*this);
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}

static void removeImageAccessQualifier(std::string& TyName) {
  std::string ReadOnlyQual("__read_only");
  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
  if (ReadOnlyPos != std::string::npos)
    // "+ 1" for the space after access qualifier.
    TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
  else {
    std::string WriteOnlyQual("__write_only");
    std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
    if (WriteOnlyPos != std::string::npos)
      TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
    else {
      std::string ReadWriteQual("__read_write");
      std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
      if (ReadWritePos != std::string::npos)
        TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
    }
  }
}
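
// Illustrative note (not part of the original source): given a type name such
// as "__read_only image2d_t", the helper above erases the access qualifier
// plus the following space, leaving "image2d_t".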

// Returns the address space id that should be emitted in the
// kernel_arg_addr_space metadata. This is always fixed to the ids
// specified in the SPIR 2.0 specification, in order to let, for example,
// a clGetKernelArgInfo() implementation differentiate between the address
// spaces on targets that have no unique mapping to the OpenCL address
// spaces (basically all single-AS CPUs).
static unsigned ArgInfoAddressSpace(unsigned LangAS) {
  switch (LangAS) {
  case LangAS::opencl_global:   return 1;
  case LangAS::opencl_constant: return 2;
  case LangAS::opencl_local:    return 3;
  case LangAS::opencl_generic:  return 4; // Not in SPIR 2.0 specs.
  default:
    return 0; // Assume private.
  }
}
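
// Illustrative note (not part of the original source): with this fixed
// mapping, a '__constant float *' kernel argument is always reported as
// address space 2 in the metadata, regardless of which LLVM address space
// the target actually uses for __constant data.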

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as there are kernel arguments.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
        ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      bool isPipe = ty->isPipeType();
      if (ty->isImageType() || isPipe)
        AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName;
      if (isPipe)
        typeName = ty.getCanonicalType()->getAs<PipeType>()->getElementType()
                     .getAsString(Policy);
      else
        typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      std::string baseTypeName;
      if (isPipe)
        baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
                          ->getElementType().getCanonicalType()
                          .getAsString(Policy);
      else
        baseTypeName =
          ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Remove access qualifiers on images
      // (as they are inseparable from the type in the clang implementation,
      // but the OpenCL spec provides a special query to get the access
      // qualifier via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
      if (ty->isImageType()) {
        removeImageAccessQualifier(typeName);
        removeImageAccessQualifier(baseTypeName);
      }

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
      if (isPipe)
        typeQuals = "pipe";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image and pipe access qualifier:
    if (ty->isImageType() || ty->isPipeType()) {
      const OpenCLAccessAttr *A = parm->getAttr<OpenCLAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else if (A && A->isReadWrite())
        accessQuals.push_back(llvm::MDString::get(Context, "read_write"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(Context, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(Context, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(Context, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(Context, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(Context, argNames));
}
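
// Illustrative note (not part of the original source): for a hypothetical
// kernel such as
//   kernel void foo(global int *A, read_only image2d_t img) { ... }
// the code above attaches metadata roughly of the form
//   !kernel_arg_addr_space !{i32 1, i32 1}
//   !kernel_arg_access_qual !{!"none", !"read_only"}
//   !kernel_arg_type       !{!"int*", !"image2d_t"}
//   !kernel_arg_type_qual  !{!"", !""}
// with !kernel_arg_name emitted only when -cl-kernel-arg-info is given.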

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType hintQTy = A->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
        hintQTy->isSignedIntegerType() ||
        (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *attrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, attrMDArgs));
  }
}
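
// Illustrative note (not part of the original source): a kernel declared as
//   __attribute__((reqd_work_group_size(8, 4, 1))) kernel void bar() { ... }
// would receive metadata roughly of the form
//   !reqd_work_group_size !{i32 8, i32 4, i32 1}
// and similarly for work_group_size_hint and vec_type_hint.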

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}
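
// Illustrative note (not part of the original source): a body written as
// "{ foo(); return 0; }" ends with a return statement, while "{ foo(); }" or
// a body whose last statement is "if (c) return x;" does not, because only
// the final top-level statement of the compound body is inspected.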

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.isInSanitizerBlacklist(Fn, Loc))
    SanOpts.clear();

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>())
      SanOpts.Mask &= ~Attr->getMask();
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.has(SanitizerKind::Memory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct and all of their callees at run time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
        Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
      }
    }
  }

  // Apply xray attributes to the function (as a string, for now)
  if (D && ShouldXRayInstrumentFunction()) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (XRayAttr->alwaysXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-always");
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
    } else {
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (CGM.getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
      CGM.getOpenMPRuntime().emitDeclareSimdFunction(FD, Fn);

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig =
              CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
        llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert a mcount call in the backend.
  // The attribute "counting-function" is set to the mcount function name,
  // which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    Fn->addFnAttr("counting-function", getTarget().getMCountName());

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }

  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}
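
// Illustrative note (not part of the original source): with profile
// instrumentation enabled, a block that can also be reached by fall-through
// is emitted roughly as
//     br label %skipcount   ; fall-through edge bypasses the counter update
//   bb:                     ; explicit branches (e.g. from the switch) land here
//     ...counter increment...
//     br label %skipcount
//   skipcount:
//     ...statement body...
// so the instrumentation counter does not include fall-through entries.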

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;