//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/DeclOpenMP.h"
#include "llvm/IR/CallSite.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope final : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>())
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
  }

public:
  OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
                  bool AsInlined = false)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    emitPreInitStmt(CGF, S);
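    // When the construct is emitted inline (no outlined function), remap the
    // captured variables so that references inside the region resolve to the
    // enclosing function's copies of those variables.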
    if (AsInlined) {
      if (S.hasAssociatedStmt()) {
        auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
        for (auto &C : CS->captures()) {
          if (C.capturesVariable() || C.capturesVariableByCopy()) {
            auto *VD = C.getCapturedVar();
            DeclRefExpr DRE(const_cast<VarDecl *>(VD),
                            isCapturedVar(CGF, VD) ||
                                (CGF.CapturedStmtInfo &&
                                 InlinedShareds.isGlobalVarCaptured(VD)),
                            VD->getType().getNonReferenceType(), VK_LValue,
                            SourceLocation());
            InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
              return CGF.EmitLValue(&DRE).getAddress();
            });
          }
        }
        (void)InlinedShareds.Privatize();
      }
    }
  }
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
        for (const auto *I : PreInits->decls())
          CGF.EmitVarDecl(cast<VarDecl>(*I));
      }
    }
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

} // namespace

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  auto &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (auto *VAT = C.getAsVariableArrayType(Ty)) {
      llvm::Value *ArraySize;
      std::tie(ArraySize, Ty) = getVLASize(VAT);
      Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
    }
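    // At this point Size holds the product of all runtime VLA dimension sizes
    // and Ty is the innermost non-VLA element type; multiply by that type's
    // static size to get the total size in chars.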
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  } else
    Size = CGM.getSize(SizeInChars);
  return Size;
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      auto *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis())
      CapturedVars.push_back(CXXThisValue);
    else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV =
          EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
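      // This is done by storing the value into a uintptr-sized temporary
      // through a pointer of the original type and then reloading the
      // temporary as a uintptr, so the bits survive the round trip through
      // the runtime's pointer-sized argument.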
      if (!CurField->getType()->isAnyPointerType()) {
        auto &Ctx = getContext();
        auto DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName()) + ".casted");
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        auto *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), SourceLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
                                    StringRef Name, LValue AddrLV,
                                    bool isReferenceType = false) {
  ASTContext &Ctx = CGF.getContext();

  auto *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), SourceLocation());
  auto TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress();

  // If we are dealing with references we need to return the address of the
  // reference instead of the address of the value.
  if (isReferenceType) {
    QualType RefType = Ctx.getLValueReferenceType(DstType);
    auto *RefVal = TmpAddr.getPointer();
    TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
    auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
    CGF.EmitScalarInit(RefVal, TmpLVal);
  }

  return TmpAddr;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = S.captures().begin();
  for (auto *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. The VLA type sizes are passed to the outlined
    // function in the same way.
    if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
        I->capturesVariableArrayType())
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis())
      II = &getContext().Idents.get("this");
    else {
      assert(I->capturesVariableArrayType());
      II = &getContext().Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getContext().getVariableArrayDecayedType(ArgType);
    Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
                                             FD->getLocation(), II, ArgType));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  FunctionType::ExtInfo ExtInfo;
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F = llvm::Function::Create(
      FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
      CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getLocStart());
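  // Unpack the captured values inside the outlined function: map each record
  // field back to a local declaration, undoing the uintptr packing performed
  // by GenerateOpenMPCapturedVars above.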
  unsigned Cnt = CD->getContextParamPosition();
  I = S.captures().begin();
  for (auto *FD : RD->fields()) {
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      setAddrOfLocalVar(I->getCapturedVar(), GetAddrOfLocalVar(Args[Cnt]));
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal =
        MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
                       AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      LValue CastedArgLVal =
          MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
                                              Args[Cnt]->getName(), ArgLVal),
                         FD->getType(), AlignmentSource::Decl);
      auto *ExprArg =
          EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    } else if (I->capturesVariable()) {
      auto *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress();
      if (!VarTy->isReferenceType()) {
        ArgAddr = EmitLoadOfReference(
            ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
      }
      setAddrOfLocalVar(
          Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      auto *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      setAddrOfLocalVar(Var, castValueFromUintptr(*this, FD->getType(),
                                                  Args[Cnt]->getName(), ArgLVal,
                                                  VarTy->isReferenceType()));
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue =
          EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
    }
    ++Cnt;
    ++I;
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  auto SrcBegin = SrcAddr.getPointer();
  auto DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

/// Check if the combiner is a call to a UDR combiner and, if so, return the
/// UDR decl used for the reduction.
static const OMPDeclareReductionDecl *
getReductionInit(const Expr *ReductionOp) {
  if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
    if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
      if (auto *DRE =
              dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
        if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
          return DRD;
  return nullptr;
}

static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
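  // If the UDR provides an initializer, call it with the private copy bound
  // to 'omp_priv' (the LHS argument) and the original variable bound to
  // 'omp_orig' (the RHS argument); otherwise fall back to initializing the
  // private copy from a null constant of the reduction type.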
  if (DRD->getInitializer()) {
    std::pair<llvm::Function *, llvm::Function *> Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    auto *CE = cast<CallExpr>(InitOp);
    auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
    auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
                            [=]() -> Address { return Private; });
    PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
                            [=]() -> Address { return Original; });
    (void)PrivateScope.Privatize();
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, ".init");
    LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
    RValue InitRVal;
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
      break;
    case TEK_Aggregate:
      InitRVal = RValue::getAggregate(LV.getAddress());
      break;
    }
    OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
    CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
    CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                         /*IsInitializer=*/false);
  }
}

/// \brief Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param Init Initial expression of array.
/// \param SrcAddr Address of the original array.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, const Expr *Init,
                                 Address SrcAddr = Address::invalid()) {
  auto *DRD = getReductionInit(Init);
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  auto ArrayTy = Type->getAsArrayTypeUnsafe();
  auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
  if (DRD)
    SrcAddr =
        CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.getPointer();
  auto DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  auto IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI,
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
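    // Prefer the user-defined reduction initializer when one is available
    // (or when there is no init expression at all); otherwise emit the
    // default initializer for each element.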
    if (DRD && (DRD->getInitializer() || !Init)) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }

  if (DRD) {
    // Shift the address forward by one element.
    auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }

  // Shift the address forward by one element.
  auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  auto Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // We are working with a single array element, so we have to remap
            // the destination and source variables to the corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() -> Address {
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> Address { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool FirstprivateIsLastprivate = false;
  llvm::DenseSet<const VarDecl *> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.insert(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      auto *CapFD = CapturesInfo.lookup(OrigVD);
      auto *FD = CapturedStmtInfo->lookup(OrigVD);
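      // If the variable is captured by value (a non-reference field) and is
      // not also lastprivate, the copy made when passing it to the outlined
      // function already serves as the firstprivate copy, so no extra private
      // variable needs to be emitted here.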
      if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
          !FD->getType()->isReferenceType()) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        Address OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](Address DestElement,
                                       Address SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    setAddrOfLocalVar(VDInit, SrcElement);
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
            setAddrOfLocalVar(VDInit, OriginalAddr);
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> Address {
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
                          VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread.
          // If it is, there is no need to copy the data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of the copying procedure for the non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
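  // Collect the loop control variables of a simd directive: they are
  // privatized as loop counters elsewhere, so no separate lastprivate copy
  // should be created for them below.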
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (auto *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    // Task loops do not require additional initialization; it is done by the
    // runtime support library.
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
      break;
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (auto F : LoopDirective->finals()) {
      auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable of a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      getNaturalTypeAlignment(RefTy->getPointeeType()));
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (auto *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

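/// Rebuild the pointer/reference structure of \p BaseTy around \p Addr:
/// materialize a chain of temporaries mirroring each pointer or reference
/// level, store \p Addr at the innermost level, and return the outermost
/// address so it can stand in for the original base.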
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV, llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      MostTopTmp = Tmp;
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }
  llvm::Type *Ty = BaseLV.getPointer()->getType();
  if (Tmp.isValid())
    Ty = Tmp.getElementType();
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
  if (Tmp.isValid()) {
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }
  return Address(Addr, BaseLV.getAlignment());
}

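/// Load through each pointer/reference level of \p BaseTy starting from
/// \p BaseLV until the element type \p ElTy is reached, and return an lvalue
/// for the first element of that type.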
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    if (auto *PtrTy = BaseTy->getAs<PointerType>())
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
    else {
      BaseLV = CGF.EmitLoadOfReferenceLValue(BaseLV.getAddress(),
                                             BaseTy->castAs<ReferenceType>());
    }
    BaseTy = BaseTy->getPointeeType();
  }
  return CGF.MakeAddrLValue(
      Address(
          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
              BaseLV.getPointer(), CGF.ConvertTypeForMem(ElTy)->getPointerTo()),
          BaseLV.getAlignment()),
      BaseLV.getType(), BaseLV.getAlignmentSource());
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    for (auto IRef : C->varlists()) {
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
      auto *DRD = getReductionInit(*IRed);
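      // Three forms of reduction item are handled below: array sections,
      // array subscripts, and whole variables (scalars or full arrays).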
      if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
        auto *Base = OASE->getBase()->IgnoreParenImpCasts();
        while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
          Base = TempOASE->getBase()->IgnoreParenImpCasts();
        while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
          Base = TempASE->getBase()->IgnoreParenImpCasts();
        auto *DE = cast<DeclRefExpr>(Base);
        auto *OrigVD = cast<VarDecl>(DE->getDecl());
        auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
        auto OASELValueUB =
            EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
        auto OriginalBaseLValue = EmitLValue(DE);
        LValue BaseLValue =
            loadToBegin(*this, OrigVD->getType(), OASELValueLB.getType(),
                        OriginalBaseLValue);
        // Store the address of the original variable associated with the LHS
        // implicit variable.
        PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
          return OASELValueLB.getAddress();
        });
        // Emit reduction copy.
        bool IsRegistered = PrivateScope.addPrivate(
            OrigVD, [this, OrigVD, PrivateVD, BaseLValue, OASELValueLB,
                     OASELValueUB, OriginalBaseLValue, DRD, IRed]() -> Address {
              // Emit VarDecl with copy init for arrays.
              // Get the address of the original variable captured in current
              // captured region.
              auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
                                                 OASELValueLB.getPointer());
              Size = Builder.CreateNUWAdd(
                  Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
              CodeGenFunction::OpaqueValueMapping OpaqueMap(
                  *this, cast<OpaqueValueExpr>(
                             getContext()
                                 .getAsVariableArrayType(PrivateVD->getType())
                                 ->getSizeExpr()),
                  RValue::get(Size));
              EmitVariablyModifiedType(PrivateVD->getType());
              auto Emission = EmitAutoVarAlloca(*PrivateVD);
              auto Addr = Emission.getAllocatedAddress();
              auto *Init = PrivateVD->getInit();
              EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
                                   DRD ? *IRed : Init,
                                   OASELValueLB.getAddress());
              EmitAutoVarCleanups(Emission);
              // Emit private VarDecl with reduction init.
              auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
                                                   OASELValueLB.getPointer());
              auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
              return castToBase(*this, OrigVD->getType(),
                                OASELValueLB.getType(), OriginalBaseLValue,
                                Ptr);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
        PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
          return GetAddrOfLocalVar(PrivateVD);
        });
      } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
        auto *Base = ASE->getBase()->IgnoreParenImpCasts();
        while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
          Base = TempASE->getBase()->IgnoreParenImpCasts();
        auto *DE = cast<DeclRefExpr>(Base);
        auto *OrigVD = cast<VarDecl>(DE->getDecl());
        auto ASELValue = EmitLValue(ASE);
        auto OriginalBaseLValue = EmitLValue(DE);
        LValue BaseLValue = loadToBegin(
            *this, OrigVD->getType(), ASELValue.getType(), OriginalBaseLValue);
        // Store the address of the original variable associated with the LHS
        // implicit variable.
        PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
          return ASELValue.getAddress();
        });
        // Emit reduction copy.
        bool IsRegistered = PrivateScope.addPrivate(
            OrigVD, [this, OrigVD, PrivateVD, BaseLValue, ASELValue,
                     OriginalBaseLValue, DRD, IRed]() -> Address {
              // Emit private VarDecl with reduction init.
              AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
              auto Addr = Emission.getAllocatedAddress();
              if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
                emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
                                                 ASELValue.getAddress(),
                                                 ASELValue.getType());
              } else
                EmitAutoVarInit(Emission);
              EmitAutoVarCleanups(Emission);
              auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
                                                   ASELValue.getPointer());
              auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
              return castToBase(*this, OrigVD->getType(), ASELValue.getType(),
                                OriginalBaseLValue, Ptr);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
        PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
          return Builder.CreateElementBitCast(
              GetAddrOfLocalVar(PrivateVD), ConvertTypeForMem(RHSVD->getType()),
              "rhs.begin");
        });
      } else {
        auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        QualType Type = PrivateVD->getType();
        if (getContext().getAsArrayType(Type)) {
          // Store the address of the original variable associated with the LHS
          // implicit variable.
          DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          IRef->getType(), VK_LValue, IRef->getExprLoc());
          Address OriginalAddr = EmitLValue(&DRE).getAddress();
          PrivateScope.addPrivate(LHSVD, [this, &OriginalAddr,
                                          LHSVD]() -> Address {
            OriginalAddr = Builder.CreateElementBitCast(
                OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
            return OriginalAddr;
          });
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            if (Type->isVariablyModifiedType()) {
              CodeGenFunction::OpaqueValueMapping OpaqueMap(
                  *this, cast<OpaqueValueExpr>(
                             getContext()
                                 .getAsVariableArrayType(PrivateVD->getType())
                                 ->getSizeExpr()),
                  RValue::get(
                      getTypeSize(OrigVD->getType().getNonReferenceType())));
              EmitVariablyModifiedType(Type);
            }
            auto Emission = EmitAutoVarAlloca(*PrivateVD);
            auto Addr = Emission.getAllocatedAddress();
            auto *Init = PrivateVD->getInit();
            EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
                                 DRD ? *IRed : Init, OriginalAddr);
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
          assert(IsRegistered && "private var already registered as private");
          // Silence the warning about unused variable.
          (void)IsRegistered;
          PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
            return Builder.CreateElementBitCast(
                GetAddrOfLocalVar(PrivateVD),
                ConvertTypeForMem(RHSVD->getType()), "rhs.begin");
          });
        } else {
          // Store the address of the original variable associated with the LHS
          // implicit variable.
          Address OriginalAddr = Address::invalid();
          PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef,
                                          &OriginalAddr]() -> Address {
            DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                            CapturedStmtInfo->lookup(OrigVD) != nullptr,
                            IRef->getType(), VK_LValue, IRef->getExprLoc());
            OriginalAddr = EmitLValue(&DRE).getAddress();
            return OriginalAddr;
          });
          // Emit reduction copy.
          bool IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, PrivateVD, OriginalAddr, DRD, IRed]() -> Address {
                // Emit private VarDecl with reduction init.
                AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
                auto Addr = Emission.getAllocatedAddress();
                if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
                  emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
                                                   OriginalAddr,
                                                   PrivateVD->getType());
                } else
                  EmitAutoVarInit(Emission);
                EmitAutoVarCleanups(Emission);
                return Addr;
              });
          assert(IsRegistered && "private var already registered as private");
          // Silence the warning about unused variable.
          (void)IsRegistered;
          PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
            return GetAddrOfLocalVar(PrivateVD);
          });
        }
      }
      ++ILHS;
      ++IRHS;
      ++IPriv;
      ++IRed;
    }
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit a nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause<OMPNowaitClause>() ||
            isOpenMPParallelDirective(D.getDirectiveKind()) ||
            D.getDirectiveKind() == OMPD_simd,
        D.getDirectiveKind() == OMPD_simd);
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (auto *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (auto *Cond = CondGen(CGF)) {
          // On finding the first post-update expression, emit the conditional
          // block if one was requested.
          auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           OpenMPDirectiveKind InnermostKind,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOrTeamsOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
  }
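  // Look for an 'if' clause that applies to this parallel region: one with no
  // name modifier or one explicitly modified with 'parallel'.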
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPLexicalScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedVars, IfCond);
}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // when propagating the master thread's values of threadprivate
      // variables to the local instances of those variables in all other
      // implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
  emitPostUpdateForReductionClause(
      *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counter values for the current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto *U : C->updates())
      EmitIgnoredExpr(U);
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}

void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock, Builder.getCurrentDebugLocation());

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return;
  // Emit inits for the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto *Init : C->inits()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
        auto *OrigVD = cast<VarDecl>(Ref->getDecl());
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        VD->getInit()->getType(), VK_LValue,
                        VD->getInit()->getExprLoc());
        EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
                                                VD->getType()),
                       /*capturedByInit=*/false);
        EmitAutoVarCleanups(Emission);
      } else
        EmitVarDecl(*VD);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}

EmitOMPLinearClauseFinal(const OMPLoopDirective & D,const llvm::function_ref<llvm::Value * (CodeGenFunction &)> & CondGen)1353 void CodeGenFunction::EmitOMPLinearClauseFinal(
1354 const OMPLoopDirective &D,
1355 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1356 if (!HaveInsertPoint())
1357 return;
1358 llvm::BasicBlock *DoneBB = nullptr;
1359 // Emit the final values of the linear variables.
1360 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1361 auto IC = C->varlist_begin();
1362 for (auto *F : C->finals()) {
1363 if (!DoneBB) {
1364 if (auto *Cond = CondGen(*this)) {
1365 // If the first post-update expression is found, emit conditional
1366 // block if it was requested.
1367 auto *ThenBB = createBasicBlock(".omp.linear.pu");
1368 DoneBB = createBasicBlock(".omp.linear.pu.done");
1369 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1370 EmitBlock(ThenBB);
1371 }
1372 }
1373 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1374 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1375 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1376 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1377 Address OrigAddr = EmitLValue(&DRE).getAddress();
1378 CodeGenFunction::OMPPrivateScope VarScope(*this);
1379 VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
1380 (void)VarScope.Privatize();
1381 EmitIgnoredExpr(F);
1382 ++IC;
1383 }
1384 if (auto *PostUpdate = C->getPostUpdateExpr())
1385 EmitIgnoredExpr(PostUpdate);
1386 }
1387 if (DoneBB)
1388 EmitBlock(DoneBB, /*IsFinished=*/true);
1389 }
1390
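/// Emit alignment assumptions for the pointers listed in 'aligned' clauses.
/// For example, '#pragma omp simd aligned(p : 64)' produces an assumption
/// that 'p' is 64-byte aligned; with no explicit alignment, the target's
/// default SIMD alignment for the pointee type is used instead.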
static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
    for (auto E : Clause->varlists()) {
      unsigned Alignment = ClauseAlignment;
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}

void CodeGenFunction::EmitOMPPrivateLoopCounters(
    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
  if (!HaveInsertPoint())
    return;
  auto I = S.private_counters().begin();
  for (auto *E : S.counters()) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    (void)LoopScope.addPrivate(VD, [&]() -> Address {
      // Emit var without initialization.
      if (!LocalDeclMap.count(PrivateVD)) {
        auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
        EmitAutoVarCleanups(VarEmission);
      }
      DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
                      /*RefersToEnclosingVariableOrCapture=*/false,
                      (*I)->getType(), VK_LValue, (*I)->getExprLoc());
      return EmitLValue(&DRE).getAddress();
    });
    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
        VD->hasGlobalStorage()) {
      (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
        DeclRefExpr DRE(const_cast<VarDecl *>(VD),
                        LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                        E->getType(), VK_LValue, E->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
    }
    ++I;
  }
}

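/// Emit a check of the loop pre-condition (i.e. that the loop body executes
/// at least once), using temporary private copies of the loop counters so
/// that evaluating the initial counter values has no visible side effects.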
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (auto I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

void CodeGenFunction::EmitOMPLinearClause(
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (auto *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (auto *E : C->varlists()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
        bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
          // Emit private VarDecl with copy init.
          EmitVarDecl(*PrivateVD);
          return GetAddrOfLocalVar(PrivateVD);
        });
        assert(IsRegistered && "linear var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      } else
        EmitVarDecl(*PrivateVD);
      ++CurPrivate;
    }
  }
}

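/// Translate 'simdlen'/'safelen' clauses into loop metadata: both set the
/// preferred vectorization width, but a finite 'safelen' additionally keeps
/// memory accesses from being marked parallel, since loop-carried
/// dependences at distances beyond 'safelen' iterations remain possible.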
static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process simdlen/safelen.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable(true);
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
}

void CodeGenFunction::EmitOMPSimdFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (auto F : D.finals()) {
    auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
        OrigVD->hasGlobalStorage() || CED) {
      if (!DoneBB) {
        if (auto *Cond = CondGen(*this)) {
          // When the first final expression is reached, emit the conditional
          // block if one was requested.
          auto *ThenBB = createBasicBlock(".omp.final.then");
          DoneBB = createBasicBlock(".omp.final.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      Address OrigAddr = Address::invalid();
      if (CED)
        OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
      else {
        DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
                        /*RefersToEnclosingVariableOrCapture=*/false,
                        (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
        OrigAddr = EmitLValue(&DRE).getAddress();
      }
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> Address { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
    ++IPC;
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

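// A rough sketch of the lowering performed below for '#pragma omp simd':
// the loop is emitted as a plain inner loop guarded by its pre-condition,
// with vectorization enabled via loop metadata (through LoopStack) rather
// than runtime calls.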
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    OMPLoopScope PreInitScope(CGF, S);
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
      ContBlock = CGF.createBasicBlock("simd.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate the iterations count
    // on each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    CGF.EmitOMPSimdInit(S);

    emitAlignedClause(CGF, S);
    CGF.EmitOMPLinearClauseInit(S);
    {
      OMPPrivateScope LoopScope(CGF);
      CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
      CGF.EmitOMPLinearClause(S, LoopScope);
      CGF.EmitOMPPrivateClause(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      bool HasLastprivateClause =
          CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                           S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S, JumpDest());
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
      CGF.EmitOMPSimdFinal(
          S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
      // Emit final copy of the lastprivate variables at the end of loops.
      if (HasLastprivateClause)
        CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
      CGF.EmitOMPReductionClauseFinal(S);
      emitPostUpdateForReductionClause(
          CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
    }
    CGF.EmitOMPLinearClauseFinal(
        S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

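// Emits the dispatch skeleton shared by worksharing and distribute loops:
// an outer 'omp.dispatch.*' loop that obtains work chunks (from the runtime
// for dynamic/ordered schedules, by bound arithmetic for static chunked
// ones) and runs the inner loop over each chunk.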
void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock, Builder.getCurrentDebugLocation());

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB)
    EmitIgnoredExpr(S.getEnsureUpperBound());
    // IV = LB
    EmitIgnoredExpr(S.getInit());
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL,
                                 LB, UB, ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  auto LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(S.getInit());

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Generate !llvm.loop.parallel metadata for loads and stores for loops
  // with dynamic/guided scheduling and without ordered clause.
  if (!isOpenMPSimdDirective(S.getDirectiveKind()))
    LoopStack.setParallel(!IsMonotonic);
  else
    EmitOMPSimdInit(S, IsMonotonic);

  SourceLocation Loc = S.getLocStart();
  EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
                   [&S, LoopExit](CodeGenFunction &CGF) {
                     CGF.EmitOMPLoopBody(S, LoopExit);
                     CGF.EmitStopPoint(&S);
                   },
                   [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
                     if (Ordered) {
                       CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
                           CGF, Loc, IVSize, IVSigned);
                     }
                   });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(S.getNextLowerBound());
    EmitIgnoredExpr(S.getNextUpperBound());
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  if (!DynamicOrOrdered)
    RT.emitForStaticFinish(*this, S.getLocEnd());
}

void CodeGenFunction::EmitOMPForOuterLoop(
    const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered =
      Ordered || RT.isDynamic(ScheduleKind.Schedule);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind.Schedule,
                                 /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
    RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
                           IVSigned, Ordered, UBVal, Chunk);
  } else {
    RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
                         Ordered, IL, LB, UB, ST, Chunk);
  }

  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, Ordered, LB, UB,
                   ST, IL, Chunk);
}

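// The distribute outer loop reuses the same skeleton; only static schedules
// are possible here, so the runtime is initialized with
// emitDistributeStaticInit and no dispatch calls are needed.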
void CodeGenFunction::EmitOMPDistributeOuterLoop(
    OpenMPDistScheduleClauseKind ScheduleKind,
    const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
    Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {

  auto &RT = CGM.getOpenMPRuntime();

  // Emit outer loop.
  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
  // dynamic.
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
                              IVSize, IVSigned, /* Ordered = */ false,
                              IL, LB, UB, ST, Chunk);

  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,
                   S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk);
}

void CodeGenFunction::EmitOMPDistributeParallelForDirective(
    const OMPDistributeParallelForDirective &S) {
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this, OMPD_distribute_parallel_for,
      [&S](CodeGenFunction &CGF, PrePostActionTy &) {
        OMPLoopScope PreInitScope(CGF, S);
        CGF.EmitStmt(
            cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
      });
}

void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
    const OMPDistributeParallelForSimdDirective &S) {
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this, OMPD_distribute_parallel_for_simd,
      [&S](CodeGenFunction &CGF, PrePostActionTy &) {
        OMPLoopScope PreInitScope(CGF, S);
        CGF.EmitStmt(
            cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
      });
}

void CodeGenFunction::EmitOMPDistributeSimdDirective(
    const OMPDistributeSimdDirective &S) {
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this, OMPD_distribute_simd,
      [&S](CodeGenFunction &CGF, PrePostActionTy &) {
        OMPLoopScope PreInitScope(CGF, S);
        CGF.EmitStmt(
            cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
      });
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
    const OMPTargetParallelForSimdDirective &S) {
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this, OMPD_target_parallel_for_simd,
      [&S](CodeGenFunction &CGF, PrePostActionTy &) {
        OMPLoopScope PreInitScope(CGF, S);
        CGF.EmitStmt(
            cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
      });
}

/// \brief Emit a helper variable and return the corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

namespace {
struct ScheduleKindModifiersTy {
  OpenMPScheduleClauseKind Kind;
  OpenMPScheduleClauseModifier M1;
  OpenMPScheduleClauseModifier M2;
  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                          OpenMPScheduleClauseModifier M1,
                          OpenMPScheduleClauseModifier M2)
      : Kind(Kind), M1(M1), M2(M2) {}
};
} // namespace

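/// Emit a worksharing loop ('for'/'for simd' and their combined forms).
/// Returns true if the directive has a 'lastprivate' clause, so callers can
/// decide whether an implicit barrier is still required. Two paths exist:
/// static non-chunked schedules get a single chunk and a plain inner loop,
/// while all other schedules go through EmitOMPForOuterLoop.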
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
  // Emit the loop iteration variable.
  auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  auto &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    bool Ordered = false;
    if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
      if (OrderedClause->getNumForLoops())
        RT.emitDoacrossInit(*this, S);
      else
        Ordered = true;
    }

    llvm::DenseSet<const Expr *> EmittedFinals;
    emitAlignedClause(*this, S);
    EmitOMPLinearClauseInit(S);
    // Emit helper vars inits.
    LValue LB =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
    LValue UB =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
    LValue ST =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
    LValue IL =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

    // Emit 'then' code.
    {
      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      EmitOMPLinearClause(S, LoopScope);
      (void)LoopScope.Privatize();

      // Detect the loop schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPScheduleTy ScheduleKind;
      if (auto *C = S.getSingleClause<OMPScheduleClause>()) {
        ScheduleKind.Schedule = C->getScheduleKind();
        ScheduleKind.M1 = C->getFirstScheduleModifier();
        ScheduleKind.M2 = C->getSecondScheduleModifier();
        if (const auto *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getLocStart());
        }
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      // OpenMP 4.5, 2.7.1 Loop Construct, Description.
      // If the static schedule kind is specified or if the ordered clause is
      // specified, and if no monotonic modifier is specified, the effect will
      // be as if the monotonic modifier was specified.
      if (RT.isStaticNonchunked(ScheduleKind.Schedule,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
        if (isOpenMPSimdDirective(S.getDirectiveKind()))
          EmitOMPSimdInit(S, /*IsMonotonic=*/true);
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided
        // into chunks that are approximately equal in size, and at most one
        // chunk is distributed to each thread. Note that the size of the
        // chunks is unspecified in this case.
        RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
                             IVSize, IVSigned, Ordered,
                             IL.getAddress(), LB.getAddress(),
                             UB.getAddress(), ST.getAddress());
        auto LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S, LoopExit](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, LoopExit);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        const bool IsMonotonic =
            Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
            ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
            ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
            ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
                            LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S,
                         [&](CodeGenFunction &CGF) -> llvm::Value * {
                           return CGF.Builder.CreateIsNotNull(
                               CGF.EmitLoadOfScalar(IL, S.getLocStart()));
                         });
      }
      EmitOMPReductionClauseFinal(S);
      // Emit post-update of the reduction variables if IsLastIter != 0.
      emitPostUpdateForReductionClause(
          *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
            return CGF.Builder.CreateIsNotNull(
                CGF.EmitLoadOfScalar(IL, S.getLocStart()));
          });
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, isOpenMPSimdDirective(S.getDirectiveKind()),
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
    }
    EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getLocStart()));
    });
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
  return HasLastprivateClause;
}

void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  {
    OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
                                                S.hasCancel());
  }

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  {
    OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
  }

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitScalarInit(Init, LVal);
  return LVal;
}

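// Lower a 'sections' region as a statically scheduled loop over the section
// indices whose body switches on the loop counter; e.g. two section
// statements become 'case 0:'/'case 1:' of the switch emitted in BodyGen
// below.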
void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
  auto *CS = dyn_cast<CompoundStmt>(Stmt);
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
                                                    PrePostActionTy &) {
    auto &C = CGF.CGM.getContext();
    auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    // Emit helper vars inits.
    LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                  CGF.Builder.getInt32(0));
    auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
                                      : CGF.Builder.getInt32(0);
    LValue UB =
        createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
    LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                  CGF.Builder.getInt32(1));
    LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                  CGF.Builder.getInt32(0));
    // Loop counter.
    LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
    OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
    OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
    // Generate condition for loop.
    BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                        OK_Ordinary, S.getLocStart(),
                        /*fpContractable=*/false);
    // Increment for loop counter.
    UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
                      S.getLocStart());
    auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
      // Iterate through all sections and emit a switch construct:
      // switch (IV) {
      //   case 0:
      //     <SectionStmt[0]>;
      //     break;
      // ...
      //   case <NumSection> - 1:
      //     <SectionStmt[<NumSection> - 1]>;
      //     break;
      // }
      // .omp.sections.exit:
      auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
      auto *SwitchStmt = CGF.Builder.CreateSwitch(
          CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
          CS == nullptr ? 1 : CS->size());
      if (CS) {
        unsigned CaseNumber = 0;
        for (auto *SubStmt : CS->children()) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
      } else {
        auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
        CGF.EmitBlock(CaseBB);
        SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
        CGF.EmitStmt(Stmt);
        CGF.EmitBranch(ExitBB);
      }
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    };

    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables and post-update of
      // lastprivate variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, LoopScope);
    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();

    // Emit static non-chunked loop.
    OpenMPScheduleTy ScheduleKind;
    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
    CGF.CGM.getOpenMPRuntime().emitForStaticInit(
        CGF, S.getLocStart(), ScheduleKind, /*IVSize=*/32,
        /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
        UB.getAddress(), ST.getAddress());
    // UB = min(UB, GlobalUB);
    auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
    auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
    // IV = LB;
    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
    // while (idx <= UB) { BODY; ++idx; }
    CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                         [](CodeGenFunction &) {});
    // Tell the runtime we are done.
    CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
    CGF.EmitOMPReductionClauseFinal(S);
    // Emit post-update of the reduction variables if IsLastIter != 0.
    emitPostUpdateForReductionClause(
        CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getLocStart()));
        });

    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivates)
      CGF.EmitOMPLastprivateClauseFinal(
          S, /*NoFinals=*/false,
          CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getLocStart())));
  };

  bool HasCancel = false;
  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
    HasCancel = OSD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
    HasCancel = OPSD->hasCancel();
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
                                              HasCancel);
  // Emit barrier for lastprivates only if the 'sections' directive has a
  // 'nowait' clause. Otherwise the barrier will be generated by the codegen
  // for the directive.
  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                           OMPD_unknown);
  }
}

void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  {
    OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
    EmitSections(S);
  }
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>()) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                           OMPD_sections);
  }
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
                                              S.hasCancel());
}

void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination>=<source> expressions).
  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  // Emit code for the 'single' region along with the 'copyprivate' clauses.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope SingleScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  {
    OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
    CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                            CopyprivateVars, DestExprs,
                                            SrcExprs, AssignmentOps);
  }
  // Emit an implicit barrier at the end (to avoid a data race on firstprivate
  // init, or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getLocStart(),
        S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
  }
}

void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  Expr *Hint = nullptr;
  if (auto *HintClause = S.getSingleClause<OMPHintClause>())
    Hint = HintClause->getHint();
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
                                            S.getDirectiveName().getAsString(),
                                            CodeGen, S.getLocStart(), Hint);
}

void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPWorksharingLoop(S);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPWorksharingLoop(S);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitSections(S);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
}

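// Common lowering for task-based directives ('task', 'taskloop', and their
// variants): collect clause data (final, priority, private/firstprivate/
// lastprivate copies, dependences) into Data, emit the outlined task
// function, and let TaskGen emit the actual runtime call.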
void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
                                                const RegionCodeGenTy &BodyGen,
                                                const TaskGenTy &TaskGen,
                                                OMPTaskDataTy &Data) {
  // Emit outlined function for task construct.
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto *I = CS->getCapturedDecl()->param_begin();
  auto *PartId = std::next(I);
  auto *TaskT = std::next(I, 4);
  // Check if the task is final.
  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
    auto *Cond = Clause->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Data.Final.setInt(CondConstant);
    else
      Data.Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Data.Final.setInt(/*IntVal=*/false);
  }
  // Check if the task has a 'priority' clause.
  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
    // The runtime currently does not support codegen for the priority clause
    // argument.
    // TODO: Add codegen for the priority clause arg when the runtime lib
    // supports it.
    auto *Prio = Clause->getPriority();
    Data.Priority.setInt(Prio);
    Data.Priority.setPointer(EmitScalarConversion(
        EmitScalarExpr(Prio), Prio->getType(),
        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
        Prio->getExprLoc()));
  }
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >=0 for untied tasks).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.FirstprivateVars.push_back(*IRef);
        Data.FirstprivateCopies.push_back(IInit);
        Data.FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef;
      ++IElemInitRef;
    }
  }
  // Get list of lastprivate variables (for taskloops).
  llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ID = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.LastprivateVars.push_back(*IRef);
        Data.LastprivateCopies.push_back(IInit);
      }
      LastprivateDstsOrigs.insert(
          {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
           cast<DeclRefExpr>(*IRef)});
      ++IRef;
      ++ID;
    }
  }
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>())
    for (auto *IRef : C->varlists())
      Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
  auto &&CodeGen = [PartId, &S, &Data, CS, &BodyGen, &LastprivateDstsOrigs](
      CodeGenFunction &CGF, PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
        !Data.LastprivateVars.empty()) {
      auto *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
      auto *PrivatesPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (auto *E : Data.PrivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (auto *E : Data.FirstprivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (auto *E : Data.LastprivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".lastpriv.ptr.addr");
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      CGF.EmitRuntimeCall(CopyFn, CallArgs);
      for (auto &&Pair : LastprivateDstsOrigs) {
        auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            Pair.second->getType(), VK_LValue, Pair.second->getExprLoc());
        Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
          return CGF.EmitLValue(&DRE).getAddress();
        });
      }
      for (auto &&Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    (void)Scope.Privatize();

    Action.Enter(CGF);
    BodyGen(CGF);
  };
  auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S);
  TaskGen(*this, OutlinedFn, Data);
}

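// 'task' itself only needs to capture the shared state, pick up the 'if' and
// 'untied' clause values, and forward to EmitOMPTaskBasedDirective with a
// TaskGen that emits the runtime task call via emitTaskCall.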
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if we should emit a tied or untied task.
  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn,
                                            SharedsTy, CapturedStruct, IfCond,
                                            Data);
  };
  EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
}

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
    if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
      return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                FlushClause->varlist_end());
    }
    return llvm::None;
  }(), S.getLocStart());
}

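// Lowering for 'distribute' mirrors the worksharing loop above, but chunks
// are assigned to teams (per dist_schedule) instead of threads, and there
// are no ordered or dynamic schedules to handle.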
void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
  // Emit the loop iteration variable.
  auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  auto &RT = CGM.getOpenMPRuntime();

  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      (void)LoopScope.Privatize();

      // Detect the distribute schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
      if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
        ScheduleKind = C->getDistScheduleKind();
        if (const auto *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getLocStart());
        }
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

      // OpenMP [2.10.8, distribute Construct, Description]
      // If dist_schedule is specified, kind must be static. If specified,
      // iterations are divided into chunks of size chunk_size, chunks are
      // assigned to the teams of the league in a round-robin fashion in the
      // order of the team number. When no chunk_size is specified, the
      // iteration space is divided into chunks that are approximately equal
      // in size, and at most one chunk is distributed to each team of the
      // league. The size of the chunks is unspecified in this case.
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr)) {
        RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
                                    IVSize, IVSigned, /* Ordered = */ false,
                                    IL.getAddress(), LB.getAddress(),
                                    UB.getAddress(), ST.getAddress());
        auto LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S, LoopExit](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, LoopExit);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // the runtime and runs the inner loop to process it.
        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope,
                                   LB.getAddress(), UB.getAddress(),
                                   ST.getAddress(), IL.getAddress(), Chunk);
      }
    }

    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
}

void CodeGenFunction::EmitOMPDistributeDirective(
    const OMPDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S);
  };
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
                                              /*HasCancel=*/false);
}

static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
                                                   const CapturedStmt *S) {
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
  CGF.CapturedStmtInfo = &CapStmtInfo;
  auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
  Fn->addFnAttr(llvm::Attribute::NoInline);
  return Fn;
}

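// Dispatch performed below (derived from the code; examples illustrative):
//   #pragma omp ordered depend(...)  -> standalone doacross runtime calls
//   #pragma omp ordered simd         -> body outlined (noinline) and called
//   #pragma omp ordered              -> inlined region via emitOrderedRegion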
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  if (!S.getAssociatedStmt()) {
    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
    return;
  }
  auto *C = S.getSingleClause<OMPSIMDClause>();
  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
                                 PrePostActionTy &Action) {
    if (C) {
      auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
      auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
      CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
    } else {
      Action.Enter(CGF);
      CGF.EmitStmt(
          cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    }
  };
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
}

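/// Convert a scalar or complex rvalue of type SrcType to a scalar value of
/// type DestType. Used by the atomic codegen below to reconcile the types of
/// 'x', 'v', and 'expr'.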
static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar()
             ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
                                        Loc)
             : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
                                                 DestType, Loc);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                              DestElementType, Loc);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return ComplexVal;
}

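/// Store RVal to LVal, atomically unless LVal is a global register lvalue
/// (registers cannot be the target of an atomic instruction). Uses seq_cst
/// ordering when requested and monotonic (relaxed) otherwise.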
static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg()) {
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  } else {
    CGF.EmitAtomicStore(RVal, LVal,
                        IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
                                 : llvm::AtomicOrdering::Monotonic,
                        LVal.isVolatile(), /*IsInit=*/false);
  }
}

void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
                                         QualType RValTy, SourceLocation Loc) {
  switch (getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
                               *this, RVal, RValTy, LVal.getType(), Loc)),
                           LVal);
    break;
  case TEK_Complex:
    EmitStoreOfComplex(
        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

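// Illustrative source form handled here:
//   #pragma omp atomic read [seq_cst]
//   v = x;
// 'x' is loaded atomically; the (possibly converted) result is then stored
// to 'v' non-atomically.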
static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = XLValue.isGlobalReg()
                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
                   : CGF.EmitAtomicLoad(
                         XLValue, Loc,
                         IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
                                  : llvm::AtomicOrdering::Monotonic,
                         XLValue.isVolatile());
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
}

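// Illustrative source form handled here:
//   #pragma omp atomic write [seq_cst]
//   x = expr;
// 'expr' is evaluated non-atomically, then stored to 'x' atomically.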
static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                   const Expr *X, const Expr *E,
                                   SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

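// When the update is a supported integer operation on a simple lvalue, it is
// emitted as a single 'atomicrmw' instruction, e.g. (illustrative):
//   #pragma omp atomic
//   x += expr;    // -> atomicrmw add, monotonic (or seq_cst)
// Otherwise the caller falls back to a compare-and-swap loop.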
static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  auto &Context = CGF.CGM.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // for 'x' is simple, and atomics are supported for the given type on the
  // target platform.
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() ||
      !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
                        (Update.getScalarVal()->getType() !=
                         X.getAddress().getElementType())) ||
      !X.getAddress().getElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    // There is no atomicrmw form for these operations; fall back to CAS.
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  auto *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress().getElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> &CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr
  // x = expr Op x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

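// Illustrative source forms handled here:
//   #pragma omp atomic [update]
//   x = x + expr;    // also: x binop= expr; x++; --x; x = expr binop x;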
static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                    const Expr *X, const Expr *E,
                                    const Expr *UE, bool IsXLHSInRHSPart,
                                    SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr
  // x = expr Op x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
                     : llvm::AtomicOrdering::Monotonic;
  auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto Gen =
      [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType,
                            SourceLocation Loc) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(
        convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

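// Illustrative source forms handled here:
//   #pragma omp atomic capture
//   v = x++;                // postfix: 'v' receives the old value of 'x'
//   #pragma omp atomic capture
//   { x += expr; v = x; }   // 'v' receives the new value of 'x'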
static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
                     : llvm::AtomicOrdering::Monotonic;
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval binop expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr
    // x = expr Op x; -> expr binop xrval;
    auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide the new value, so evaluate it using
        // the old value of 'x'.
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType(), Loc);
    auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of old/new 'x' value.
  CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              bool IsSeqCst, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
    break;
  case OMPC_write:
    EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_safelen:
  case OMPC_simdlen:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
  case OMPC_device:
  case OMPC_threads:
  case OMPC_simd:
  case OMPC_map:
  case OMPC_num_teams:
  case OMPC_thread_limit:
  case OMPC_priority:
  case OMPC_grainsize:
  case OMPC_nogroup:
  case OMPC_num_tasks:
  case OMPC_hint:
  case OMPC_dist_schedule:
  case OMPC_defaultmap:
  case OMPC_uniform:
  case OMPC_to:
  case OMPC_from:
  case OMPC_use_device_ptr:
  case OMPC_is_device_ptr:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

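// Per OpenMP, 'atomic' carries at most one of read/write/update/capture plus
// an optional seq_cst; with no clause it behaves as 'update', which is why
// OMPC_unknown is dispatched to the update path above.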
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
  OpenMPClauseKind Kind = OMPC_unknown;
  for (auto *C : S.clauses()) {
    // Find the first clause (skip the seq_cst clause, if it is first).
    if (C->getClauseKind() != OMPC_seq_cst) {
      Kind = C->getClauseKind();
      break;
    }
  }

  const auto *CS =
      S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
    enterFullExpression(EWC);
  }
  // Processing for statements under 'atomic capture'.
  if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
    for (const auto *C : Compound->body()) {
      if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
        enterFullExpression(EWC);
      }
    }
  }

  auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    CGF.EmitStopPoint(CS);
    EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
                      S.getV(), S.getExpr(), S.getUpdateExpr(),
                      S.isXLHSInRHSPart(), S.getLocStart());
  };
  OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}

std::pair<llvm::Function * /*OutlinedFn*/, llvm::Constant * /*OutlinedFnID*/>
CodeGenFunction::EmitOMPTargetDirectiveOutlinedFunction(
    CodeGenModule &CGM, const OMPTargetDirective &S, StringRef ParentName,
    bool IsOffloadEntry) {
  llvm::Function *OutlinedFn = nullptr;
  llvm::Constant *OutlinedFnID = nullptr;
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    (void)PrivateScope.Privatize();

    Action.Enter(CGF);
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, CodeGen);
  return std::make_pair(OutlinedFn, OutlinedFnID);
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
  const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());

  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  GenerateOpenMPCapturedVars(CS, CapturedVars);

  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;

  if (auto *C = S.getSingleClause<OMPIfClause>()) {
    IfCond = C->getCondition();
  }

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
    Device = C->getDevice();
  }

  // Check if we have an if clause whose conditional always evaluates to false
  // or if we do not have any targets specified. If so the target region is not
  // an offload entry point.
  bool IsOffloadEntry = true;
  if (IfCond) {
    bool Val;
    if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
      IsOffloadEntry = false;
  }
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    IsOffloadEntry = false;

  assert(CurFuncDecl && "No parent declaration for target region!");
  StringRef ParentName;
  // In case we have Ctors/Dtors we use the complete type variant to produce
  // the mangling of the device outlined kernel.
  if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
  else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
  else
    ParentName =
        CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));

  std::tie(Fn, FnID) = EmitOMPTargetDirectiveOutlinedFunction(
      CGM, S, ParentName, IsOffloadEntry);
  OMPLexicalScope Scope(*this, S);
  CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
                                        CapturedVars);
}

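/// Common lowering for directives containing a 'teams' region: outline the
/// body, emit num_teams/thread_limit if present, then ask the runtime to
/// launch the league of teams with the captured variables.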
static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        OpenMPDirectiveKind InnermostKind,
                                        const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOrTeamsOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);

  // Use cast<> rather than dyn_cast<>: dereferencing a failed dyn_cast is UB,
  // while cast<> asserts in builds with assertions enabled.
  const OMPTeamsDirective &TD = *cast<OMPTeamsDirective>(&S);
  const OMPNumTeamsClause *NT = TD.getSingleClause<OMPNumTeamsClause>();
  const OMPThreadLimitClause *TL = TD.getSingleClause<OMPThreadLimitClause>();
  if (NT || TL) {
    Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
    Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;

    CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
                                                  S.getLocStart());
  }

  OMPLexicalScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn,
                                           CapturedVars);
}

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_teams, CodeGen);
}

void CodeGenFunction::EmitOMPCancellationPointDirective(
    const OMPCancellationPointDirective &S) {
  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
                                                   S.getCancelRegion());
}

void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_cancel) {
      IfCond = C->getCondition();
      break;
    }
  }
  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
                                        S.getCancelRegion());
}

CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task)
    return ReturnBlock;
  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for);
  return BreakContinueStack.back().BreakBlock;
}

// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {
  // The target data enclosed region is implemented just by emitting the
  // statement.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };

  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty()) {
    OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);

    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_target_data,
                                                CodeGen);
    return;
  }

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, CodeGen);
}

void CodeGenFunction::EmitOMPTargetEnterDataDirective(
    const OMPTargetEnterDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}

void CodeGenFunction::EmitOMPTargetExitDataDirective(
    const OMPTargetExitDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}

void CodeGenFunction::EmitOMPTargetParallelDirective(
    const OMPTargetParallelDirective &S) {
  // TODO: codegen for target parallel.
}

void CodeGenFunction::EmitOMPTargetParallelForDirective(
    const OMPTargetParallelForDirective &S) {
  // TODO: codegen for target parallel for.
}

/// Map the given loop-bound helper variable onto the address of the
/// corresponding captured parameter, so references to the helper inside the
/// outlined task body read the runtime-provided bound.
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
                     const ImplicitParamDecl *PVD,
                     CodeGenFunction::OMPPrivateScope &Privates) {
  auto *VDecl = cast<VarDecl>(Helper->getDecl());
  Privates.addPrivate(
      VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); });
}

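// Overview of the taskloop lowering below: the loop body is packaged as a
// task; the runtime splits the iteration space and hands each task its
// lower/upper bound, stride, and last-iteration flag through the captured
// parameters that mapParam() wires to the loop's helper variables.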
void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
  assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
  // Emit outlined function for task construct.
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_taskloop) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if taskloop must be emitted without taskgroup.
  Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
  // TODO: Check if we should emit tied or untied task.
  Data.Tied = true;
  // Set scheduling for taskloop.
  if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
    // grainsize clause
    Data.Schedule.setInt(/*IntVal=*/false);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
  } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
    // num_tasks clause
    Data.Schedule.setInt(/*IntVal=*/true);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
  }

  auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    OMPLoopScope PreInitScope(CGF, S);
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
      ContBlock = CGF.createBasicBlock("taskloop.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    if (isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPSimdInit(S);

    OMPPrivateScope LoopScope(CGF);
    // Emit helper vars inits.
    enum { LowerBound = 5, UpperBound, Stride, LastIter };
    auto *I = CS->getCapturedDecl()->param_begin();
    auto *LBP = std::next(I, LowerBound);
    auto *UBP = std::next(I, UpperBound);
    auto *STP = std::next(I, Stride);
    auto *LIP = std::next(I, LastIter);
    mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
             LoopScope);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate the iterations count
    // on each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, JumpDest());
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivateClause) {
      CGF.EmitOMPLastprivateClauseFinal(
          S, isOpenMPSimdDirective(S.getDirectiveKind()),
          CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
              CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
              (*LIP)->getType(), S.getLocStart())));
    }
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) {
      OMPLoopScope PreInitScope(CGF, S);
      CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
                                                  OutlinedFn, SharedsTy,
                                                  CapturedStruct, IfCond, Data);
    };
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
                                                    CodeGen);
  };
  EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
}

void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
  EmitOMPTaskLoopBasedDirective(S);
}

void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
    const OMPTaskLoopSimdDirective &S) {
  EmitOMPTaskLoopBasedDirective(S);
}

// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
    const OMPTargetUpdateDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}