//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/PerfMonitor.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/InitializePasses.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/union_map.h"
#include <algorithm>

extern "C" {
#include "ppcg/cuda.h"
#include "ppcg/gpu.h"
#include "ppcg/ppcg.h"
}

#include "llvm/Support/Debug.h"

using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

bool polly::PollyManagedMemory;
static cl::opt<bool, true>
    XManagedMemory("polly-acc-codegen-managed-memory",
                   cl::desc("Generate Host kernel code assuming"
                            " that all memory has been"
                            " declared as managed memory"),
                   cl::location(PollyManagedMemory), cl::Hidden,
                   cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU"
                                       " kernel module."),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));

static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));

extern bool polly::PerfMonitoring;

/// Return a unique name for a Scop, combining the name of the scop region
/// with the name of the containing function.
std::string getUniqueScopName(const Scop *S) {
  return "Scop Region: " + S->getNameStr() +
         " | Function: " + std::string(S->getFunction().getName());
}
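// As an illustration (region and function names are hypothetical), a scop
// covering the region %for.cond---%for.end in a function `gemm` is named:
//   "Scop Region: %for.cond---%for.end | Function: gemm"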

/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`
  /// which merges schedules in *arbitrary* order.
  /// (We do not care about the order of the kills anyway.)
  isl::schedule KillsSchedule;
  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///   1. PHI nodes. PHI nodes are not alive outside the scop and can
  ///      consequently all be killed.
  ///   2. Scalar arrays that are not used outside the Scop. This is
  ///      checked by `isScalarUsesContainedInScop`.
  /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule(nullptr) {}
};

/// Check if SAI's uses are entirely contained within Scop S.
/// If a scalar is used only within a scop, we are free to kill it, as no data
/// can flow in or out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays required alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    if (!R.contains(I))
      return false;
  }
  return true;
}

/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @param S The Scop to compute live range reordering information for.
/// @returns Live range reordering information that can be used to set up
/// PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace = S.getParamSpace();
  MustKillsInfo Info;

  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(ParamSpace);
  Info.MustKills = isl::union_map::empty(ParamSpace);

  // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
  // schedule:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = nullptr;

  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);

    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //      [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    //    To construct this, we use `isl_map_domain_product` on 2 maps:
    // 2a. StmtToScalar:
    //      [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    // 2b. PhantomRefToScalar:
    //      [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    // Combining these with `isl_map_domain_product` gives us
    // TaggedMustKill:
    //      [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(ParamSpace);
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));

    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();

    // 3. Create the kill schedule of the form:
    //      "[param] -> { Stmt_phantom[] }"
    //    Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (Info.KillsSchedule)
      Info.KillsSchedule = isl::manage(
          isl_schedule_set(Info.KillsSchedule.release(), KillSchedule.copy()));
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}
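// As a sketch of the result (scalar and parameter names are hypothetical),
// killing a single scalar MemRef_tmp in a scop with parameter `n` produces:
//   TaggedMustKills:
//     [n] -> { [SKill_phantom_MemRef_tmp[] -> ref_phantomMemRef_tmp[]]
//              -> MemRef_tmp[] }
//   MustKills:
//     [n] -> { SKill_phantom_MemRef_tmp[] -> MemRef_tmp[] }
// together with a kill schedule whose domain is SKill_phantom_MemRef_tmp[].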

/// Create the AST expressions for a ScopStmt.
///
/// This function is a callback used to generate the AST expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;

  isl::ast_build Build = isl::manage_copy(Build_C);
  isl::ctx Ctx = Build.get_ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  Stmt->setAstBuild(Build);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}

/// Given an LLVM Type, compute its size in bytes.
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}
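// For example, a `float` has a primitive size of 32 bits, so this returns 4,
// and a `double` returns 8. The getScalarSizeInBits() fallback covers types
// whose primitive size is reported as 0.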

/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  void finalize() override;

  /// Track if the full build process was successful.
  ///
  /// This value is set to false if, at any point during the build process, an
  /// error occurred that prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed any
  /// more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel.
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  class IslIdDeleter {
  public:
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;

  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  void createUser(__isl_take isl_ast_node *UserStmt) override;

  void createFor(__isl_take isl_ast_node *Node) override;

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);

  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values.
  ///
  /// @returns A tuple, whose:
  ///          - First element contains the set of values referenced by the
  ///            kernel
  ///          - Second element contains the set of functions referenced by the
  ///            kernel. All functions in the set satisfy
  ///            `isValidFunctionInKernel`.
  ///          - Third element contains loops that have induction variables
  ///            which are used in the kernel, *and* these loops are *neither*
  ///            in the scop, nor do they immediately surround the Scop.
  ///            See [Code generation of induction variables of loops outside
  ///            Scops]
  std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
             isl::space>
  getReferencesInKernel(ppcg_kernel *Kernel);

  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for the X and Y dimensions.
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Get the managed array pointer for sending host pointers to the device.
  /// \note
  /// This is to be used only with managed memory.
  Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for the X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);

  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variables.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);

  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);

  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);

  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);

  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param Kernel          The kernel to generate the function calls for.
  /// @param SizeTypeIs64bit Whether size_t of the OpenCL device is 64 bit.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel, bool SizeTypeIs64bit);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  ///    SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  ///    old functions (that come from the original module) to new functions
  ///    (that are created within GPUModule). That way, we generate references
  ///    to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);

  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
  /// dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();

  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Create code to prepare the managed device pointers.
  void prepareManagedDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation.
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate.
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  /// @param Size      The number of bytes to copy.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  /// @param Size      The number of bytes to copy.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  /// \note
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object.
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);

  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param BlockDimX  The size of the first block dimension.
  /// @param BlockDimY  The size of the second block dimension.
  /// @param BlockDimZ  The size of the third block dimension.
  /// @param Parameters A pointer to an array that itself contains pointers to
  ///                   the parameter values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};

std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}
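// For example (hypothetical function name and IDs), the first kernel of the
// first scop in a function `gemm` would be named:
//   FUNC_gemm_SCOP_0_KERNEL_0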

void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!PollyManagedMemory)
    allocateDeviceArrays();
  else
    prepareManagedDeviceArrays();
}

void GPUNodeBuilder::finalize() {
  if (!PollyManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}

void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!PollyManagedMemory &&
         "Managed memory will directly send host pointers "
         "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
    // It makes no sense to have an array of size 0. The CUDA API will
    // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
    // choose to be defensive and catch this at the compile phase. It is
    // most likely that we are doing something wrong with size computation.
    if (SizeSCEV->isZero()) {
      errs() << getUniqueScopName(&S)
             << " has computed array size 0: " << *ArraySize
             << " | for array: " << *(ScopArray->getBasePtr())
             << ". This is illegal, exiting.\n";
      report_fatal_error("array size was computed to be 0");
    }

    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}

void GPUNodeBuilder::prepareManagedDeviceArrays() {
  assert(PollyManagedMemory &&
         "Device arrays must only be prepared in managed-memory mode");
  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
    else
      HostPtr = ScopArray->getBasePtr();
    HostPtr = getLatestValue(HostPtr);

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ScopArray->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ScopArray] = HostPtr;
  }
}

void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (auto &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };
    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}
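// The emitted metadata is roughly equivalent to the following IR (kernel name
// and constant block sizes are illustrative):
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @FUNC_gemm_SCOP_0_KERNEL_0, !"maxntidx", i32 32,
//          !"maxntidy", i32 4, !"maxntidz", i32 1}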

void GPUNodeBuilder::freeDeviceArrays() {
  assert(!PollyManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}

Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}
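// The declaration created above corresponds to the runtime entry point:
//   declare i8* @polly_getKernel(i8* %Buffer, i8* %Entry)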

Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}

void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}
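// The declaration created above corresponds to:
//   declare void @polly_launchKernel(i8*, i32, i32, i32, i32, i32, i8*)
// called as (GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY, BlockDimZ,
// Parameters).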

void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}

void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}

Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}

void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}

void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}

void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(PollyManagedMemory && "explicit synchronization is only necessary for "
                               "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}

Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}
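// Depending on the selected runtime, the call above declares and invokes one
// of the following runtime entry points:
//   declare i8* @polly_initContextCUDA()
//   declare i8* @polly_initContextCL()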

void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}

/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}

Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build = isl::ast_build::from_context(S.getContext());
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound = isl::manage_copy(Array->bound);

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }
  return ArraySize;
}
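// As a sketch (array shape hypothetical): for a non-scalar array
// `float A[100][n]`, Array->size is the element size 4, so the code above
// computes ArraySize = 4 * (100 * n) by multiplying the bounds of all
// dimensions into the element size.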

Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
  if (gpu_array_is_scalar(Array))
    return nullptr;

  isl::ast_build Build = isl::ast_build::from_context(S.getContext());

  isl::set Min = isl::manage_copy(Array->extent).lexmin();

  isl::set ZeroSet = isl::set::universe(Min.get_space());

  for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++)
    ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0);

  if (Min.is_subset(ZeroSet)) {
    return nullptr;
  }

  isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.get_ctx(), 0));

  for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++) {
    if (i > 0) {
      isl::pw_aff Bound_I =
          isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1));
      isl::ast_expr BExpr = Build.expr_from(Bound_I);
      Result = Result.mul(BExpr);
    }
    isl::pw_aff DimMin = Min.dim_min(i);
    isl::ast_expr MExpr = Build.expr_from(DimMin);
    Result = Result.add(MExpr);
  }

  return ExprBuilder.create(Result.release());
}

Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
                                             ScopArrayInfo *ArrayInfo) {
  assert(PollyManagedMemory && "Only used when you wish to get a host "
                               "pointer for sending data to the kernel, "
                               "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  it = DeviceAllocations.find(ArrayInfo);
  assert(it != DeviceAllocations.end() &&
         "Device array expected to be available");
  return it->second;
}

void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!PollyManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();
  HostPtr = getLatestValue(HostPtr);

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}

void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  const char *Str = isl_id_get_name(Id);
  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    if (PollyManagedMemory)
      createCallSynchronizeDevice();
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!PollyManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!PollyManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
}

void GPUNodeBuilder::createFor(__isl_take isl_ast_node *Node) {
  createForSequential(isl::manage(Node), false);
}

void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}

void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
                                    ppcg_kernel_stmt *KernelStmt) {
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;

  LoopToScevMapT LTS;
  LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());

  createSubstitutions(Expr, Stmt, LTS);

  if (Stmt->isBlockStmt())
    BlockGen.copyStmt(*Stmt, LTS, Indexes);
  else
    RegionGen.copyStmt(*Stmt, LTS, Indexes);
}

void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  const char *SpirName = "__gen_ocl_barrier_global";

  Function *Sync;

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    Sync = M->getFunction(SpirName);

    // If Sync is not available, declare it.
    if (!Sync) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
      Sync = Function::Create(Ty, Linkage, SpirName, M);
      Sync->setCallingConv(CallingConv::SPIR_FUNC);
    }
    break;
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}

/// Collect llvm::Values referenced from @p Node.
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}

/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp",      "expf",      "expl",      "cos", "cosf", "sqrt", "sqrtf",
    "copysign", "copysignf", "copysignl", "log", "logf", "powi", "powif"};

// A map from intrinsics to their corresponding libdevice functions.
const std::map<std::string, std::string> IntrinsicToLibdeviceFunc = {
    {"llvm.exp.f64", "exp"},
    {"llvm.exp.f32", "expf"},
    {"llvm.powi.f64", "powi"},
    {"llvm.powi.f32", "powif"}};

/// Return the corresponding CUDA libdevice function name for @p Name.
/// Note that this function will try to convert intrinsics in the list
/// IntrinsicToLibdeviceFunc into libdevice functions.
/// This is because some intrinsics such as `exp`
/// are not supported by the NVPTX backend.
/// If this restriction of the backend is lifted, we should refactor our code
/// so that we use intrinsics whenever possible.
///
/// Return "" if we are not compiling for CUDA.
std::string getCUDALibDeviceFuntion(StringRef NameRef) {
  std::string Name = NameRef.str();
  auto It = IntrinsicToLibdeviceFunc.find(Name);
  if (It != IntrinsicToLibdeviceFunc.end())
    return getCUDALibDeviceFuntion(It->second);

  if (CUDALibDeviceFunctions.count(Name))
    return ("__nv_" + Name);

  return "";
}
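// Examples of the mapping implemented above:
//   "llvm.exp.f64" -> "exp"  -> "__nv_exp"
//   "llvm.exp.f32" -> "expf" -> "__nv_expf"
//   "cosf"                   -> "__nv_cosf"
// Names not covered by either table yield "".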
1417
1418 /// Check if F is a function that we can code-generate in a GPU kernel.
isValidFunctionInKernel(llvm::Function * F,bool AllowLibDevice)1419 static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
1420 assert(F && "F is an invalid pointer");
1421 // We string compare against the name of the function to allow
1422 // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
1423 // "llvm.copysign".
1424 const StringRef Name = F->getName();
1425
1426 if (AllowLibDevice && getCUDALibDeviceFuntion(Name).length() > 0)
1427 return true;
1428
1429 return F->isIntrinsic() &&
1430 (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
1431 Name.startswith("llvm.copysign"));
1432 }
1433
1434 /// Do not take `Function` as a subtree value.
1435 ///
1436 /// We try to take the reference of all subtree values and pass them along
1437 /// to the kernel from the host. Taking an address of any function and
1438 /// trying to pass along is nonsensical. Only allow `Value`s that are not
1439 /// `Function`s.
isValidSubtreeValue(llvm::Value * V)1440 static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }
1441
1442 /// Return `Function`s from `RawSubtreeValues`.
1443 static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value * > RawSubtreeValues,bool AllowCUDALibDevice)1444 getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
1445 bool AllowCUDALibDevice) {
1446 SetVector<Function *> SubtreeFunctions;
1447 for (Value *It : RawSubtreeValues) {
1448 Function *F = dyn_cast<Function>(It);
1449 if (F) {
1450 assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
1451 "Code should have bailed out by "
1452 "this point if an invalid function "
1453 "were present in a kernel.");
1454 SubtreeFunctions.insert(F);
1455 }
1456 }
1457 return SubtreeFunctions;
1458 }
1459
1460 std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
1461 isl::space>
getReferencesInKernel(ppcg_kernel * Kernel)1462 GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
1463 SetVector<Value *> SubtreeValues;
1464 SetVector<const SCEV *> SCEVs;
1465 SetVector<const Loop *> Loops;
1466 isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params();
1467 SubtreeReferences References = {
1468 LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(),
1469 &ParamSpace};
1470
1471 for (const auto &I : IDToValue)
1472 SubtreeValues.insert(I.second);
1473
1474 // NOTE: this is populated in IslNodeBuilder::addParameters
1475 // See [Code generation of induction variables of loops outside Scops].
1476 for (const auto &I : OutsideLoopIterations)
1477 SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());
1478
1479 isl_ast_node_foreach_descendant_top_down(
1480 Kernel->tree, collectReferencesInGPUStmt, &References);
1481
1482 for (const SCEV *Expr : SCEVs) {
1483 findValues(Expr, SE, SubtreeValues);
1484 findLoops(Expr, Loops);
1485 }
1486
1487 Loops.remove_if([this](const Loop *L) {
1488 return S.contains(L) || L->contains(S.getEntry());
1489 });
1490
1491 for (auto &SAI : S.arrays())
1492 SubtreeValues.remove(SAI->getBasePtr());
1493
1494 isl_space *Space = S.getParamSpace().release();
1495 for (long i = 0, n = isl_space_dim(Space, isl_dim_param); i < n; i++) {
1496 isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
1497 assert(IDToValue.count(Id));
1498 Value *Val = IDToValue[Id];
1499 SubtreeValues.remove(Val);
1500 isl_id_free(Id);
1501 }
1502 isl_space_free(Space);
1503
1504 for (long i = 0, n = isl_space_dim(Kernel->space, isl_dim_set); i < n; i++) {
1505 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
1506 assert(IDToValue.count(Id));
1507 Value *Val = IDToValue[Id];
1508 SubtreeValues.remove(Val);
1509 isl_id_free(Id);
1510 }
1511
1512 // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
1513 // SubtreeValues. This is important, because we should not lose any
1514 // SubtreeValues in the process of constructing the
1515 // ValidSubtree{Values, Functions} sets. Nor should the sets
1516 // ValidSubtree{Values, Functions} have any common element.
1517 auto ValidSubtreeValuesIt =
1518 make_filter_range(SubtreeValues, isValidSubtreeValue);
1519 SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
1520 ValidSubtreeValuesIt.end());
1521
1522 bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;
1523
1524 SetVector<Function *> ValidSubtreeFunctions(
1525 getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));
1526
1527 // @see IslNodeBuilder::getReferencesInSubtree
1528 SetVector<Value *> ReplacedValues;
1529 for (Value *V : ValidSubtreeValues) {
1530 auto It = ValueMap.find(V);
1531 if (It == ValueMap.end())
1532 ReplacedValues.insert(V);
1533 else
1534 ReplacedValues.insert(It->second);
1535 }
1536 return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops,
1537 ParamSpace);
1538 }
1539
1540 void GPUNodeBuilder::clearDominators(Function *F) {
1541 DomTreeNode *N = DT.getNode(&F->getEntryBlock());
1542 std::vector<BasicBlock *> Nodes;
1543 for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
1544 Nodes.push_back(I->getBlock());
1545
1546 for (BasicBlock *BB : Nodes)
1547 DT.eraseNode(BB);
1548 }
1549
1550 void GPUNodeBuilder::clearScalarEvolution(Function *F) {
1551 for (BasicBlock &BB : *F) {
1552 Loop *L = LI.getLoopFor(&BB);
1553 if (L)
1554 SE.forgetLoop(L);
1555 }
1556 }
1557
1558 void GPUNodeBuilder::clearLoops(Function *F) {
1559 SmallSet<Loop *, 1> WorkList;
1560 for (BasicBlock &BB : *F) {
1561 Loop *L = LI.getLoopFor(&BB);
1562 if (L)
1563 WorkList.insert(L);
1564 }
1565 for (auto *L : WorkList)
1566 LI.erase(L);
1567 }
1568
1569 std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
1570 std::vector<Value *> Sizes;
1571 isl::ast_build Context = isl::ast_build::from_context(S.getContext());
1572
1573 isl::multi_pw_aff GridSizePwAffs = isl::manage_copy(Kernel->grid_size);
1574 for (long i = 0; i < Kernel->n_grid; i++) {
1575 isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i);
1576 isl::ast_expr GridSize = Context.expr_from(Size);
1577 Value *Res = ExprBuilder.create(GridSize.release());
1578 Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
1579 Sizes.push_back(Res);
1580 }
1581
1582 for (long i = Kernel->n_grid; i < 3; i++)
1583 Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));
1584
1585 return std::make_tuple(Sizes[0], Sizes[1]);
1586 }
1587
1588 std::tuple<Value *, Value *, Value *>
1589 GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
1590 std::vector<Value *> Sizes;
1591
1592 for (long i = 0; i < Kernel->n_block; i++) {
1593 Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
1594 Sizes.push_back(Res);
1595 }
1596
1597 for (long i = Kernel->n_block; i < 3; i++)
1598 Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));
1599
1600 return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
1601 }
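// Editorial note, not from the original source: both helpers above pad the
// launch configuration with 1s, so e.g. a kernel with n_block == 1 and
// block_dim[0] == 32 yields the block sizes (32, 1, 1), and a kernel with
// n_grid == 1 yields a grid of (size0, 1).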
1602
1603 void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
1604 Instruction *Param, int Index) {
1605 Value *Slot = Builder.CreateGEP(
1606 Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
1607 Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
1608 Builder.CreateStore(ParamTyped, Slot);
1609 }
1610
1611 Value *
1612 GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
1613 SetVector<Value *> SubtreeValues) {
1614 const int NumArgs = F->arg_size();
1615 std::vector<int> ArgSizes(NumArgs);
1616
1617 // If we are using the OpenCL Runtime, we need to add the kernel argument
1618 // sizes to the end of the launch-parameter list, so OpenCL can determine
1619 // how big the respective kernel arguments are.
1620 // Here we need to reserve adequate space for that.
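// A sketch of the resulting layout (our illustration, not from the original
// source): for a kernel with N arguments, the parameter array holds N i8*
// slots, plus, for OpenCL only, N further slots pointing at the argument
// sizes:
//
//   CUDA:   [ &arg0, &arg1, ..., &argN-1 ]
//   OpenCL: [ &arg0, &arg1, ..., &argN-1, &size0, &size1, ..., &sizeN-1 ]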
1621 Type *ArrayTy;
1622 if (Runtime == GPURuntime::OpenCL)
1623 ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);
1624 else
1625 ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs);
1626
1627 BasicBlock *EntryBlock =
1628 &Builder.GetInsertBlock()->getParent()->getEntryBlock();
1629 auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
1630 std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
1631 Instruction *Parameters = new AllocaInst(
1632 ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());
1633
1634 int Index = 0;
1635 for (long i = 0; i < Prog->n_array; i++) {
1636 if (!ppcg_kernel_requires_array_argument(Kernel, i))
1637 continue;
1638
1639 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
1640 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
1641
1642 if (Runtime == GPURuntime::OpenCL)
1643 ArgSizes[Index] = SAI->getElemSizeInBytes();
1644
1645 Value *DevArray = nullptr;
1646 if (PollyManagedMemory) {
1647 DevArray = getManagedDeviceArray(&Prog->array[i],
1648 const_cast<ScopArrayInfo *>(SAI));
1649 } else {
1650 DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
1651 DevArray = createCallGetDevicePtr(DevArray);
1652 }
1653 assert(DevArray != nullptr && "Array to be offloaded to device not "
1654 "initialized");
1655 Value *Offset = getArrayOffset(&Prog->array[i]);
1656
1657 if (Offset) {
1658 DevArray = Builder.CreatePointerCast(
1659 DevArray, SAI->getElementType()->getPointerTo());
1660 DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
1661 DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
1662 }
1663 Value *Slot = Builder.CreateGEP(
1664 Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
1665
1666 if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
1667 Value *ValPtr = nullptr;
1668 if (PollyManagedMemory)
1669 ValPtr = DevArray;
1670 else
1671 ValPtr = BlockGen.getOrCreateAlloca(SAI);
1672
1673 assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
1674 " to be stored into Parameters");
1675 Value *ValPtrCast =
1676 Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
1677 Builder.CreateStore(ValPtrCast, Slot);
1678 } else {
1679 Instruction *Param =
1680 new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
1681 Launch + "_param_" + std::to_string(Index),
1682 EntryBlock->getTerminator());
1683 Builder.CreateStore(DevArray, Param);
1684 Value *ParamTyped =
1685 Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
1686 Builder.CreateStore(ParamTyped, Slot);
1687 }
1688 Index++;
1689 }
1690
1691 int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);
1692
1693 for (long i = 0; i < NumHostIters; i++) {
1694 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
1695 Value *Val = IDToValue[Id];
1696 isl_id_free(Id);
1697
1698 if (Runtime == GPURuntime::OpenCL)
1699 ArgSizes[Index] = computeSizeInBytes(Val->getType());
1700
1701 Instruction *Param =
1702 new AllocaInst(Val->getType(), AddressSpace,
1703 Launch + "_param_" + std::to_string(Index),
1704 EntryBlock->getTerminator());
1705 Builder.CreateStore(Val, Param);
1706 insertStoreParameter(Parameters, Param, Index);
1707 Index++;
1708 }
1709
1710 int NumVars = isl_space_dim(Kernel->space, isl_dim_param);
1711
1712 for (long i = 0; i < NumVars; i++) {
1713 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
1714 Value *Val = IDToValue[Id];
1715 if (ValueMap.count(Val))
1716 Val = ValueMap[Val];
1717 isl_id_free(Id);
1718
1719 if (Runtime == GPURuntime::OpenCL)
1720 ArgSizes[Index] = computeSizeInBytes(Val->getType());
1721
1722 Instruction *Param =
1723 new AllocaInst(Val->getType(), AddressSpace,
1724 Launch + "_param_" + std::to_string(Index),
1725 EntryBlock->getTerminator());
1726 Builder.CreateStore(Val, Param);
1727 insertStoreParameter(Parameters, Param, Index);
1728 Index++;
1729 }
1730
1731 for (auto Val : SubtreeValues) {
1732 if (Runtime == GPURuntime::OpenCL)
1733 ArgSizes[Index] = computeSizeInBytes(Val->getType());
1734
1735 Instruction *Param =
1736 new AllocaInst(Val->getType(), AddressSpace,
1737 Launch + "_param_" + std::to_string(Index),
1738 EntryBlock->getTerminator());
1739 Builder.CreateStore(Val, Param);
1740 insertStoreParameter(Parameters, Param, Index);
1741 Index++;
1742 }
1743
1744 if (Runtime == GPURuntime::OpenCL) {
1745 for (int i = 0; i < NumArgs; i++) {
1746 Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
1747 Instruction *Param =
1748 new AllocaInst(Builder.getInt32Ty(), AddressSpace,
1749 Launch + "_param_size_" + std::to_string(i),
1750 EntryBlock->getTerminator());
1751 Builder.CreateStore(Val, Param);
1752 insertStoreParameter(Parameters, Param, Index);
1753 Index++;
1754 }
1755 }
1756
1757 auto Location = EntryBlock->getTerminator();
1758 return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
1759 Launch + "_params_i8ptr", Location);
1760 }
1761
1762 void GPUNodeBuilder::setupKernelSubtreeFunctions(
1763 SetVector<Function *> SubtreeFunctions) {
1764 for (auto Fn : SubtreeFunctions) {
1765 const std::string ClonedFnName = Fn->getName().str();
1766 Function *Clone = GPUModule->getFunction(ClonedFnName);
1767 if (!Clone)
1768 Clone =
1769 Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
1770 ClonedFnName, GPUModule.get());
1771 assert(Clone && "Expected cloned function to be initialized.");
1772 assert(ValueMap.find(Fn) == ValueMap.end() &&
1773 "Fn already present in ValueMap");
1774 ValueMap[Fn] = Clone;
1775 }
1776 }
1777 void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
1778 isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
1779 ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
1780 isl_id_free(Id);
1781 isl_ast_node_free(KernelStmt);
1782
1783 if (Kernel->n_grid > 1)
1784 DeepestParallel = std::max(
1785 DeepestParallel, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));
1786 else
1787 DeepestSequential = std::max(
1788 DeepestSequential, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));
1789
1790 Value *BlockDimX, *BlockDimY, *BlockDimZ;
1791 std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);
1792
1793 SetVector<Value *> SubtreeValues;
1794 SetVector<Function *> SubtreeFunctions;
1795 SetVector<const Loop *> Loops;
1796 isl::space ParamSpace;
1797 std::tie(SubtreeValues, SubtreeFunctions, Loops, ParamSpace) =
1798 getReferencesInKernel(Kernel);
1799
1800 // Add parameters that appear only in the access function to the kernel
1801 // space. This makes sure that all isl_ids are passed as parameters to the
1802 // kernel, even though, to improve compile time, we may not have all
1803 // parameters in the context.
1804 Kernel->space = isl_space_align_params(Kernel->space, ParamSpace.release());
1805
1806 assert(Kernel->tree && "Device AST of kernel node is empty");
1807
1808 Instruction &HostInsertPoint = *Builder.GetInsertPoint();
1809 IslExprBuilder::IDToValueTy HostIDs = IDToValue;
1810 ValueMapT HostValueMap = ValueMap;
1811 BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
1812 ScalarMap.clear();
1813 BlockGenerator::EscapeUsersAllocaMapTy HostEscapeMap = EscapeMap;
1814 EscapeMap.clear();
1815
1816 // For all loops we depend on, create values that contain the current loop
1817 // iteration. These values are necessary to generate code for SCEVs that
1818 // depend on such loops. As a result we need to pass them to the subfunction.
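// For illustration (an editorial sketch, not from the original source): for
// an enclosing loop L of the scop, the add-rec {0,+,1}<L> built below simply
// counts L's iterations starting at zero; generateSCEV materializes it as a
// host-side value, which is then passed into the kernel like any other
// subtree value.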
1819 for (const Loop *L : Loops) {
1820 const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
1821 SE.getUnknown(Builder.getInt64(1)),
1822 L, SCEV::FlagAnyWrap);
1823 Value *V = generateSCEV(OuterLIV);
1824 OutsideLoopIterations[L] = SE.getUnknown(V);
1825 SubtreeValues.insert(V);
1826 }
1827
1828 createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
1829 setupKernelSubtreeFunctions(SubtreeFunctions);
1830
1831 create(isl_ast_node_copy(Kernel->tree));
1832
1833 finalizeKernelArguments(Kernel);
1834 Function *F = Builder.GetInsertBlock()->getParent();
1835 if (Arch == GPUArch::NVPTX64)
1836 addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
1837 clearDominators(F);
1838 clearScalarEvolution(F);
1839 clearLoops(F);
1840
1841 IDToValue = HostIDs;
1842
1843 ValueMap = std::move(HostValueMap);
1844 ScalarMap = std::move(HostScalarMap);
1845 EscapeMap = std::move(HostEscapeMap);
1846 IDToSAI.clear();
1847 Annotator.resetAlternativeAliasBases();
1848 for (auto &BasePtr : LocalArrays)
1849 S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
1850 LocalArrays.clear();
1851
1852 std::string ASMString = finalizeKernelFunction();
1853 Builder.SetInsertPoint(&HostInsertPoint);
1854 Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);
1855
1856 std::string Name = getKernelFuncName(Kernel->id);
1857 Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
1858 Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
1859 Value *GPUKernel = createCallGetKernel(KernelString, NameString);
1860
1861 Value *GridDimX, *GridDimY;
1862 std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);
1863
1864 createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
1865 BlockDimZ, Parameters);
1866 createCallFreeKernel(GPUKernel);
1867
1868 for (auto Id : KernelIds)
1869 isl_id_free(Id);
1870
1871 KernelIds.clear();
1872 }
1873
1874 /// Compute the DataLayout string for the NVPTX backend.
1875 ///
1876 /// @param is64Bit Are we looking for a 64 bit architecture?
1877 static std::string computeNVPTXDataLayout(bool is64Bit) {
1878 std::string Ret = "";
1879
1880 if (!is64Bit) {
1881 Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1882 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1883 "64-v128:128:128-n16:32:64";
1884 } else {
1885 Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1886 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1887 "64-v128:128:128-n16:32:64";
1888 }
1889
1890 return Ret;
1891 }
1892
1893 /// Compute the DataLayout string for a SPIR kernel.
1894 ///
1895 /// @param is64Bit Are we looking for a 64 bit architecture?
1896 static std::string computeSPIRDataLayout(bool is64Bit) {
1897 std::string Ret = "";
1898
1899 if (!is64Bit) {
1900 Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1901 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
1902 "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
1903 "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
1904 } else {
1905 Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1906 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
1907 "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
1908 "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
1909 }
1910
1911 return Ret;
1912 }
1913
1914 Function *
1915 GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
1916 SetVector<Value *> &SubtreeValues) {
1917 std::vector<Type *> Args;
1918 std::string Identifier = getKernelFuncName(Kernel->id);
1919
1920 std::vector<Metadata *> MemoryType;
1921
1922 for (long i = 0; i < Prog->n_array; i++) {
1923 if (!ppcg_kernel_requires_array_argument(Kernel, i))
1924 continue;
1925
1926 if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
1927 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
1928 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
1929 Args.push_back(SAI->getElementType());
1930 MemoryType.push_back(
1931 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1932 } else {
1933 static const int UseGlobalMemory = 1;
1934 Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
1935 MemoryType.push_back(
1936 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
1937 }
1938 }
1939
1940 int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);
1941
1942 for (long i = 0; i < NumHostIters; i++) {
1943 Args.push_back(Builder.getInt64Ty());
1944 MemoryType.push_back(
1945 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1946 }
1947
1948 int NumVars = isl_space_dim(Kernel->space, isl_dim_param);
1949
1950 for (long i = 0; i < NumVars; i++) {
1951 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
1952 Value *Val = IDToValue[Id];
1953 isl_id_free(Id);
1954 Args.push_back(Val->getType());
1955 MemoryType.push_back(
1956 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1957 }
1958
1959 for (auto *V : SubtreeValues) {
1960 Args.push_back(V->getType());
1961 MemoryType.push_back(
1962 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1963 }
1964
1965 auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
1966 auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
1967 GPUModule.get());
1968
1969 std::vector<Metadata *> EmptyStrings;
1970
1971 for (unsigned int i = 0; i < MemoryType.size(); i++) {
1972 EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
1973 }
1974
1975 if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
1976 FN->setMetadata("kernel_arg_addr_space",
1977 MDNode::get(FN->getContext(), MemoryType));
1978 FN->setMetadata("kernel_arg_name",
1979 MDNode::get(FN->getContext(), EmptyStrings));
1980 FN->setMetadata("kernel_arg_access_qual",
1981 MDNode::get(FN->getContext(), EmptyStrings));
1982 FN->setMetadata("kernel_arg_type",
1983 MDNode::get(FN->getContext(), EmptyStrings));
1984 FN->setMetadata("kernel_arg_type_qual",
1985 MDNode::get(FN->getContext(), EmptyStrings));
1986 FN->setMetadata("kernel_arg_base_type",
1987 MDNode::get(FN->getContext(), EmptyStrings));
1988 }
1989
1990 switch (Arch) {
1991 case GPUArch::NVPTX64:
1992 FN->setCallingConv(CallingConv::PTX_Kernel);
1993 break;
1994 case GPUArch::SPIR32:
1995 case GPUArch::SPIR64:
1996 FN->setCallingConv(CallingConv::SPIR_KERNEL);
1997 break;
1998 }
1999
2000 auto Arg = FN->arg_begin();
2001 for (long i = 0; i < Kernel->n_array; i++) {
2002 if (!ppcg_kernel_requires_array_argument(Kernel, i))
2003 continue;
2004
2005 Arg->setName(Kernel->array[i].array->name);
2006
2007 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2008 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2009 Type *EleTy = SAI->getElementType();
2010 Value *Val = &*Arg;
2011 SmallVector<const SCEV *, 4> Sizes;
2012 isl_ast_build *Build =
2013 isl_ast_build_from_context(isl_set_copy(Prog->context));
2014 Sizes.push_back(nullptr);
2015 for (long j = 1, n = Kernel->array[i].array->n_index; j < n; j++) {
2016 isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
2017 Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
2018 auto V = ExprBuilder.create(DimSize);
2019 Sizes.push_back(SE.getSCEV(V));
2020 }
2021 const ScopArrayInfo *SAIRep =
2022 S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
2023 LocalArrays.push_back(Val);
2024
2025 isl_ast_build_free(Build);
2026 KernelIds.push_back(Id);
2027 IDToSAI[Id] = SAIRep;
2028 Arg++;
2029 }
2030
2031 for (long i = 0; i < NumHostIters; i++) {
2032 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
2033 Arg->setName(isl_id_get_name(Id));
2034 IDToValue[Id] = &*Arg;
2035 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2036 Arg++;
2037 }
2038
2039 for (long i = 0; i < NumVars; i++) {
2040 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
2041 Arg->setName(isl_id_get_name(Id));
2042 Value *Val = IDToValue[Id];
2043 ValueMap[Val] = &*Arg;
2044 IDToValue[Id] = &*Arg;
2045 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2046 Arg++;
2047 }
2048
2049 for (auto *V : SubtreeValues) {
2050 Arg->setName(V->getName());
2051 ValueMap[V] = &*Arg;
2052 Arg++;
2053 }
2054
2055 return FN;
2056 }
2057
2058 void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
2059 Intrinsic::ID IntrinsicsBID[2];
2060 Intrinsic::ID IntrinsicsTID[3];
2061
2062 switch (Arch) {
2063 case GPUArch::SPIR64:
2064 case GPUArch::SPIR32:
2065 llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
2066 case GPUArch::NVPTX64:
2067 IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
2068 IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;
2069
2070 IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
2071 IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
2072 IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
2073 break;
2074 }
2075
2076 auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
2077 std::string Name = isl_id_get_name(Id);
2078 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2079 Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
2080 Value *Val = Builder.CreateCall(IntrinsicFn, {});
2081 Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
2082 IDToValue[Id] = Val;
2083 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2084 };
2085
2086 for (int i = 0; i < Kernel->n_grid; ++i) {
2087 isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
2088 addId(Id, IntrinsicsBID[i]);
2089 }
2090
2091 for (int i = 0; i < Kernel->n_block; ++i) {
2092 isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
2093 addId(Id, IntrinsicsTID[i]);
2094 }
2095 }
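// A hedged sketch of the IR this produces for an NVPTX kernel (illustrative
// only; exact value names vary, they are taken from the isl_id):
//
//   %0  = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
//   %t0 = zext i32 %0 to i64
//
// The zero-extension matches the CreateIntCast(..., /*isSigned=*/false)
// call in addId above.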
2096
2097 void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel,
2098 bool SizeTypeIs64bit) {
2099 const char *GroupName[3] = {"__gen_ocl_get_group_id0",
2100 "__gen_ocl_get_group_id1",
2101 "__gen_ocl_get_group_id2"};
2102
2103 const char *LocalName[3] = {"__gen_ocl_get_local_id0",
2104 "__gen_ocl_get_local_id1",
2105 "__gen_ocl_get_local_id2"};
2106 IntegerType *SizeT =
2107 SizeTypeIs64bit ? Builder.getInt64Ty() : Builder.getInt32Ty();
2108
2109 auto createFunc = [this](const char *Name, __isl_take isl_id *Id,
2110 IntegerType *SizeT) mutable {
2111 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2112 Function *FN = M->getFunction(Name);
2113
2114 // If FN is not available, declare it.
2115 if (!FN) {
2116 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
2117 std::vector<Type *> Args;
2118 FunctionType *Ty = FunctionType::get(SizeT, Args, false);
2119 FN = Function::Create(Ty, Linkage, Name, M);
2120 FN->setCallingConv(CallingConv::SPIR_FUNC);
2121 }
2122
2123 Value *Val = Builder.CreateCall(FN, {});
2124 if (SizeT == Builder.getInt32Ty())
2125 Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
2126 IDToValue[Id] = Val;
2127 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2128 };
2129
2130 for (int i = 0; i < Kernel->n_grid; ++i)
2131 createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i), SizeT);
2132
2133 for (int i = 0; i < Kernel->n_block; ++i)
2134 createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i), SizeT);
2135 }
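// Illustrative only (editorial sketch): for a SPIR32 kernel this emits calls
// along the lines of
//
//   %0  = call i32 @__gen_ocl_get_group_id0()
//   %b0 = zext i32 %0 to i64
//
// i.e. 32-bit results are widened to i64 so the remaining code generation can
// treat all block and thread ids uniformly.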
2136
2137 void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
2138 auto Arg = FN->arg_begin();
2139 for (long i = 0; i < Kernel->n_array; i++) {
2140 if (!ppcg_kernel_requires_array_argument(Kernel, i))
2141 continue;
2142
2143 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2144 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2145 isl_id_free(Id);
2146
2147 if (SAI->getNumberOfDimensions() > 0) {
2148 Arg++;
2149 continue;
2150 }
2151
2152 Value *Val = &*Arg;
2153
2154 if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
2155 Type *TypePtr = SAI->getElementType()->getPointerTo();
2156 Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
2157 Val = Builder.CreateLoad(TypedArgPtr);
2158 }
2159
2160 Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
2161 Builder.CreateStore(Val, Alloca);
2162
2163 Arg++;
2164 }
2165 }
2166
2167 void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
2168 auto *FN = Builder.GetInsertBlock()->getParent();
2169 auto Arg = FN->arg_begin();
2170
2171 bool StoredScalar = false;
2172 for (long i = 0; i < Kernel->n_array; i++) {
2173 if (!ppcg_kernel_requires_array_argument(Kernel, i))
2174 continue;
2175
2176 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2177 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2178 isl_id_free(Id);
2179
2180 if (SAI->getNumberOfDimensions() > 0) {
2181 Arg++;
2182 continue;
2183 }
2184
2185 if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
2186 Arg++;
2187 continue;
2188 }
2189
2190 Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
2191 Value *ArgPtr = &*Arg;
2192 Type *TypePtr = SAI->getElementType()->getPointerTo();
2193 Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
2194 Value *Val = Builder.CreateLoad(Alloca);
2195 Builder.CreateStore(Val, TypedArgPtr);
2196 StoredScalar = true;
2197
2198 Arg++;
2199 }
2200
2201 if (StoredScalar) {
2202 /// If more than one thread performs scalar stores, the generated code
2203 /// might be incorrect if we only store at the end of the kernel. To
2204 /// support this case we need to store these scalars back at each memory
2205 /// store or at least before each kernel barrier.
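/// A hypothetical example of the hazard (our illustration, not from the
/// original source): if the kernel body lowers to
///
///   store float %v, float* %scalar.alloca   ; executed by every thread
///
/// then copying %scalar.alloca back to the argument pointer only at kernel
/// exit loses the ordering between the per-thread stores, which is why we
/// conservatively bail out below for parallel kernels.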
2206 if (Kernel->n_block != 0 || Kernel->n_grid != 0) {
2207 BuildSuccessful = false;
2208 LLVM_DEBUG(
2209 dbgs() << getUniqueScopName(&S)
2210 << " has a store to a scalar value that"
2211 " would be undefined to run in parallel. Bailing out.\n";);
2212 }
2213 }
2214 }
2215
2216 void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
2217 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2218
2219 for (int i = 0; i < Kernel->n_var; ++i) {
2220 struct ppcg_kernel_var &Var = Kernel->var[i];
2221 isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
2222 Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();
2223
2224 Type *ArrayTy = EleTy;
2225 SmallVector<const SCEV *, 4> Sizes;
2226
2227 Sizes.push_back(nullptr);
2228 for (unsigned int j = 1; j < Var.array->n_index; ++j) {
2229 isl_val *Val = isl_vec_get_element_val(Var.size, j);
2230 long Bound = isl_val_get_num_si(Val);
2231 isl_val_free(Val);
2232 Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
2233 }
2234
2235 for (int j = Var.array->n_index - 1; j >= 0; --j) {
2236 isl_val *Val = isl_vec_get_element_val(Var.size, j);
2237 long Bound = isl_val_get_num_si(Val);
2238 isl_val_free(Val);
2239 ArrayTy = ArrayType::get(ArrayTy, Bound);
2240 }
2241
2242 const ScopArrayInfo *SAI;
2243 Value *Allocation;
2244 if (Var.type == ppcg_access_shared) {
2245 auto GlobalVar = new GlobalVariable(
2246 *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
2247 nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
2248 GlobalVar->setAlignment(llvm::Align(EleTy->getPrimitiveSizeInBits() / 8));
2249 GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));
2250
2251 Allocation = GlobalVar;
2252 } else if (Var.type == ppcg_access_private) {
2253 Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
2254 } else {
2255 llvm_unreachable("unknown variable type");
2256 }
2257 SAI =
2258 S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
2259 Id = isl_id_alloc(S.getIslCtx().get(), Var.name, nullptr);
2260 IDToValue[Id] = Allocation;
2261 LocalArrays.push_back(Allocation);
2262 KernelIds.push_back(Id);
2263 IDToSAI[Id] = SAI;
2264 }
2265 }
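// Sketch of the generated allocations (our example names, not from the
// original source): a shared ppcg variable "shared_A" of type float[32][32]
// becomes an addrspace(3) global in the kernel module,
//
//   @shared_A = internal addrspace(3) global [32 x [32 x float]]
//               zeroinitializer, align 4
//
// while a private variable becomes a plain alloca named "private_array".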
2266
2267 void GPUNodeBuilder::createKernelFunction(
2268 ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
2269 SetVector<Function *> &SubtreeFunctions) {
2270 std::string Identifier = getKernelFuncName(Kernel->id);
2271 GPUModule.reset(new Module(Identifier, Builder.getContext()));
2272
2273 switch (Arch) {
2274 case GPUArch::NVPTX64:
2275 if (Runtime == GPURuntime::CUDA)
2276 GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
2277 else if (Runtime == GPURuntime::OpenCL)
2278 GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
2279 GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
2280 break;
2281 case GPUArch::SPIR32:
2282 GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
2283 GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
2284 break;
2285 case GPUArch::SPIR64:
2286 GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
2287 GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
2288 break;
2289 }
2290
2291 Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);
2292
2293 BasicBlock *PrevBlock = Builder.GetInsertBlock();
2294 auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);
2295
2296 DT.addNewBlock(EntryBlock, PrevBlock);
2297
2298 Builder.SetInsertPoint(EntryBlock);
2299 Builder.CreateRetVoid();
2300 Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());
2301
2302 ScopDetection::markFunctionAsInvalid(FN);
2303
2304 prepareKernelArguments(Kernel, FN);
2305 createKernelVariables(Kernel, FN);
2306
2307 switch (Arch) {
2308 case GPUArch::NVPTX64:
2309 insertKernelIntrinsics(Kernel);
2310 break;
2311 case GPUArch::SPIR32:
2312 insertKernelCallsSPIR(Kernel, false);
2313 break;
2314 case GPUArch::SPIR64:
2315 insertKernelCallsSPIR(Kernel, true);
2316 break;
2317 }
2318 }
2319
2320 std::string GPUNodeBuilder::createKernelASM() {
2321 llvm::Triple GPUTriple;
2322
2323 switch (Arch) {
2324 case GPUArch::NVPTX64:
2325 switch (Runtime) {
2326 case GPURuntime::CUDA:
2327 GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
2328 break;
2329 case GPURuntime::OpenCL:
2330 GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
2331 break;
2332 }
2333 break;
2334 case GPUArch::SPIR64:
2335 case GPUArch::SPIR32:
2336 std::string SPIRAssembly;
2337 raw_string_ostream IROstream(SPIRAssembly);
2338 IROstream << *GPUModule;
2339 IROstream.flush();
2340 return SPIRAssembly;
2341 }
2342
2343 std::string ErrMsg;
2344 auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);
2345
2346 if (!GPUTarget) {
2347 errs() << ErrMsg << "\n";
2348 return "";
2349 }
2350
2351 TargetOptions Options;
2352 Options.UnsafeFPMath = FastMath;
2353
2354 std::string subtarget;
2355
2356 switch (Arch) {
2357 case GPUArch::NVPTX64:
2358 subtarget = CudaVersion;
2359 break;
2360 case GPUArch::SPIR32:
2361 case GPUArch::SPIR64:
2362 llvm_unreachable("No subtarget for SPIR architecture");
2363 }
2364
2365 std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
2366 GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));
2367
2368 SmallString<0> ASMString;
2369 raw_svector_ostream ASMStream(ASMString);
2370 llvm::legacy::PassManager PM;
2371
2372 PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));
2373
2374 if (TargetM->addPassesToEmitFile(PM, ASMStream, nullptr, CGFT_AssemblyFile,
2375 true /* verify */)) {
2376 errs() << "The target does not support generation of this file type!\n";
2377 return "";
2378 }
2379
2380 PM.run(*GPUModule);
2381
2382 return ASMStream.str().str();
2383 }
2384
2385 bool GPUNodeBuilder::requiresCUDALibDevice() {
2386 bool RequiresLibDevice = false;
2387 for (Function &F : GPUModule->functions()) {
2388 if (!F.isDeclaration())
2389 continue;
2390
2391 const std::string CUDALibDeviceFunc = getCUDALibDeviceFunction(F.getName());
2392 if (CUDALibDeviceFunc.length() != 0) {
2393 // We need to handle the case where a module looks like this:
2394 // @expf(..)
2395 // @llvm.exp.f64(..)
2396 // Both of these functions would be renamed to `__nv_expf`.
2397 //
2398 // So, we must first check for the existence of the libdevice function.
2399 // If this exists, we replace our current function with it.
2400 //
2401 // If it does not exist, we rename the current function to the
2402 // libdevice function name.
2403 if (Function *Replacement = F.getParent()->getFunction(CUDALibDeviceFunc))
2404 F.replaceAllUsesWith(Replacement);
2405 else
2406 F.setName(CUDALibDeviceFunc);
2407 RequiresLibDevice = true;
2408 }
2409 }
2410
2411 return RequiresLibDevice;
2412 }
2413
2414 void GPUNodeBuilder::addCUDALibDevice() {
2415 if (Arch != GPUArch::NVPTX64)
2416 return;
2417
2418 if (requiresCUDALibDevice()) {
2419 SMDiagnostic Error;
2420
2421 errs() << CUDALibDevice << "\n";
2422 auto LibDeviceModule =
2423 parseIRFile(CUDALibDevice, Error, GPUModule->getContext());
2424
2425 if (!LibDeviceModule) {
2426 BuildSuccessful = false;
2427 report_fatal_error("Could not find or load libdevice. Skipping GPU "
2428 "kernel generation. Please set -polly-acc-libdevice "
2429 "accordingly.\n");
2430 return;
2431 }
2432
2433 Linker L(*GPUModule);
2434
2435 // Set an nvptx64 target triple to avoid linker warnings. The original
2436 // triple of the libdevice files is nvptx-unknown-unknown.
2437 LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
2438 L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
2439 }
2440 }
2441
2442 std::string GPUNodeBuilder::finalizeKernelFunction() {
2443
2444 if (verifyModule(*GPUModule)) {
2445 LLVM_DEBUG(dbgs() << "verifyModule failed on module:\n";
2446 GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
2447 LLVM_DEBUG(dbgs() << "verifyModule Error:\n";
2448 verifyModule(*GPUModule, &dbgs()););
2449
2450 if (FailOnVerifyModuleFailure)
2451 llvm_unreachable("VerifyModule failed.");
2452
2453 BuildSuccessful = false;
2454 return "";
2455 }
2456
2457 addCUDALibDevice();
2458
2459 if (DumpKernelIR)
2460 outs() << *GPUModule << "\n";
2461
2462 if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
2463 // Optimize module.
2464 llvm::legacy::PassManager OptPasses;
2465 PassManagerBuilder PassBuilder;
2466 PassBuilder.OptLevel = 3;
2467 PassBuilder.SizeLevel = 0;
2468 PassBuilder.populateModulePassManager(OptPasses);
2469 OptPasses.run(*GPUModule);
2470 }
2471
2472 std::string Assembly = createKernelASM();
2473
2474 if (DumpKernelASM)
2475 outs() << Assembly << "\n";
2476
2477 GPUModule.release();
2478 KernelIDs.clear();
2479
2480 return Assembly;
2481 }
2482 /// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`
2483 /// @param PwAffs The list of piecewise affine functions to create an
2484 /// `isl_pw_aff_list` from. We expect an rvalue ref because
2485 /// all the isl_pw_aff are used up by this function.
2486 ///
2487 /// @returns The `isl_pw_aff_list`.
2488 __isl_give isl_pw_aff_list *
2489 createPwAffList(isl_ctx *Context,
2490 const std::vector<__isl_take isl_pw_aff *> &&PwAffs) {
2491 isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size());
2492
2493 for (unsigned i = 0; i < PwAffs.size(); i++) {
2494 List = isl_pw_aff_list_insert(List, i, PwAffs[i]);
2495 }
2496 return List;
2497 }
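// Hypothetical usage sketch (not in the original source): given two pw_affs
// PA0 and PA1 whose references we want to hand over,
//
//   isl_pw_aff_list *List = createPwAffList(Ctx, {PA0, PA1});
//
// consumes both references, matching the rvalue-reference contract above.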
2498
2499 /// Align all the `PwAffs` such that they have the same parameter dimensions.
2500 ///
2501 /// We loop over all `pw_aff` and align all of their spaces together to
2502 /// create a common space for all the `pw_aff`. This common space is the
2503 /// `AlignSpace`. We then align all the `pw_aff` to this space. We start
2504 /// with the given `SeedSpace`.
2505 /// @param PwAffs The list of piecewise affine functions we want to align.
2506 /// This is an rvalue reference because the entire vector is
2507 /// used up by the end of the operation.
2508 /// @param SeedSpace The space to start the alignment process with.
2509 /// @returns A std::pair, whose first element is the aligned space,
2510 /// whose second element is the vector of aligned piecewise
2511 /// affines.
2512 static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>>
2513 alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs,
2514 __isl_take isl_space *SeedSpace) {
2515 assert(SeedSpace && "Invalid seed space given.");
2516
2517 isl_space *AlignSpace = SeedSpace;
2518 for (isl_pw_aff *PwAff : PwAffs) {
2519 isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff);
2520 AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace);
2521 }
2522 std::vector<isl_pw_aff *> AdjustedPwAffs;
2523
2524 for (unsigned i = 0; i < PwAffs.size(); i++) {
2525 isl_pw_aff *Adjusted = PwAffs[i];
2526 assert(Adjusted && "Invalid pw_aff given.");
2527 Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace));
2528 AdjustedPwAffs.push_back(Adjusted);
2529 }
2530 return std::make_pair(AlignSpace, AdjustedPwAffs);
2531 }
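// For intuition (an editorial example, not from the original source): aligning
//
//   [n] -> { [] -> [(n)] }   and   [m] -> { [] -> [(m)] }
//
// against a seed space with parameter [p] produces the common parameter list
// [p, n, m], and both pw_affs are rewritten into that space:
//
//   [p, n, m] -> { [] -> [(n)] }   and   [p, n, m] -> { [] -> [(m)] }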
2532
2533 namespace {
2534 class PPCGCodeGeneration : public ScopPass {
2535 public:
2536 static char ID;
2537
2538 GPURuntime Runtime = GPURuntime::CUDA;
2539
2540 GPUArch Architecture = GPUArch::NVPTX64;
2541
2542 /// The scop that is currently processed.
2543 Scop *S;
2544
2545 LoopInfo *LI;
2546 DominatorTree *DT;
2547 ScalarEvolution *SE;
2548 const DataLayout *DL;
2549 RegionInfo *RI;
2550
2551 PPCGCodeGeneration() : ScopPass(ID) {}
2552
2553 /// Construct compilation options for PPCG.
2554 ///
2555 /// @returns The compilation options.
2556 ppcg_options *createPPCGOptions() {
2557 auto DebugOptions =
2558 (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
2559 auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));
2560
2561 DebugOptions->dump_schedule_constraints = false;
2562 DebugOptions->dump_schedule = false;
2563 DebugOptions->dump_final_schedule = false;
2564 DebugOptions->dump_sizes = false;
2565 DebugOptions->verbose = false;
2566
2567 Options->debug = DebugOptions;
2568
2569 Options->group_chains = false;
2570 Options->reschedule = true;
2571 Options->scale_tile_loops = false;
2572 Options->wrap = false;
2573
2574 Options->non_negative_parameters = false;
2575 Options->ctx = nullptr;
2576 Options->sizes = nullptr;
2577
2578 Options->tile = true;
2579 Options->tile_size = 32;
2580
2581 Options->isolate_full_tiles = false;
2582
2583 Options->use_private_memory = PrivateMemory;
2584 Options->use_shared_memory = SharedMemory;
2585 Options->max_shared_memory = 48 * 1024;
2586
2587 Options->target = PPCG_TARGET_CUDA;
2588 Options->openmp = false;
2589 Options->linearize_device_arrays = true;
2590 Options->allow_gnu_extensions = false;
2591
2592 Options->unroll_copy_shared = false;
2593 Options->unroll_gpu_tile = false;
2594 Options->live_range_reordering = true;
2595
2596 Options->live_range_reordering = true;
2597 Options->hybrid = false;
2598 Options->opencl_compiler_options = nullptr;
2599 Options->opencl_use_gpu = false;
2600 Options->opencl_n_include_file = 0;
2601 Options->opencl_include_files = nullptr;
2602 Options->opencl_print_kernel_types = false;
2603 Options->opencl_embed_kernel_code = false;
2604
2605 Options->save_schedule_file = nullptr;
2606 Options->load_schedule_file = nullptr;
2607
2608 return Options;
2609 }
2610
2611 /// Get a tagged access relation containing all accesses of type @p AccessTy.
2612 ///
2613 /// Instead of a normal access of the form:
2614 ///
2615 /// Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
2616 ///
2617 /// a tagged access has the form
2618 ///
2619 /// [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
2620 ///
2621 /// where 'id' is an additional space that references the memory access that
2622 /// triggered the access.
2623 ///
2624 /// @param AccessTy The type of the memory accesses to collect.
2625 ///
2626 /// @return The relation describing all tagged memory accesses.
2627 isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
2628 isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace().release());
2629
2630 for (auto &Stmt : *S)
2631 for (auto &Acc : Stmt)
2632 if (Acc->getType() == AccessTy) {
2633 isl_map *Relation = Acc->getAccessRelation().release();
2634 Relation =
2635 isl_map_intersect_domain(Relation, Stmt.getDomain().release());
2636
2637 isl_space *Space = isl_map_get_space(Relation);
2638 Space = isl_space_range(Space);
2639 Space = isl_space_from_range(Space);
2640 Space =
2641 isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
2642 isl_map *Universe = isl_map_universe(Space);
2643 Relation = isl_map_domain_product(Relation, Universe);
2644 Accesses = isl_union_map_add_map(Accesses, Relation);
2645 }
2646
2647 return Accesses;
2648 }
2649
2650 /// Get the set of all read accesses, tagged with the access id.
2651 ///
2652 /// @see getTaggedAccesses
2653 isl_union_map *getTaggedReads() {
2654 return getTaggedAccesses(MemoryAccess::READ);
2655 }
2656
2657 /// Get the set of all may (and must) accesses, tagged with the access id.
2658 ///
2659 /// @see getTaggedAccesses
2660 isl_union_map *getTaggedMayWrites() {
2661 return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
2662 getTaggedAccesses(MemoryAccess::MUST_WRITE));
2663 }
2664
2665 /// Get the set of all must accesses, tagged with the access id.
2666 ///
2667 /// @see getTaggedAccesses
2668 isl_union_map *getTaggedMustWrites() {
2669 return getTaggedAccesses(MemoryAccess::MUST_WRITE);
2670 }
2671
2672 /// Collect parameter and array names as isl_ids.
2673 ///
2674 /// To reason about the different parameters and arrays used, ppcg requires
2675 /// a list of all isl_ids in use. As PPCG traditionally performs
2676 /// source-to-source compilation, each of these isl_ids is mapped to the
2677 /// expression that represents it. As we do not have a corresponding
2678 /// expression in Polly, we just map each id to a 'zero' expression to match
2679 /// the data format that ppcg expects.
2680 ///
2681 /// @returns A map from collected ids to 'zero' ast expressions.
2682 __isl_give isl_id_to_ast_expr *getNames() {
2683 auto *Names = isl_id_to_ast_expr_alloc(
2684 S->getIslCtx().get(),
2685 S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
2686 auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx().get()));
2687
2688 for (const SCEV *P : S->parameters()) {
2689 isl_id *Id = S->getIdForParam(P).release();
2690 Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
2691 }
2692
2693 for (auto &Array : S->arrays()) {
2694 auto Id = Array->getBasePtrId().release();
2695 Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
2696 }
2697
2698 isl_ast_expr_free(Zero);
2699
2700 return Names;
2701 }
2702
2703 /// Create a new PPCG scop from the current scop.
2704 ///
2705 /// The PPCG scop is initialized with data from the current polly::Scop. From
2706 /// this initial data, the data-dependences in the PPCG scop are initialized.
2707 /// We do not use Polly's dependence analysis for now, to ensure we match
2708 /// the PPCG default behaviour more closely.
2709 ///
2710 /// @returns A new ppcg scop.
2711 ppcg_scop *createPPCGScop() {
2712 MustKillsInfo KillsInfo = computeMustKillsInfo(*S);
2713
2714 auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));
2715
2716 PPCGScop->options = createPPCGOptions();
2717 // enable live range reordering
2718 PPCGScop->options->live_range_reordering = 1;
2719
2720 PPCGScop->start = 0;
2721 PPCGScop->end = 0;
2722
2723 PPCGScop->context = S->getContext().release();
2724 PPCGScop->domain = S->getDomains().release();
2725 // TODO: investigate this further. PPCG calls collect_call_domains.
2726 PPCGScop->call = isl_union_set_from_set(S->getContext().release());
2727 PPCGScop->tagged_reads = getTaggedReads();
2728 PPCGScop->reads = S->getReads().release();
2729 PPCGScop->live_in = nullptr;
2730 PPCGScop->tagged_may_writes = getTaggedMayWrites();
2731 PPCGScop->may_writes = S->getWrites().release();
2732 PPCGScop->tagged_must_writes = getTaggedMustWrites();
2733 PPCGScop->must_writes = S->getMustWrites().release();
2734 PPCGScop->live_out = nullptr;
2735 PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.release();
2736 PPCGScop->must_kills = KillsInfo.MustKills.release();
2737
2738 PPCGScop->tagger = nullptr;
2739 PPCGScop->independence =
2740 isl_union_map_empty(isl_set_get_space(PPCGScop->context));
2741 PPCGScop->dep_flow = nullptr;
2742 PPCGScop->tagged_dep_flow = nullptr;
2743 PPCGScop->dep_false = nullptr;
2744 PPCGScop->dep_forced = nullptr;
2745 PPCGScop->dep_order = nullptr;
2746 PPCGScop->tagged_dep_order = nullptr;
2747
2748 PPCGScop->schedule = S->getScheduleTree().release();
2749 // If we have something non-trivial to kill, add it to the schedule
2750 if (KillsInfo.KillsSchedule.get())
2751 PPCGScop->schedule = isl_schedule_sequence(
2752 PPCGScop->schedule, KillsInfo.KillsSchedule.release());
2753
2754 PPCGScop->names = getNames();
2755 PPCGScop->pet = nullptr;
2756
2757 compute_tagger(PPCGScop);
2758 compute_dependences(PPCGScop);
2759 eliminate_dead_code(PPCGScop);
2760
2761 return PPCGScop;
2762 }
2763
2764 /// Collect the array accesses in a statement.
2765 ///
2766 /// @param Stmt The statement for which to collect the accesses.
2767 ///
2768 /// @returns A list of array accesses.
2769 gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
2770 gpu_stmt_access *Accesses = nullptr;
2771
2772 for (MemoryAccess *Acc : Stmt) {
2773 auto Access =
2774 isl_alloc_type(S->getIslCtx().get(), struct gpu_stmt_access);
2775 Access->read = Acc->isRead();
2776 Access->write = Acc->isWrite();
2777 Access->access = Acc->getAccessRelation().release();
2778 isl_space *Space = isl_map_get_space(Access->access);
2779 Space = isl_space_range(Space);
2780 Space = isl_space_from_range(Space);
2781 Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
2782 isl_map *Universe = isl_map_universe(Space);
2783 Access->tagged_access =
2784 isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
2785 Access->exact_write = !Acc->isMayWrite();
2786 Access->ref_id = Acc->getId().release();
2787 Access->next = Accesses;
2788 Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
2789 // TODO: Also mark one-element accesses to arrays as fixed-element.
2790 Access->fixed_element =
2791 Acc->isLatestScalarKind() ? isl_bool_true : isl_bool_false;
2792 Accesses = Access;
2793 }
2794
2795 return Accesses;
2796 }
2797
2798 /// Collect the list of GPU statements.
2799 ///
2800 /// Each statement has an id, a pointer to the underlying data structure,
2801 /// as well as a list with all memory accesses.
2802 ///
2803 /// TODO: Initialize the list of memory accesses.
2804 ///
2805 /// @returns A linked-list of statements.
2806 gpu_stmt *getStatements() {
2807 gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx().get(), struct gpu_stmt,
2808 std::distance(S->begin(), S->end()));
2809
2810 int i = 0;
2811 for (auto &Stmt : *S) {
2812 gpu_stmt *GPUStmt = &Stmts[i];
2813
2814 GPUStmt->id = Stmt.getDomainId().release();
2815
2816 // We use the pet stmt pointer to keep track of the Polly statements.
2817 GPUStmt->stmt = (pet_stmt *)&Stmt;
2818 GPUStmt->accesses = getStmtAccesses(Stmt);
2819 i++;
2820 }
2821
2822 return Stmts;
2823 }
2824
2825 /// Derive the extent of an array.
2826 ///
2827 /// The extent of an array is the set of elements that are within the
2828 /// accessed array. For the inner dimensions, the extent constraints are
2829 /// 0 and the size of the corresponding array dimension. For the first
2830 /// (outermost) dimension, the extent constraints are the minimal and maximal
2831 /// subscript value for the first dimension.
2832 ///
2833 /// @param Array The array to derive the extent for.
2834 ///
2835 /// @returns An isl_set describing the extent of the array.
2836 isl::set getExtent(ScopArrayInfo *Array) {
2837 unsigned NumDims = Array->getNumberOfDimensions();
2838
2839 if (Array->getNumberOfDimensions() == 0)
2840 return isl::set::universe(Array->getSpace());
2841
2842 isl::union_map Accesses = S->getAccesses(Array);
2843 isl::union_set AccessUSet = Accesses.range();
2844 AccessUSet = AccessUSet.coalesce();
2845 AccessUSet = AccessUSet.detect_equalities();
2846 AccessUSet = AccessUSet.coalesce();
2847
2848 if (AccessUSet.is_empty())
2849 return isl::set::empty(Array->getSpace());
2850
2851 isl::set AccessSet = AccessUSet.extract_set(Array->getSpace());
2852
2853 isl::local_space LS = isl::local_space(Array->getSpace());
2854
2855 isl::pw_aff Val = isl::aff::var_on_domain(LS, isl::dim::set, 0);
2856 isl::pw_aff OuterMin = AccessSet.dim_min(0);
2857 isl::pw_aff OuterMax = AccessSet.dim_max(0);
2858 OuterMin = OuterMin.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2859 OuterMax = OuterMax.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2860 OuterMin = OuterMin.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2861 OuterMax = OuterMax.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2862
2863 isl::set Extent = isl::set::universe(Array->getSpace());
2864
2865 Extent = Extent.intersect(OuterMin.le_set(Val));
2866 Extent = Extent.intersect(OuterMax.ge_set(Val));
2867
2868 for (unsigned i = 1; i < NumDims; ++i)
2869 Extent = Extent.lower_bound_si(isl::dim::set, i, 0);
2870
2871 for (unsigned i = 0; i < NumDims; ++i) {
2872 isl::pw_aff PwAff = Array->getDimensionSizePw(i);
2873
2874 // The isl_pw_aff for a dimension's size can be null for the outermost
2875 // dimension; only a Fortran array carries a legitimate size there.
2876 if (PwAff.is_null()) {
2877 assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
2878 continue;
2879 }
2880
2881 isl::pw_aff Val = isl::aff::var_on_domain(
2882 isl::local_space(Array->getSpace()), isl::dim::set, i);
2883 PwAff = PwAff.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2884 PwAff = PwAff.set_tuple_id(isl::dim::in, Val.get_tuple_id(isl::dim::in));
2885 isl::set Set = PwAff.gt_set(Val);
2886 Extent = Set.intersect(Extent);
2887 }
2888
2889 return Extent;
2890 }
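// Worked example (ours, not from the original source): for accesses to a
// two-dimensional array A with subscripts A[i][j], 5 <= i <= N, and a second
// dimension of size 64, the derived extent is roughly
//
//   [N] -> { A[o0, o1] : 5 <= o0 <= N and 0 <= o1 < 64 }
//
// i.e. min/max bounds for the outermost dimension and [0, size) for the rest.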
2891
2892 /// Derive the bounds of an array.
2893 ///
2894 /// For the first dimension we derive the bound of the array from the extent
2895 /// of this dimension. For inner dimensions we obtain their size directly from
2896 /// ScopArrayInfo.
2897 ///
2898 /// @param PPCGArray The array to compute bounds for.
2899 /// @param Array The polly array from which to take the information.
2900 void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
2901 std::vector<isl_pw_aff *> Bounds;
2902
2903 if (PPCGArray.n_index > 0) {
2904 if (isl_set_is_empty(PPCGArray.extent)) {
2905 isl_set *Dom = isl_set_copy(PPCGArray.extent);
2906 isl_local_space *LS = isl_local_space_from_space(
2907 isl_space_params(isl_set_get_space(Dom)));
2908 isl_set_free(Dom);
2909 isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
2910 Bounds.push_back(Zero);
2911 } else {
2912 isl_set *Dom = isl_set_copy(PPCGArray.extent);
2913 Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
2914 isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
2915 isl_set_free(Dom);
2916 Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
2917 isl_local_space *LS =
2918 isl_local_space_from_space(isl_set_get_space(Dom));
2919 isl_aff *One = isl_aff_zero_on_domain(LS);
2920 One = isl_aff_add_constant_si(One, 1);
2921 Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
2922 Bound = isl_pw_aff_gist(Bound, S->getContext().release());
2923 Bounds.push_back(Bound);
2924 }
2925 }
2926
2927 for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
2928 isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
2929 auto LS = isl_pw_aff_get_domain_space(Bound);
2930 auto Aff = isl_multi_aff_zero(LS);
2931
2932 // We need types to work out, which is why we perform this weird dance
2933 // with `Aff` and `Bound`. Consider this example:
2934
2935 // LS: [p] -> { [] }
2936 // Zero: [p] -> { [] } | Implicitly, is [p] -> { ~ -> [] }.
2937 // This `~` is used to denote a "null space" (which is different from
2938 // a *zero dimensional* space), which is something that ISL does not
2939 // show you when pretty printing.
2940
2941 // Bound: [p] -> { [] -> [(10p)] } | Here, the [] is a *zero dimensional*
2942 // space, not a "null space" which does not exist at all.
2943
2944 // When we pullback (precompose) `Bound` with `Zero`, we get:
2945 // Bound . Zero =
2946 // ([p] -> { [] -> [(10p)] }) . ([p] -> {~ -> [] }) =
2947 // [p] -> { ~ -> [(10p)] } =
2948 // [p] -> [(10p)] (as ISL pretty prints it)
2949 // Bound Pullback: [p] -> { [(10p)] }
2950
2951 // We want this kind of an expression for Bound, without a
2952 // zero dimensional input, but with a "null space" input for the types
2953 // to work out later on, as far as I (Siddharth Bhat) understand.
2954 // I was unable to find a reference to this in the ISL manual.
2955 // References: Tobias Grosser.
2956
2957 Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
2958 Bounds.push_back(Bound);
2959 }
2960
2961 /// To construct an `isl_multi_pw_aff`, we need all the individual `pw_aff`
2962 /// to have the same parameter dimensions. So, we need to align them to an
2963 /// appropriate space.
2964 /// Scop::Context is _not_ an appropriate space, because when we have
2965 /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not
2966 /// contain all parameter dimensions.
2967 /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together.
    isl_space *SeedAlignSpace = S->getParamSpace().release();
    SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1);

    isl_space *AlignSpace = nullptr;
    std::vector<isl_pw_aff *> AlignedBounds;
    std::tie(AlignSpace, AlignedBounds) =
        alignPwAffs(std::move(Bounds), SeedAlignSpace);

    assert(AlignSpace && "alignPwAffs did not initialise AlignSpace");

    isl_pw_aff_list *BoundsList =
        createPwAffList(S->getIslCtx().get(), std::move(AlignedBounds));

    isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);
    BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace);

    assert(BoundsSpace && "Unable to access space of array.");
    assert(BoundsList && "Unable to access list of bounds.");

    PPCGArray.bound =
        isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
    assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
  }

  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg  The program to compute the arrays for.
  /// @param ValidSAIs The ScopArrayInfo objects to translate into gpu arrays.
  void createArrays(gpu_prog *PPCGProg,
                    const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
    int i = 0;
    for (auto &Array : ValidSAIs) {
      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace().release();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = DL->getTypeAllocSize(Array->getElementType());
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.extent = getExtent(Array).release();
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar =
          Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;
      PPCGArray.user = Array;

      PPCGArray.bound = nullptr;
      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
      PPCGArray.only_fixed_element = only_fixed_element_accessed(&PPCGArray);
    }
  }

  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release());

    for (auto &Array : S->arrays()) {
      isl_space *Space = Array->getSpace().release();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }
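
  // For illustration (assumed array names): for a scop containing arrays
  // A[n] and B[n][m], getArrayIdentity() returns the union map
  //   { A[i] -> A[i]; B[i, j] -> B[i, j] }
  // which createPPCGProg below uses for both `to_inner` and `to_outer`,
  // presumably because Polly, unlike pet, does not decompose accesses into
  // inner/outer (structure member) views, so the two views coincide.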

  /// Create a default-initialized PPCG GPU program.
  ///
  /// @returns A new gpu program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    if (!PPCGScop)
      return nullptr;

    auto PPCGProg = isl_calloc_type(S->getIslCtx().get(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx().get();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    // TODO: Verify that this assignment is correct.
    PPCGProg->any_to_outer = nullptr;
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();

    // Only consider arrays that have a non-empty extent. Otherwise, we would
    // also consider the following kinds of empty arrays:
    //
    //   1. Invariant loads that are represented by SAI objects.
    //   2. Arrays with statically known zero size.
    auto ValidSAIsRange =
        make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
          return !getExtent(SAI).is_empty();
        });
    SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
                                              ValidSAIsRange.end());

    PPCGProg->n_array = ValidSAIs.size();
    PPCGProg->array = isl_calloc_array(
        S->getIslCtx().get(), struct gpu_array_info, PPCGProg->n_array);

    createArrays(PPCGProg, ValidSAIs);

    PPCGProg->array_order = nullptr;
    collect_order_dependences(PPCGProg);

    PPCGProg->may_persist = compute_may_persist(PPCGProg);
    return PPCGProg;
  }

  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P       The printer to print to.
  /// @param Options The printing options to use.
  /// @param Node    The node to print.
  /// @param User    A user pointer to carry additional data. This pointer is
  ///                expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves, as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);
        isl_id_free(Id);
        isl_ast_print_options_free(Options);
        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }

  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print.
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx().get());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    outs() << String << "\n";
    free(String);
    isl_printer_free(P);
  }

  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree     An AST describing GPU code.
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx().get());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);
    outs() << "# host\n";
    outs() << String << "\n";
    free(String);
    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      outs() << "# kernel" << Kernel->id << "\n";
      printKernel(Kernel);
    }
  }

  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute a new schedule for the program.
  //  2) Map the schedule to the GPU. (TODO)
  //  3) Generate code for the new schedule. (TODO)
  //
  // We do not use Polly's ScheduleOptimizer here, as it is mostly CPU
  // specific. Instead, we use PPCG's GPU code generation strategy directly
  // from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx().get(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx().get();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set the scheduling strategy to the same strategy that PPCG uses.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    Schedule =
        isl_schedule_align_params(Schedule, S->getFullParamSpace().release());

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
      LLVM_DEBUG(dbgs() << getUniqueScopName(S)
                        << " does not have permutable bands. Bailing out\n";);
    } else {
      const bool CreateTransferToFromDevice = !PollyManagedMemory;
      Schedule = map_to_device(PPCGGen, Schedule, CreateTransferToFromDevice);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx().get());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      outs() << isl_printer_get_str(P) << "\n";
      isl_printer_free(P);
    }

    if (DumpCode) {
      outs() << "Code\n";
      outs() << "====\n";
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        outs() << "No code generated\n";
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }

  /// Free a gpu_gen structure.
  ///
  /// @param PPCGGen The gpu_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg does not free these options for us, so to avoid leaks we do it
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }

  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the number
  /// of points in an isl set through the rectangular hull surrounding this
  /// set.
  ///
  /// @param Set   The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

    for (long i = 0, n = isl_set_dim(Set, isl_dim_set); i < n; i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }
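
  // For illustration (an assumed triangular set): for
  //   [n] -> { [i, j] : 0 <= i < n and 0 <= j <= i }
  // the per-dimension extents are (max i - min i + 1) = n and
  // (max j - min j + 1) = n, so the returned expression is n * n: the
  // rectangular hull overapproximates the exact n * (n + 1) / 2 points.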

  /// Approximate the number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt  The statement for which to compute the number of dynamic
  ///              instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx().get(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }
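
  // For illustration (assumed numbers): a block statement whose basic block
  // contains 8 instructions and whose domain is
  //   [n] -> { Stmt[i] : 0 <= i < n }
  // yields the expression 8 * ((n - 1) - 0 + 1), i.e. 8 * n dynamic
  // instructions.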

  /// Approximate the number of dynamic instructions executed in the scop.
  ///
  /// @param S     The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx().get(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }

  /// Create a check that ensures sufficient compute in the scop.
  ///
  /// @param S     The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx().get(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }
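
  // For illustration: MinCompute is a command-line option defined earlier in
  // this file (assume a threshold of 10 * 512 * 512 for the sake of the
  // example). A scop whose instruction approximation is 8 * n then produces
  // the run-time expression
  //   8 * n >= 10 * 512 * 512
  // which gates GPU execution on there being enough work to offload.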

  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice))
        continue;

      for (Value *Op : Inst.operands())
        // Look for (<func-type>*) among the operands of Inst.
        if (auto PtrTy = dyn_cast<PointerType>(Op->getType())) {
          if (isa<FunctionType>(PtrTy->getElementType())) {
            LLVM_DEBUG(dbgs()
                       << Inst << " has illegal use of function in kernel.\n");
            return true;
          }
        }
    }
    return false;
  }
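
  // For illustration (a hypothetical rejected input, not from the tests):
  //
  //   void (*Callback)(void) = &SomeHostFunction; // inside the scop
  //
  // The store's operand has type void(*)(void), a pointer to a FunctionType,
  // so the operand scan above flags the block: the address of a host
  // function cannot be materialised inside a GPU kernel.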

  /// Return whether the Scop @p S uses functions in a way that we do not
  /// support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }
    return false;
  }

  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder(EnteringBB->getContext(), ConstantFolder(),
                           IRInserter(Annotator));
    Builder.SetInsertPoint(EnteringBB->getTerminator());

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important, as the
    // conditional branch will guard the original scop from new induction
    // variables that the SCEVExpander may introduce while code generating
    // the parameters, and which may introduce scalar dependences that
    // prevent us from correctly code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM.
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx().get());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);

    // Preload invariant loads. Note: This should happen before the RTC,
    // because the RTC may depend on values that are invariant-load hoisted.
    if (!NodeBuilder.preloadInvariantLoads()) {
      // Patch the introduced branch condition to ensure that we always
      // execute the original SCoP.
      auto *FalseI1 = Builder.getFalse();
      auto *SplitBBTerm = Builder.GetInsertBlock()->getTerminator();
      SplitBBTerm->setOperand(0, FalseI1);

      LLVM_DEBUG(dbgs() << "preloading invariant loads failed in function: " +
                               S->getFunction().getName() +
                               " | Scop Region: " + S->getNameStr());
      // Adjust the dominator tree accordingly.
      auto *ExitingBlock = StartBlock->getUniqueSuccessor();
      assert(ExitingBlock);
      auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
      assert(MergeBlock);
      polly::markBlockUnreachable(*StartBlock, Builder);
      polly::markBlockUnreachable(*ExitingBlock, Builder);
      auto *ExitingBB = S->getExitingBlock();
      assert(ExitingBB);

      DT->changeImmediateDominator(MergeBlock, ExitingBB);
      DT->eraseNode(ExitingBlock);
      isl_ast_expr_free(Condition);
      isl_ast_node_free(Root);
    } else {

      if (polly::PerfMonitoring) {
        PerfMonitor P(*S, EnteringBB->getParent()->getParent());
        P.initialize();
        P.insertRegionStart(SplitBlock->getTerminator());

        // TODO: Check whether this is the correct exiting block at which to
        // place the `end` performance marker. Invariant load hoisting
        // changes the CFG in a way that I do not precisely understand, so I
        // (Siddharth<siddu.druid@gmail.com>) should come back to this and
        // think about which exiting block to use.
        auto *ExitingBlock = StartBlock->getUniqueSuccessor();
        assert(ExitingBlock);
        BasicBlock *MergeBlock = ExitingBlock->getUniqueSuccessor();
        P.insertRegionEnd(MergeBlock->getTerminator());
      }

      NodeBuilder.addParameters(S->getContext().release());
      Value *RTC = NodeBuilder.createRTC(Condition);
      Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

      Builder.SetInsertPoint(&*StartBlock->begin());

      NodeBuilder.create(Root);
    }

    /// In case a sequential kernel has more surrounding loops than any
    /// parallel kernel, the SCoP is probably mostly sequential. Hence, there
    /// is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }

  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    LLVM_DEBUG(dbgs() << "PPCGCodeGen running on: " << getUniqueScopName(S)
                      << " | loop depth: " << S->getMaxLoopDepth() << "\n");

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation would need to offload function calls to
    // the kernel. This may lead to a kernel trying to call a function on
    // the host. This also allows us to prevent codegen from trying to take
    // the address of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      LLVM_DEBUG(
          dbgs() << getUniqueScopName(S)
                 << " contains a function which cannot be materialised in a "
                    "GPU kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    } else {
      LLVM_DEBUG(dbgs() << getUniqueScopName(S)
                        << " has an empty PPCGGen->tree. Bailing out.\n");
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }

  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ScopPass::getAnalysisUsage(AU);

    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    // region tree.
  }
};
} // namespace

char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)