/*
 * Copyright 2010-2012, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Assert.h"
#include "Log.h"
#include "RSTransforms.h"
#include "RSUtils.h"
#include "rsDefines.h"

#include "bcc/Compiler.h"
#include "bcc/CompilerConfig.h"
#include "bcc/Config.h"
#include "bcc/Script.h"
#include "bcc/Source.h"
#include "bcinfo/MetadataExtractor.h"

#include <llvm/Analysis/Passes.h>
#include <llvm/Analysis/TargetTransformInfo.h>
#include <llvm/CodeGen/RegAllocRegistry.h>
#include <llvm/IR/LegacyPassManager.h>
#include <llvm/IR/Module.h>
#include <llvm/Support/TargetRegistry.h>
#include <llvm/Support/raw_ostream.h>
#include <llvm/IR/DataLayout.h>
#include <llvm/Target/TargetSubtargetInfo.h>
#include <llvm/Target/TargetMachine.h>
#include <llvm/Transforms/IPO.h>
#include <llvm/Transforms/IPO/PassManagerBuilder.h>
#include <llvm/Transforms/Scalar.h>
#include <llvm/Transforms/Vectorize.h>

#include <string>
#include <set>

namespace {

// Name of metadata node where list of exported types resides
// (should be synced with slang_rs_metadata.h)
static const llvm::StringRef ExportedTypeMetadataName = "#rs_export_type";

// Every exported struct type must have the same layout according to
// the Module's DataLayout that it does according to the
// TargetMachine's DataLayout -- that is, the front end (represented
// by Module) and back end (represented by TargetMachine) must agree.
bool validateLayoutOfExportedTypes(const llvm::Module &module,
                                   const llvm::DataLayout &moduleDataLayout,
                                   const llvm::DataLayout &targetDataLayout) {
  if (moduleDataLayout == targetDataLayout)
    return true;

  const llvm::NamedMDNode *const exportedTypesMD =
      module.getNamedMetadata(ExportedTypeMetadataName);
  if (!exportedTypesMD)
    return true;

  bool allOk = true;
  for (const llvm::MDNode *const exportedTypeMD : exportedTypesMD->operands()) {
    bccAssert(exportedTypeMD->getNumOperands() == 1);

    // The name of the type in LLVM is the name of the type in the
    // metadata with "struct." prepended.
    std::string exportedTypeName =
        "struct." +
        llvm::cast<llvm::MDString>(exportedTypeMD->getOperand(0))->getString().str();

    llvm::StructType *const exportedType = module.getTypeByName(exportedTypeName);

    if (!exportedType) {
      // presumably this means the type got optimized away
      continue;
    }

    const llvm::StructLayout *const moduleStructLayout = moduleDataLayout.getStructLayout(exportedType);
    const llvm::StructLayout *const targetStructLayout = targetDataLayout.getStructLayout(exportedType);

    if (moduleStructLayout->getSizeInBits() != targetStructLayout->getSizeInBits()) {
      ALOGE("%s: getSizeInBits() does not match (%u, %u)", exportedTypeName.c_str(),
            unsigned(moduleStructLayout->getSizeInBits()), unsigned(targetStructLayout->getSizeInBits()));
      allOk = false;
    }

    // We deliberately do not check alignment of the struct as a whole -- the explicit padding
    // from slang doesn't force the alignment.

    for (unsigned elementCount = exportedType->getNumElements(), elementIdx = 0;
         elementIdx < elementCount; ++elementIdx) {
      if (moduleStructLayout->getElementOffsetInBits(elementIdx) !=
          targetStructLayout->getElementOffsetInBits(elementIdx)) {
        ALOGE("%s: getElementOffsetInBits(%u) does not match (%u, %u)",
              exportedTypeName.c_str(), elementIdx,
              unsigned(moduleStructLayout->getElementOffsetInBits(elementIdx)),
              unsigned(targetStructLayout->getElementOffsetInBits(elementIdx)));
        allOk = false;
      }
    }
  }

  return allOk;
}

} // end unnamed namespace

using namespace bcc;

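// Map a Compiler::ErrorCode to a human-readable message suitable for logging.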
const char *Compiler::GetErrorString(enum ErrorCode pErrCode) {
  switch (pErrCode) {
  case kSuccess:
    return "Successfully compiled.";
  case kInvalidConfigNoTarget:
    return "Invalid compiler config supplied (getTarget() returns nullptr.) "
           "(missing call to CompilerConfig::initialize()?)";
  case kErrCreateTargetMachine:
    return "Failed to create llvm::TargetMachine.";
  case kErrSwitchTargetMachine:
    return "Failed to switch llvm::TargetMachine.";
  case kErrNoTargetMachine:
    return "Failed to compile the script since there's no available "
           "TargetMachine. (missing call to Compiler::config()?)";
  case kErrMaterialization:
    return "Failed to materialize the module.";
  case kErrInvalidOutputFileState:
    return "Supplied output file was invalid (in the error state.)";
  case kErrPrepareOutput:
    return "Failed to prepare file for output.";
  case kPrepareCodeGenPass:
    return "Failed to construct pass list for code-generation.";
  case kErrCustomPasses:
    return "Error occurred while adding custom passes.";
  case kErrInvalidSource:
    return "Error loading input bitcode";
  case kIllegalGlobalFunction:
    return "Use of undefined external function";
  case kErrInvalidTargetMachine:
    return "Invalid/unexpected llvm::TargetMachine.";
  case kErrInvalidLayout:
    return "Invalid layout (RenderScript ABI and native ABI are incompatible)";
  }

  // This assert should never be reached as the compiler verifies that the
  // above switch covers all enum values.
  bccAssert(false && "Unknown error code encountered");
  return "";
}

//===----------------------------------------------------------------------===//
// Instance Methods
//===----------------------------------------------------------------------===//
Compiler::Compiler() : mTarget(nullptr), mEnableOpt(true) {
  return;
}

Compiler::Compiler(const CompilerConfig &pConfig) : mTarget(nullptr),
                                                    mEnableOpt(true) {
  const std::string &triple = pConfig.getTriple();

  enum ErrorCode err = config(pConfig);
  if (err != kSuccess) {
    ALOGE("%s (%s, features: %s)", GetErrorString(err),
          triple.c_str(), pConfig.getFeatureString().c_str());
    return;
  }

  return;
}

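// Create (or replace) the backend llvm::TargetMachine from the supplied
// CompilerConfig, and pick a register allocator that matches the requested
// optimization level.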
enum Compiler::ErrorCode Compiler::config(const CompilerConfig &pConfig) {
  if (pConfig.getTarget() == nullptr) {
    return kInvalidConfigNoTarget;
  }

  llvm::TargetMachine *new_target =
      (pConfig.getTarget())->createTargetMachine(pConfig.getTriple(),
                                                 pConfig.getCPU(),
                                                 pConfig.getFeatureString(),
                                                 pConfig.getTargetOptions(),
                                                 pConfig.getRelocationModel(),
                                                 pConfig.getCodeModel(),
                                                 pConfig.getOptimizationLevel());

  if (new_target == nullptr) {
    return ((mTarget != nullptr) ? kErrSwitchTargetMachine :
                                   kErrCreateTargetMachine);
  }

  // Replace the old TargetMachine.
  delete mTarget;
  mTarget = new_target;

  // Adjust register allocation policy according to the optimization level.
  //  createFastRegisterAllocator: fast but bad quality
  //  createGreedyRegisterAllocator: not so fast but good quality
  if (pConfig.getOptimizationLevel() == llvm::CodeGenOpt::None) {
    llvm::RegisterRegAlloc::setDefault(llvm::createFastRegisterAllocator);
  } else {
    llvm::RegisterRegAlloc::setDefault(llvm::createGreedyRegisterAllocator);
  }

  return kSuccess;
}

Compiler::~Compiler() {
  delete mTarget;
}


// This function has complete responsibility for creating and executing the
// exact list of compiler passes.
enum Compiler::ErrorCode Compiler::runPasses(Script &script,
                                             llvm::raw_pwrite_stream &pResult) {
  // Pass manager for link-time optimization
  llvm::legacy::PassManager transformPasses;

  // Empty MCContext.
  llvm::MCContext *mc_context = nullptr;

  transformPasses.add(
      createTargetTransformInfoWrapperPass(mTarget->getTargetIRAnalysis()));

  // Add some initial custom passes.
  addInvokeHelperPass(transformPasses);
  addExpandKernelPass(transformPasses);
  addDebugInfoPass(script, transformPasses);
  addInvariantPass(transformPasses);
  if (mTarget->getOptLevel() != llvm::CodeGenOpt::None) {
    if (!addInternalizeSymbolsPass(script, transformPasses))
      return kErrCustomPasses;
  }
  addGlobalInfoPass(script, transformPasses);

  if (mTarget->getOptLevel() == llvm::CodeGenOpt::None) {
    transformPasses.add(llvm::createGlobalOptimizerPass());
    transformPasses.add(llvm::createConstantMergePass());

  } else {
    // FIXME: Figure out which passes should be executed.
    llvm::PassManagerBuilder Builder;
    Builder.Inliner = llvm::createFunctionInliningPass();
    Builder.populateLTOPassManager(transformPasses);

    /* FIXME: Reenable autovectorization after rebase.
       bug 19324423
    // Add vectorization passes after LTO passes are in
    // additional flag: -unroll-runtime
    transformPasses.add(llvm::createLoopUnrollPass(-1, 16, 0, 1));
    // Need to pass appropriate flags here: -scalarize-load-store
    transformPasses.add(llvm::createScalarizerPass());
    transformPasses.add(llvm::createCFGSimplificationPass());
    transformPasses.add(llvm::createScopedNoAliasAAPass());
    transformPasses.add(llvm::createScalarEvolutionAliasAnalysisPass());
    // additional flags: -slp-vectorize-hor -slp-vectorize-hor-store (unnecessary?)
    transformPasses.add(llvm::createSLPVectorizerPass());
    transformPasses.add(llvm::createDeadCodeEliminationPass());
    transformPasses.add(llvm::createInstructionCombiningPass());
    */
  }

  // These passes have to come after LTO, since we don't want to examine
  // functions that are never actually called.
  if (llvm::Triple(getTargetMachine().getTargetTriple()).getArch() == llvm::Triple::x86_64 ||
      llvm::Triple(getTargetMachine().getTargetTriple()).getArch() == llvm::Triple::mips64el)
    transformPasses.add(createRSX86_64CallConvPass());  // Add pass to correct calling convention for X86-64 and mips64.
  transformPasses.add(createRSIsThreadablePass());  // Add pass to mark script as threadable.

  // RSEmbedInfoPass needs to come after we have scanned for non-threadable
  // functions.
  if (script.getEmbedInfo())
    transformPasses.add(createRSEmbedInfoPass());

  // Execute the passes.
  transformPasses.run(script.getSource().getModule());

  // Run backend separately to avoid interference between debug metadata
  // generation and backend initialization.
  llvm::legacy::PassManager codeGenPasses;

  // Add passes to the pass manager to emit machine code through MC layer.
  if (mTarget->addPassesToEmitMC(codeGenPasses, mc_context, pResult,
                                 /* DisableVerify */false)) {
    return kPrepareCodeGenPass;
  }

  // Execute the passes.
  codeGenPasses.run(script.getSource().getModule());

  return kSuccess;
}

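// Compile the module contained in the given Script down to machine code,
// writing the emitted object to pResult.  If IRStream is non-null, the final
// IR is also printed to it.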
enum Compiler::ErrorCode Compiler::compile(Script &script,
                                           llvm::raw_pwrite_stream &pResult,
                                           llvm::raw_ostream *IRStream) {
  llvm::Module &module = script.getSource().getModule();
  enum ErrorCode err;

  if (mTarget == nullptr) {
    return kErrNoTargetMachine;
  }

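  // Consistency check: the input bitcode must carry one of the expected
  // RenderScript target triples, and the backend TargetMachine's pointer
  // width must match the width implied by that triple.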
  const std::string &triple = module.getTargetTriple();
  const llvm::DataLayout dl = getTargetMachine().createDataLayout();
  unsigned int pointerSize = dl.getPointerSizeInBits();
  if (triple == "armv7-none-linux-gnueabi") {
    if (pointerSize != 32) {
      return kErrInvalidSource;
    }
  } else if (triple == "aarch64-none-linux-gnueabi") {
    if (pointerSize != 64) {
      return kErrInvalidSource;
    }
  } else {
    return kErrInvalidSource;
  }

  if (script.isStructExplicitlyPaddedBySlang()) {
    if (!validateLayoutOfExportedTypes(module, module.getDataLayout(), dl))
      return kErrInvalidLayout;
  } else {
    if (getTargetMachine().getTargetTriple().getArch() == llvm::Triple::x86) {
      // Detect and fail if TargetMachine datalayout is different than what we
      // expect. This is to detect changes in default target layout for x86 and
      // update X86_CUSTOM_DL_STRING in include/bcc/Config/Config.h appropriately.
      if (dl.getStringRepresentation().compare(X86_DEFAULT_DL_STRING) != 0) {
        return kErrInvalidTargetMachine;
      }
    }
  }

  // Sanitize module's target information.
  module.setTargetTriple(getTargetMachine().getTargetTriple().str());
  module.setDataLayout(getTargetMachine().createDataLayout());

  // Materialize the bitcode module.
  if (module.getMaterializer() != nullptr) {
    // A module with non-null materializer means that it is a lazy-load module.
    // Materialize it now. This function returns false when the materialization
    // is successful.
    std::error_code ec = module.materializeAll();
    if (ec) {
      ALOGE("Failed to materialize the module `%s'! (%s)",
            module.getModuleIdentifier().c_str(), ec.message().c_str());
      return kErrMaterialization;
    }
  }

  if ((err = runPasses(script, pResult)) != kSuccess) {
    return err;
  }

  if (IRStream) {
    *IRStream << module;
  }

  return kSuccess;
}

bool Compiler::addInternalizeSymbolsPass(Script &script, llvm::legacy::PassManager &pPM) {
  // Add a pass to internalize the symbols that don't need to have global
  // visibility.
  llvm::Module &module = script.getSource().getModule();
  bcinfo::MetadataExtractor me(&module);
  if (!me.extract()) {
    bccAssert(false && "Could not extract metadata for module!");
    return false;
  }

  // Set of symbols that should not be internalized.
  std::set<std::string> export_symbols;

  const char *sf[] = {
    kRoot,               // Graphics drawing function or compute kernel.
    kInit,               // Initialization routine called implicitly on startup.
    kRsDtor,             // Static global destructor for a script instance.
    kRsInfo,             // Variable containing string of RS metadata info.
    kRsGlobalEntries,    // Optional number of global variables.
    kRsGlobalNames,      // Optional global variable name info.
    kRsGlobalAddresses,  // Optional global variable address info.
    kRsGlobalSizes,      // Optional global variable size info.
    kRsGlobalProperties, // Optional global variable properties.
    nullptr              // Must be nullptr-terminated.
  };
  const char **special_functions = sf;
  // Special RS functions should always be global symbols.
  while (*special_functions != nullptr) {
    export_symbols.insert(*special_functions);
    special_functions++;
  }

  // Visibility of symbols that appear in rs_export_var and rs_export_func
  // should also be preserved.
  size_t exportVarCount = me.getExportVarCount();
  size_t exportFuncCount = me.getExportFuncCount();
  size_t exportForEachCount = me.getExportForEachSignatureCount();
  size_t exportReduceCount = me.getExportReduceCount();
  const char **exportVarNameList = me.getExportVarNameList();
  const char **exportFuncNameList = me.getExportFuncNameList();
  const char **exportForEachNameList = me.getExportForEachNameList();
  const bcinfo::MetadataExtractor::Reduce *exportReduceList = me.getExportReduceList();
  size_t i;

  for (i = 0; i < exportVarCount; ++i) {
    export_symbols.insert(exportVarNameList[i]);
  }

  for (i = 0; i < exportFuncCount; ++i) {
    export_symbols.insert(exportFuncNameList[i]);
  }

  // Expanded foreach functions should not be internalized; nor should
  // general reduction initializer, combiner, and outconverter
  // functions. keep_funcs keeps the names of these functions around
  // until createInternalizePass() is finished making its own copy of
  // the visible symbols.
  std::vector<std::string> keep_funcs;
  keep_funcs.reserve(exportForEachCount + exportReduceCount*4);

  for (i = 0; i < exportForEachCount; ++i) {
    keep_funcs.push_back(std::string(exportForEachNameList[i]) + ".expand");
  }
  auto keepFuncsPushBackIfPresent = [&keep_funcs](const char *Name) {
    if (Name) keep_funcs.push_back(Name);
  };
  for (i = 0; i < exportReduceCount; ++i) {
    keep_funcs.push_back(std::string(exportReduceList[i].mAccumulatorName) + ".expand");
    keepFuncsPushBackIfPresent(exportReduceList[i].mInitializerName);
    if (exportReduceList[i].mCombinerName != nullptr) {
      keep_funcs.push_back(exportReduceList[i].mCombinerName);
    } else {
      keep_funcs.push_back(nameReduceCombinerFromAccumulator(exportReduceList[i].mAccumulatorName));
    }
    keepFuncsPushBackIfPresent(exportReduceList[i].mOutConverterName);
  }

  for (auto &symbol_name : keep_funcs) {
    export_symbols.insert(symbol_name);
  }

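  // Predicate handed to the internalize pass: a global stays externally
  // visible only if its name was collected in export_symbols above.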
  auto IsExportedSymbol = [=](const llvm::GlobalValue &GV) {
    return export_symbols.count(GV.getName()) > 0;
  };

  pPM.add(llvm::createInternalizePass(IsExportedSymbol));

  return true;
}

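// Add the RS invoke helper pass; it is only added when targeting a 64-bit
// architecture.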
void Compiler::addInvokeHelperPass(llvm::legacy::PassManager &pPM) {
  llvm::Triple arch(getTargetMachine().getTargetTriple());
  if (arch.isArch64Bit()) {
    pPM.add(createRSInvokeHelperPass());
  }
}

void Compiler::addDebugInfoPass(Script &script, llvm::legacy::PassManager &pPM) {
  if (script.getSource().getDebugInfoEnabled())
    pPM.add(createRSAddDebugInfoPass());
}

void Compiler::addExpandKernelPass(llvm::legacy::PassManager &pPM) {
  // Expand ForEach and reduce on CPU path to reduce launch overhead.
  bool pEnableStepOpt = true;
  pPM.add(createRSKernelExpandPass(pEnableStepOpt));
}

void Compiler::addGlobalInfoPass(Script &script, llvm::legacy::PassManager &pPM) {
  // Add additional information about RS global variables inside the Module.
  if (script.getEmbedGlobalInfo()) {
    pPM.add(createRSGlobalInfoPass(script.getEmbedGlobalInfoSkipConstant()));
  }
}

void Compiler::addInvariantPass(llvm::legacy::PassManager &pPM) {
  // Mark Loads from RsExpandKernelDriverInfo as "load.invariant".
  // Should run after ExpandForEach and before inlining.
  pPM.add(createRSInvariantPass());
}

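// Check the module for illegal calls to undefined external functions.  This
// runs before the runtime library has been merged into the module.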
enum Compiler::ErrorCode Compiler::screenGlobalFunctions(Script &script) {
  llvm::Module &module = script.getSource().getModule();

  // Materialize the bitcode module in case this is a lazy-load module. Do not
  // clear the materializer by calling materializeAllPermanently since the
  // runtime library has not been merged into the module yet.
  if (module.getMaterializer() != nullptr) {
    std::error_code ec = module.materializeAll();
    if (ec) {
      ALOGE("Failed to materialize module `%s' when screening globals! (%s)",
            module.getModuleIdentifier().c_str(), ec.message().c_str());
      return kErrMaterialization;
    }
  }

  // Add pass to check for illegal function calls.
  llvm::legacy::PassManager pPM;
  pPM.add(createRSScreenFunctionsPass());
  pPM.run(module);

  return kSuccess;
}

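// Run the x86-specific GEP translation pass over the script's module.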
void Compiler::translateGEPs(Script &script) {
  llvm::legacy::PassManager pPM;
  pPM.add(createRSX86TranslateGEPPass());

  // Materialization done in screenGlobalFunctions above.
  pPM.run(script.getSource().getModule());
}