1 /**************************************************************************
2 *
3 * Copyright 2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 **************************************************************************/
27
28
29 /**
30 * The purpose of this module is to expose LLVM functionality not available
31 * through the C++ bindings.
32 */
33
34
35 // Undef these vars just to silence warnings
36 #undef PACKAGE_BUGREPORT
37 #undef PACKAGE_NAME
38 #undef PACKAGE_STRING
39 #undef PACKAGE_TARNAME
40 #undef PACKAGE_VERSION
41
42
43 #include <stddef.h>
44
45 #include <llvm/Config/llvm-config.h>
46 #include <llvm-c/Core.h>
47 #include <llvm-c/Support.h>
48 #include <llvm-c/ExecutionEngine.h>
49 #include <llvm/Target/TargetOptions.h>
50 #include <llvm/ExecutionEngine/ExecutionEngine.h>
51 #include <llvm/Analysis/TargetLibraryInfo.h>
52 #include <llvm/ExecutionEngine/SectionMemoryManager.h>
53 #include <llvm/Support/CommandLine.h>
54 #include <llvm/Support/PrettyStackTrace.h>
55 #include <llvm/ExecutionEngine/ObjectCache.h>
56 #include <llvm/Support/TargetSelect.h>
57 #include <llvm/CodeGen/SelectionDAGNodes.h>
58 #if LLVM_VERSION_MAJOR >= 15
59 #include <llvm/Support/MemoryBuffer.h>
60 #endif
61
62 #if LLVM_VERSION_MAJOR >= 17
63 #include <llvm/TargetParser/Host.h>
64 #include <llvm/TargetParser/Triple.h>
65 #else
66 #include <llvm/Support/Host.h>
67 #include <llvm/ADT/Triple.h>
68 #endif
69
70 #if LLVM_VERSION_MAJOR < 11
71 #include <llvm/IR/CallSite.h>
72 #endif
73 #include <llvm/IR/IRBuilder.h>
74 #include <llvm/IR/Module.h>
75 #include <llvm/Support/CBindingWrapping.h>
76
77 #include <llvm/Config/llvm-config.h>
78 #if LLVM_USE_INTEL_JITEVENTS
79 #include <llvm/ExecutionEngine/JITEventListener.h>
80 #endif
81
82 #include "c11/threads.h"
83 #include "util/u_thread.h"
84 #include "util/detect.h"
85 #include "util/u_debug.h"
86 #include "util/u_cpu_detect.h"
87
88 #include "lp_bld_misc.h"
89 #include "lp_bld_debug.h"
90
91 static void lp_run_atexit_for_destructors(void);
92
namespace {

/*
 * Ensure LLVM's thread-support mode is enabled before any gallivm code
 * calls into LLVM. The constructor of the file-scope instance below runs
 * during static initialization (i.e. before main()/dlopen() returns).
 */
class LLVMEnsureMultithreaded {
public:
   LLVMEnsureMultithreaded()
   {
      /* LLVMStartMultithreaded is a no-op on LLVM builds that are always
       * multithreaded; the check avoids the call in that common case. */
      if (!LLVMIsMultithreaded()) {
         LLVMStartMultithreaded();
      }
   }
};

static LLVMEnsureMultithreaded lLVMEnsureMultithreaded;

}
108
109 static once_flag init_native_targets_once_flag = ONCE_FLAG_INIT;
110
lp_bld_init_native_targets()111 void lp_bld_init_native_targets()
112 {
113 // If we have a native target, initialize it to ensure it is linked in and
114 // usable by the JIT.
115 llvm::InitializeNativeTarget();
116
117 llvm::InitializeNativeTargetAsmPrinter();
118
119 llvm::InitializeNativeTargetDisassembler();
120 #if MESA_DEBUG
121 {
122 char *env_llc_options = getenv("GALLIVM_LLC_OPTIONS");
123 if (env_llc_options) {
124 char *option;
125 char *options[64] = {(char *) "llc"}; // Warning without cast
126 int n;
127 for (n = 0, option = strtok(env_llc_options, " "); option; n++, option = strtok(NULL, " ")) {
128 options[n + 1] = option;
129 }
130 if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
131 debug_printf("llc additional options (%d):\n", n);
132 for (int i = 1; i <= n; i++)
133 debug_printf("\t%s\n", options[i]);
134 debug_printf("\n");
135 }
136 LLVMParseCommandLineOptions(n + 1, options, NULL);
137 }
138 }
139 #endif
140 lp_run_atexit_for_destructors();
141 }
142
extern "C" void
lp_set_target_options(void)
{
   /* The llvm target registry is not thread-safe, so drivers and gallium frontends
    * that want to initialize targets should use the lp_set_target_options()
    * function to safely initialize targets.
    *
    * LLVM targets should be initialized before the driver or gallium frontend tries
    * to access the registry.
    *
    * call_once (c11/threads.h) guarantees lp_bld_init_native_targets() runs
    * exactly once even when multiple threads race into this function.
    */
   call_once(&init_native_targets_once_flag, lp_bld_init_native_targets);
}
155
156 extern "C"
157 LLVMTargetLibraryInfoRef
gallivm_create_target_library_info(const char * triple)158 gallivm_create_target_library_info(const char *triple)
159 {
160 return reinterpret_cast<LLVMTargetLibraryInfoRef>(
161 new llvm::TargetLibraryInfoImpl(
162 llvm::Triple(triple)));
163 }
164
165 extern "C"
166 void
gallivm_dispose_target_library_info(LLVMTargetLibraryInfoRef library_info)167 gallivm_dispose_target_library_info(LLVMTargetLibraryInfoRef library_info)
168 {
169 delete reinterpret_cast<
170 llvm::TargetLibraryInfoImpl
171 *>(library_info);
172 }
173
174
typedef llvm::RTDyldMemoryManager BaseMemoryManager;


/*
 * Delegating is tedious but the default manager class is hidden in an
 * anonymous namespace in LLVM, so we cannot just derive from it to change
 * its behavior.
 *
 * Abstract base: every RTDyldMemoryManager virtual is forwarded verbatim to
 * the manager returned by mgr(), which subclasses must provide.
 */
class DelegatingJITMemoryManager : public BaseMemoryManager {

   protected:
      /* The concrete manager all calls are forwarded to; never NULL. */
      virtual BaseMemoryManager *mgr() const = 0;

   public:
      /*
       * From RTDyldMemoryManager
       */
      virtual uint8_t *allocateCodeSection(uintptr_t Size,
                                           unsigned Alignment,
                                           unsigned SectionID,
                                           llvm::StringRef SectionName) {
         return mgr()->allocateCodeSection(Size, Alignment, SectionID,
                                           SectionName);
      }
      virtual uint8_t *allocateDataSection(uintptr_t Size,
                                           unsigned Alignment,
                                           unsigned SectionID,
                                           llvm::StringRef SectionName,
                                           bool IsReadOnly) {
         return mgr()->allocateDataSection(Size, Alignment, SectionID,
                                           SectionName,
                                           IsReadOnly);
      }
      virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) {
         mgr()->registerEHFrames(Addr, LoadAddr, Size);
      }
/* LLVM 5 changed the deregisterEHFrames signature (no arguments). */
#if LLVM_VERSION_MAJOR >= 5
      virtual void deregisterEHFrames() {
         mgr()->deregisterEHFrames();
      }
#else
      virtual void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) {
         mgr()->deregisterEHFrames(Addr, LoadAddr, Size);
      }
#endif
      virtual void *getPointerToNamedFunction(const std::string &Name,
                                              bool AbortOnFailure=true) {
         return mgr()->getPointerToNamedFunction(Name, AbortOnFailure);
      }
      virtual bool finalizeMemory(std::string *ErrMsg = 0) {
         return mgr()->finalizeMemory(ErrMsg);
      }
};
228
229
230 /*
231 * Delegate memory management to one shared manager for more efficient use
232 * of memory than creating a separate pool for each LLVM engine.
233 * Keep generated code until freeGeneratedCode() is called, instead of when
234 * memory manager is destroyed, which happens during engine destruction.
235 * This allows additional memory savings as we don't have to keep the engine
236 * around in order to use the code.
237 * All methods are delegated to the shared manager except destruction and
238 * deallocating code. For the latter we just remember what needs to be
239 * deallocated later. The shared manager is deleted once it is empty.
240 */
241 class ShaderMemoryManager : public DelegatingJITMemoryManager {
242
243 BaseMemoryManager *TheMM;
244
245 struct GeneratedCode {
246 typedef std::vector<void *> Vec;
247 Vec FunctionBody, ExceptionTable;
248 BaseMemoryManager *TheMM;
249
GeneratedCodeShaderMemoryManager::GeneratedCode250 GeneratedCode(BaseMemoryManager *MM) {
251 TheMM = MM;
252 }
253
~GeneratedCodeShaderMemoryManager::GeneratedCode254 ~GeneratedCode() {
255 }
256 };
257
258 GeneratedCode *code;
259
mgr() const260 BaseMemoryManager *mgr() const {
261 return TheMM;
262 }
263
264 public:
265
ShaderMemoryManager(BaseMemoryManager * MM)266 ShaderMemoryManager(BaseMemoryManager* MM) {
267 TheMM = MM;
268 code = new GeneratedCode(MM);
269 }
270
~ShaderMemoryManager()271 virtual ~ShaderMemoryManager() {
272 /*
273 * 'code' is purposely not deleted. It is the user's responsibility
274 * to call getGeneratedCode() and freeGeneratedCode().
275 */
276 }
277
getGeneratedCode()278 struct lp_generated_code *getGeneratedCode() {
279 return (struct lp_generated_code *) code;
280 }
281
freeGeneratedCode(struct lp_generated_code * code)282 static void freeGeneratedCode(struct lp_generated_code *code) {
283 delete (GeneratedCode *) code;
284 }
285
deallocateFunctionBody(void * Body)286 virtual void deallocateFunctionBody(void *Body) {
287 // remember for later deallocation
288 code->FunctionBody.push_back(Body);
289 }
290 };
291
292 class LPObjectCache : public llvm::ObjectCache {
293 private:
294 bool has_object;
295 struct lp_cached_code *cache_out;
296 public:
LPObjectCache(struct lp_cached_code * cache)297 LPObjectCache(struct lp_cached_code *cache) {
298 cache_out = cache;
299 has_object = false;
300 }
301
~LPObjectCache()302 ~LPObjectCache() {
303 }
notifyObjectCompiled(const llvm::Module * M,llvm::MemoryBufferRef Obj)304 void notifyObjectCompiled(const llvm::Module *M, llvm::MemoryBufferRef Obj) {
305 const std::string ModuleID = M->getModuleIdentifier();
306 if (has_object)
307 fprintf(stderr, "CACHE ALREADY HAS MODULE OBJECT\n");
308 has_object = true;
309 cache_out->data_size = Obj.getBufferSize();
310 cache_out->data = malloc(cache_out->data_size);
311 memcpy(cache_out->data, Obj.getBufferStart(), cache_out->data_size);
312 }
313
getObject(const llvm::Module * M)314 virtual std::unique_ptr<llvm::MemoryBuffer> getObject(const llvm::Module *M) {
315 if (cache_out->data_size) {
316 return llvm::MemoryBuffer::getMemBuffer(llvm::StringRef((const char *)cache_out->data, cache_out->data_size), "", false);
317 }
318 return NULL;
319 }
320
321 };
322
/**
 * Fill MAttrs with the LLVM "-mattr" feature strings ("+feat"/"-feat")
 * appropriate for the host CPU architecture, based on util_get_cpu_caps()
 * (x86) or LLVM's own host-feature detection (Arm), with per-arch fixups.
 */
void
lp_build_fill_mattrs(std::vector<std::string> &MAttrs)
{

#if DETECT_ARCH_ARM
   /* llvm-3.3+ implements sys::getHostCPUFeatures for Arm,
    * which allows us to enable/disable code generation based
    * on the results of cpuid on these architectures.
    */
#if LLVM_VERSION_MAJOR >= 19
   /* llvm-19+ returns StringMap from getHostCPUFeatures.
    */
   auto features = llvm::sys::getHostCPUFeatures();
#else
   llvm::StringMap<bool> features;
   llvm::sys::getHostCPUFeatures(features);
#endif

   for (llvm::StringMapIterator<bool> f = features.begin();
        f != features.end();
        ++f) {
      MAttrs.push_back(((*f).second ? "+" : "-") + (*f).first().str());
   }
#elif DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   /*
    * Because cpu caps can be overridden with environment variables,
    * we do not use llvm::sys::getHostCPUFeatures to detect cpu features
    * here; util_get_cpu_caps() is used instead.
    */
#if DETECT_ARCH_X86_64
   /*
    * Without this, on some "buggy" qemu cpu setup, LLVM could crash
    * if LLVM detects the wrong CPU type.
    */
   MAttrs.push_back("+64bit");
#endif
   MAttrs.push_back(util_get_cpu_caps()->has_sse    ? "+sse"    : "-sse"   );
   MAttrs.push_back(util_get_cpu_caps()->has_sse2   ? "+sse2"   : "-sse2"  );
   MAttrs.push_back(util_get_cpu_caps()->has_sse3   ? "+sse3"   : "-sse3"  );
   MAttrs.push_back(util_get_cpu_caps()->has_ssse3  ? "+ssse3"  : "-ssse3" );
   MAttrs.push_back(util_get_cpu_caps()->has_sse4_1 ? "+sse4.1" : "-sse4.1");
   MAttrs.push_back(util_get_cpu_caps()->has_sse4_2 ? "+sse4.2" : "-sse4.2");
   /*
    * AVX feature is not automatically detected from CPUID by the X86 target
    * yet, because the old (yet default) JIT engine is not capable of
    * emitting the opcodes. On newer llvm versions it is and at least some
    * versions (tested with 3.3) will emit avx opcodes without this anyway.
    */
   MAttrs.push_back(util_get_cpu_caps()->has_avx  ? "+avx"  : "-avx");
   MAttrs.push_back(util_get_cpu_caps()->has_f16c ? "+f16c" : "-f16c");
   MAttrs.push_back(util_get_cpu_caps()->has_fma  ? "+fma"  : "-fma");
   MAttrs.push_back(util_get_cpu_caps()->has_avx2 ? "+avx2" : "-avx2");

   /* All avx512 have avx512f */
   MAttrs.push_back(util_get_cpu_caps()->has_avx512f  ? "+avx512f"  : "-avx512f");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512cd ? "+avx512cd" : "-avx512cd");
/* avx512er/avx512pf (Xeon Phi) were removed from LLVM 19. */
#if LLVM_VERSION_MAJOR < 19
   MAttrs.push_back(util_get_cpu_caps()->has_avx512er ? "+avx512er" : "-avx512er");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512pf ? "+avx512pf" : "-avx512pf");
#endif
   MAttrs.push_back(util_get_cpu_caps()->has_avx512bw ? "+avx512bw" : "-avx512bw");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512dq ? "+avx512dq" : "-avx512dq");
   MAttrs.push_back(util_get_cpu_caps()->has_avx512vl ? "+avx512vl" : "-avx512vl");
#endif
#if DETECT_ARCH_ARM
   if (!util_get_cpu_caps()->has_neon) {
      MAttrs.push_back("-neon");
      MAttrs.push_back("-crypto");
      MAttrs.push_back("-vfp2");
   }
#endif

#if DETECT_ARCH_PPC
   MAttrs.push_back(util_get_cpu_caps()->has_altivec ? "+altivec" : "-altivec");
   /*
    * Bug 25503 is fixed, by the same fix that fixed
    * bug 26775, in versions of LLVM later than 3.8 (starting with 3.8.1).
    * BZ 33531 actually comprises more than one bug, all of
    * which are fixed in LLVM 4.0.
    *
    * With LLVM 4.0 or higher:
    * Make sure VSX instructions are ENABLED (if supported), unless
    * VSX instructions are explicitly enabled/disabled via GALLIVM_VSX=1 or 0.
    */
   if (util_get_cpu_caps()->has_altivec) {
      MAttrs.push_back(util_get_cpu_caps()->has_vsx ? "+vsx" : "-vsx");
   }
#endif

#if DETECT_ARCH_MIPS64
   MAttrs.push_back(util_get_cpu_caps()->has_msa ? "+msa" : "-msa");
   /* MSA requires a 64-bit FPU register file */
   MAttrs.push_back("+fp64");
#endif

#if DETECT_ARCH_RISCV64 == 1
   /* Before riscv is more matured and util_get_cpu_caps() is implemented,
    * assume this for now since most of linux capable riscv machine are
    * riscv64gc
    */
   MAttrs = {"+m","+c","+a","+d","+f"};
#endif

#if DETECT_ARCH_LOONGARCH64 == 1
   /*
    * No FPU-less LoongArch64 systems are ever shipped yet, and LP64D is
    * the default ABI, so FPU is enabled here.
    *
    * The Software development convention defaults to have "128-bit
    * vector", so LSX is enabled here, see
    * https://github.com/loongson/la-softdev-convention/releases/download/v0.1/la-softdev-convention.pdf
    */
   MAttrs = {"+f","+d"};
#if LLVM_VERSION_MAJOR >= 18
   MAttrs.push_back(util_get_cpu_caps()->has_lsx  ? "+lsx"  : "-lsx");
   MAttrs.push_back(util_get_cpu_caps()->has_lasx ? "+lasx" : "-lasx");
#else
   /*
    * LLVM 17's LSX support is incomplete, and LLVM 16 does not support
    * LSX and LASX at all.  So explicitly mask both.
    */
   MAttrs.push_back("-lsx");
   MAttrs.push_back("-lasx");
#endif
#endif
}
449
450 void
lp_build_dump_mattrs(std::vector<std::string> & MAttrs)451 lp_build_dump_mattrs(std::vector<std::string> &MAttrs)
452 {
453 if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
454 int n = MAttrs.size();
455 if (n > 0) {
456 debug_printf("llc -mattr option(s): ");
457 for (int i = 0; i < n; i++)
458 debug_printf("%s%s", MAttrs[i].c_str(), (i < n - 1) ? "," : "");
459 debug_printf("\n");
460 }
461 }
462 }
463
464 /**
465 * Same as LLVMCreateJITCompilerForModule, but:
466 * - allows using MCJIT and enabling AVX feature where available.
467 * - set target options
468 *
469 * See also:
470 * - llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
471 * - llvm/tools/lli/lli.cpp
472 * - http://markmail.org/message/ttkuhvgj4cxxy2on#query:+page:1+mid:aju2dggerju3ivd3+state:results
473 */
474 extern "C"
475 LLVMBool
lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef * OutJIT,lp_generated_code ** OutCode,struct lp_cached_code * cache_out,LLVMModuleRef M,LLVMMCJITMemoryManagerRef CMM,unsigned OptLevel,char ** OutError)476 lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef *OutJIT,
477 lp_generated_code **OutCode,
478 struct lp_cached_code *cache_out,
479 LLVMModuleRef M,
480 LLVMMCJITMemoryManagerRef CMM,
481 unsigned OptLevel,
482 char **OutError)
483 {
484 using namespace llvm;
485
486 std::string Error;
487 EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
488
489 /**
490 * LLVM 3.1+ haven't more "extern unsigned llvm::StackAlignmentOverride" and
491 * friends for configuring code generation options, like stack alignment.
492 */
493 TargetOptions options;
494 #if DETECT_ARCH_X86 && LLVM_VERSION_MAJOR < 13
495 options.StackAlignmentOverride = 4;
496 #endif
497
498 builder.setEngineKind(EngineKind::JIT)
499 .setErrorStr(&Error)
500 .setTargetOptions(options)
501 #if LLVM_VERSION_MAJOR >= 18
502 .setOptLevel((CodeGenOptLevel)OptLevel);
503 #else
504 .setOptLevel((CodeGenOpt::Level)OptLevel);
505 #endif
506
507 #if DETECT_OS_WINDOWS
508 /*
509 * MCJIT works on Windows, but currently only through ELF object format.
510 *
511 * XXX: We could use `LLVM_HOST_TRIPLE "-elf"` but LLVM_HOST_TRIPLE has
512 * different strings for MinGW/MSVC, so better play it safe and be
513 * explicit.
514 */
515 # if DETECT_ARCH_X86_64
516 LLVMSetTarget(M, "x86_64-pc-win32-elf");
517 # elif DETECT_ARCH_X86
518 LLVMSetTarget(M, "i686-pc-win32-elf");
519 # elif DETECT_ARCH_AARCH64
520 LLVMSetTarget(M, "aarch64-pc-win32-elf");
521 # else
522 # error Unsupported architecture for MCJIT on Windows.
523 # endif
524 #endif
525
526 std::vector<std::string> MAttrs;
527
528 lp_build_fill_mattrs(MAttrs);
529
530 builder.setMAttrs(MAttrs);
531
532 lp_build_dump_mattrs(MAttrs);
533
534 StringRef MCPU = llvm::sys::getHostCPUName();
535 /*
536 * The cpu bits are no longer set automatically, so need to set mcpu manually.
537 * Note that the MAttrs set above will be sort of ignored (since we should
538 * not set any which would not be set by specifying the cpu anyway).
539 * It ought to be safe though since getHostCPUName() should include bits
540 * not only from the cpu but environment as well (for instance if it's safe
541 * to use avx instructions which need OS support). According to
542 * http://llvm.org/bugs/show_bug.cgi?id=19429 however if I understand this
543 * right it may be necessary to specify older cpu (or disable mattrs) though
544 * when not using MCJIT so no instructions are generated which the old JIT
545 * can't handle. Not entirely sure if we really need to do anything yet.
546 */
547
548 #if DETECT_ARCH_PPC_64
549 /*
550 * Large programs, e.g. gnome-shell and firefox, may tax the addressability
551 * of the Medium code model once dynamically generated JIT-compiled shader
552 * programs are linked in and relocated. Yet the default code model as of
553 * LLVM 8 is Medium or even Small.
554 * The cost of changing from Medium to Large is negligible:
555 * - an additional 8-byte pointer stored immediately before the shader entrypoint;
556 * - change an add-immediate (addis) instruction to a load (ld).
557 */
558 builder.setCodeModel(CodeModel::Large);
559
560 #if UTIL_ARCH_LITTLE_ENDIAN
561 /*
562 * Versions of LLVM prior to 4.0 lacked a table entry for "POWER8NVL",
563 * resulting in (big-endian) "generic" being returned on
564 * little-endian Power8NVL systems. The result was that code that
565 * attempted to load the least significant 32 bits of a 64-bit quantity
566 * from memory loaded the wrong half. This resulted in failures in some
567 * Piglit tests, e.g.
568 * .../arb_gpu_shader_fp64/execution/conversion/frag-conversion-explicit-double-uint
569 */
570 if (MCPU == "generic")
571 MCPU = "pwr8";
572 #endif
573 #endif
574
575 #if DETECT_ARCH_MIPS64
576 /*
577 * ls3a4000 CPU and ls2k1000 SoC is a mips64r5 compatible with MSA SIMD
578 * instruction set implemented, while ls3a3000 is mips64r2 compatible
579 * only. getHostCPUName() return "generic" on all loongson
580 * mips CPU currently. So we override the MCPU to mips64r5 if MSA is
581 * implemented, feedback to mips64r2 for all other ordinary mips64 cpu.
582 */
583 if (MCPU == "generic")
584 MCPU = util_get_cpu_caps()->has_msa ? "mips64r5" : "mips64r2";
585 #endif
586
587 builder.setMCPU(MCPU);
588 if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
589 debug_printf("llc -mcpu option: %s\n", MCPU.str().c_str());
590 }
591
592 ShaderMemoryManager *MM = NULL;
593 BaseMemoryManager* JMM = reinterpret_cast<BaseMemoryManager*>(CMM);
594 MM = new ShaderMemoryManager(JMM);
595 *OutCode = MM->getGeneratedCode();
596
597 builder.setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager>(MM));
598 MM = NULL; // ownership taken by std::unique_ptr
599
600 ExecutionEngine *JIT;
601
602 JIT = builder.create();
603
604 if (cache_out) {
605 LPObjectCache *objcache = new LPObjectCache(cache_out);
606 JIT->setObjectCache(objcache);
607 cache_out->jit_obj_cache = (void *)objcache;
608 }
609
610 #if LLVM_USE_INTEL_JITEVENTS
611 JITEventListener *JEL = JITEventListener::createIntelJITEventListener();
612 JIT->RegisterJITEventListener(JEL);
613 #endif
614 if (JIT) {
615 *OutJIT = wrap(JIT);
616 return 0;
617 }
618 lp_free_generated_code(*OutCode);
619 *OutCode = 0;
620 delete MM;
621 *OutError = strdup(Error.c_str());
622 return 1;
623 }
624
625
/*
 * C entry point to release an lp_generated_code handle produced by
 * ShaderMemoryManager::getGeneratedCode(); deletes the deferred-deallocation
 * record.
 */
extern "C"
void
lp_free_generated_code(struct lp_generated_code *code)
{
   ShaderMemoryManager::freeGeneratedCode(code);
}
632
633 extern "C"
634 LLVMMCJITMemoryManagerRef
lp_get_default_memory_manager()635 lp_get_default_memory_manager()
636 {
637 BaseMemoryManager *mm;
638 mm = new llvm::SectionMemoryManager();
639 return reinterpret_cast<LLVMMCJITMemoryManagerRef>(mm);
640 }
641
642 extern "C"
643 void
lp_free_memory_manager(LLVMMCJITMemoryManagerRef memorymgr)644 lp_free_memory_manager(LLVMMCJITMemoryManagerRef memorymgr)
645 {
646 delete reinterpret_cast<BaseMemoryManager*>(memorymgr);
647 }
648
649 extern "C" void
lp_free_objcache(void * objcache_ptr)650 lp_free_objcache(void *objcache_ptr)
651 {
652 LPObjectCache *objcache = (LPObjectCache *)objcache_ptr;
653 delete objcache;
654 }
655
/*
 * Thin wrapper over LLVMGetCalledValue, exposed with C linkage for the
 * C parts of gallivm.
 */
extern "C" LLVMValueRef
lp_get_called_value(LLVMValueRef call)
{
   return LLVMGetCalledValue(call);
}
661
662 extern "C" bool
lp_is_function(LLVMValueRef v)663 lp_is_function(LLVMValueRef v)
664 {
665 return LLVMGetValueKind(v) == LLVMFunctionValueKind;
666 }
667
/*
 * Force a stack-alignment override on the module.  LLVM 13 moved this
 * setting from TargetOptions onto the Module (setOverrideStackAlignment);
 * on older LLVM this function is a no-op and the override is applied via
 * TargetOptions at engine-creation time instead.
 */
extern "C" void
lp_set_module_stack_alignment_override(LLVMModuleRef MRef, unsigned align)
{
#if LLVM_VERSION_MAJOR >= 13
   llvm::Module *M = llvm::unwrap(MRef);
   M->setOverrideStackAlignment(align);
#endif
}
676
using namespace llvm;

/*
 * Dummy SDNode subclass whose sole purpose is to be constructible from this
 * file: constructing one forces LLVM's SelectionDAG-internal static
 * variables to be initialized (see lp_run_atexit_for_destructors below).
 */
class GallivmRunAtExitForStaticDestructors : public SDNode
{
public:
   /* getSDVTList (protected) calls getValueTypeList (private), which contains static variables. */
   GallivmRunAtExitForStaticDestructors(): SDNode(0, 0, DebugLoc(), getSDVTList(MVT::Other))
   {
   }
};
687
static void
lp_run_atexit_for_destructors(void)
{
   /* LLVM >= 16 registers static variable destructors on the first compile, which gcc
    * implements by calling atexit there. Before that, u_queue registers its atexit
    * handler to kill all threads. Since exit() runs atexit handlers in the reverse order,
    * the LLVM destructors are called first while shader compiler threads may still be
    * running, which crashes in LLVM in SelectionDAG.cpp.
    *
    * The solution is to run the code that declares the LLVM static variables first,
    * so that atexit for LLVM is registered first and u_queue is registered after that,
    * which ensures that all u_queue threads are terminated before LLVM destructors are
    * called.
    *
    * This just executes the code that declares static variables.
    */
   /* Construct (and immediately destroy) a temporary; the constructor's side
    * effect of touching the statics is all that matters here. */
   GallivmRunAtExitForStaticDestructors();
}
706