1 // Copyright 2016 The SwiftShader Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "Debug.hpp"
16 #include "Print.hpp"
17 #include "Reactor.hpp"
18 #include "ReactorDebugInfo.hpp"
19
20 #include "ExecutableMemory.hpp"
21 #include "Optimizer.hpp"
22
23 #include "src/IceCfg.h"
24 #include "src/IceCfgNode.h"
25 #include "src/IceELFObjectWriter.h"
26 #include "src/IceELFStreamer.h"
27 #include "src/IceGlobalContext.h"
28 #include "src/IceGlobalInits.h"
29 #include "src/IceTypes.h"
30
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/Support/FileSystem.h"
33 #include "llvm/Support/ManagedStatic.h"
34 #include "llvm/Support/raw_os_ostream.h"
35
36 #include "marl/event.h"
37
38 #if __has_feature(memory_sanitizer)
39 # include <sanitizer/msan_interface.h>
40 #endif
41
42 #if defined(_WIN32)
43 # ifndef WIN32_LEAN_AND_MEAN
44 # define WIN32_LEAN_AND_MEAN
45 # endif // !WIN32_LEAN_AND_MEAN
46 # ifndef NOMINMAX
47 # define NOMINMAX
48 # endif // !NOMINMAX
49 # include <Windows.h>
50 #endif
51
52 #include <array>
53 #include <cmath>
54 #include <iostream>
55 #include <limits>
56 #include <mutex>
57
58 // Subzero utility functions
59 // These functions only accept and return Subzero (Ice) types, and do not access any globals.
60 namespace {
61 namespace sz {
62
Ice::Cfg *createFunction(Ice::GlobalContext *context, Ice::Type returnType, const std::vector<Ice::Type> &paramTypes)
64 {
65 uint32_t sequenceNumber = 0;
66 auto *function = Ice::Cfg::create(context, sequenceNumber).release();
67
68 function->setStackSizeLimit(512 * 1024); // 512 KiB
69
70 Ice::CfgLocalAllocatorScope allocScope{ function };
71
72 for(auto type : paramTypes)
73 {
74 Ice::Variable *arg = function->makeVariable(type);
75 function->addArg(arg);
76 }
77
78 Ice::CfgNode *node = function->makeNode();
79 function->setEntryNode(node);
80
81 return function;
82 }
83
Ice::Type getPointerType(Ice::Type elementType)
85 {
86 if(sizeof(void *) == 8)
87 {
88 return Ice::IceType_i64;
89 }
90 else
91 {
92 return Ice::IceType_i32;
93 }
94 }
95
Ice::Variable *allocateStackVariable(Ice::Cfg *function, Ice::Type type, int arraySize = 0)
97 {
98 int typeSize = Ice::typeWidthInBytes(type);
99 int totalSize = typeSize * (arraySize ? arraySize : 1);
100
101 auto bytes = Ice::ConstantInteger32::create(function->getContext(), Ice::IceType_i32, totalSize);
102 auto address = function->makeVariable(getPointerType(type));
103 auto alloca = Ice::InstAlloca::create(function, address, bytes, typeSize); // SRoA depends on the alignment to match the type size.
104 function->getEntryNode()->getInsts().push_front(alloca);
105
106 return address;
107 }
108
Ice::Constant *getConstantPointer(Ice::GlobalContext *context, void const *ptr)
110 {
111 if(sizeof(void *) == 8)
112 {
113 return context->getConstantInt64(reinterpret_cast<intptr_t>(ptr));
114 }
115 else
116 {
117 return context->getConstantInt32(reinterpret_cast<intptr_t>(ptr));
118 }
119 }
120
121 // TODO(amaiorano): remove this prototype once these are moved to separate header/cpp
122 Ice::Variable *createTruncate(Ice::Cfg *function, Ice::CfgNode *basicBlock, Ice::Operand *from, Ice::Type toType);
123
124 // Wrapper for calls on C functions with Ice types
Ice::Variable *Call(Ice::Cfg *function, Ice::CfgNode *basicBlock, Ice::Type retTy, Ice::Operand *callTarget, const std::vector<Ice::Operand *> &iceArgs, bool isVariadic)
126 {
127 Ice::Variable *ret = nullptr;
128
129 // Subzero doesn't support boolean return values. Replace with an i32 temporarily,
130 // then truncate result to bool.
131 // TODO(b/151158858): Add support to Subzero's InstCall for bool-returning functions
132 const bool returningBool = (retTy == Ice::IceType_i1);
133 if(returningBool)
134 {
135 ret = function->makeVariable(Ice::IceType_i32);
136 }
137 else if(retTy != Ice::IceType_void)
138 {
139 ret = function->makeVariable(retTy);
140 }
141
142 auto call = Ice::InstCall::create(function, iceArgs.size(), ret, callTarget, false, false, isVariadic);
143 for(auto arg : iceArgs)
144 {
145 call->addArg(arg);
146 }
147
148 basicBlock->appendInst(call);
149
150 if(returningBool)
151 {
// Truncate the result to bool: if the least significant bit was set, the result will be true.
153 ret = createTruncate(function, basicBlock, ret, Ice::IceType_i1);
154 }
155
156 return ret;
157 }
158
Ice::Variable *Call(Ice::Cfg *function, Ice::CfgNode *basicBlock, Ice::Type retTy, void const *fptr, const std::vector<Ice::Operand *> &iceArgs, bool isVariadic)
160 {
161 Ice::Operand *callTarget = getConstantPointer(function->getContext(), fptr);
162 return Call(function, basicBlock, retTy, callTarget, iceArgs, isVariadic);
163 }
164
165 // Wrapper for calls on C functions with Ice types
166 template<typename Return, typename... CArgs, typename... RArgs>
Ice::Variable *Call(Ice::Cfg *function, Ice::CfgNode *basicBlock, Return(fptr)(CArgs...), RArgs &&...args)
168 {
static_assert(sizeof...(CArgs) == sizeof...(RArgs), "Expected number of args doesn't match");
170
171 Ice::Type retTy = T(rr::CToReactorT<Return>::type());
172 std::vector<Ice::Operand *> iceArgs{ std::forward<RArgs>(args)... };
173 return Call(function, basicBlock, retTy, reinterpret_cast<void const *>(fptr), iceArgs, false);
174 }
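// Illustrative usage (not exercised in this file): given a hypothetical C helper
// `int32_t clampToByte(int32_t v)`, generated code could call it with
//   sz::Call(function, basicBlock, clampToByte, someI32Operand);
// The template deduces the Ice return type via rr::CToReactorT<Return> and forwards
// the Ice::Operand arguments to the lower-level Call() overload above.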
175
Ice::Variable *createTruncate(Ice::Cfg *function, Ice::CfgNode *basicBlock, Ice::Operand *from, Ice::Type toType)
177 {
178 Ice::Variable *to = function->makeVariable(toType);
179 Ice::InstCast *cast = Ice::InstCast::create(function, Ice::InstCast::Trunc, to, from);
180 basicBlock->appendInst(cast);
181 return to;
182 }
183
Ice::Variable *createLoad(Ice::Cfg *function, Ice::CfgNode *basicBlock, Ice::Operand *ptr, Ice::Type type, unsigned int align)
185 {
186 Ice::Variable *result = function->makeVariable(type);
187 auto load = Ice::InstLoad::create(function, result, ptr, align);
188 basicBlock->appendInst(load);
189
190 return result;
191 }
192
193 } // namespace sz
194 } // namespace
195
196 namespace rr {
197 class ELFMemoryStreamer;
198 class CoroutineGenerator;
199 } // namespace rr
200
201 namespace {
202
203 // Used to automatically invoke llvm_shutdown() when driver is unloaded
204 llvm::llvm_shutdown_obj llvmShutdownObj;
205
206 // Default configuration settings. Must be accessed under mutex lock.
207 std::mutex defaultConfigLock;
rr::Config &defaultConfig()
209 {
210 // This uses a static in a function to avoid the cost of a global static
211 // initializer. See http://neugierig.org/software/chromium/notes/2011/08/static-initializers.html
212 static rr::Config config = rr::Config::Edit()
213 .apply({});
214 return config;
215 }
216
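// Codegen state for the function currently being emitted. SubzeroReactor is not
// thread safe, so these globals are intended to be accessed only while ::codegenMutex
// (taken in the Nucleus constructor and released in its destructor) is held.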
217 Ice::GlobalContext *context = nullptr;
218 Ice::Cfg *function = nullptr;
219 Ice::CfgNode *entryBlock = nullptr;
220 Ice::CfgNode *basicBlockTop = nullptr;
221 Ice::CfgNode *basicBlock = nullptr;
222 Ice::CfgLocalAllocatorScope *allocator = nullptr;
223 rr::ELFMemoryStreamer *routine = nullptr;
224
225 std::mutex codegenMutex;
226
227 Ice::ELFFileStreamer *elfFile = nullptr;
228 Ice::Fdstream *out = nullptr;
229
230 // Coroutine globals
231 rr::Type *coroYieldType = nullptr;
232 std::shared_ptr<rr::CoroutineGenerator> coroGen;
marl::Scheduler &getOrCreateScheduler()
234 {
235 static auto scheduler = [] {
236 marl::Scheduler::Config cfg;
237 cfg.setWorkerThreadCount(8);
238 return std::make_unique<marl::Scheduler>(cfg);
239 }();
240
241 return *scheduler;
242 }
243
244 rr::Nucleus::OptimizerCallback *optimizerCallback = nullptr;
245
246 } // Anonymous namespace
247
248 namespace {
249
250 #if !defined(__i386__) && defined(_M_IX86)
251 # define __i386__ 1
252 #endif
253
254 #if !defined(__x86_64__) && (defined(_M_AMD64) || defined(_M_X64))
255 # define __x86_64__ 1
256 #endif
257
Ice::OptLevel toIce(rr::Optimization::Level level)
259 {
260 switch(level)
261 {
262 // Note that Opt_0 and Opt_1 are not implemented by Subzero
263 case rr::Optimization::Level::None: return Ice::Opt_m1;
264 case rr::Optimization::Level::Less: return Ice::Opt_m1;
265 case rr::Optimization::Level::Default: return Ice::Opt_2;
266 case rr::Optimization::Level::Aggressive: return Ice::Opt_2;
267 default: UNREACHABLE("Unknown Optimization Level %d", int(level));
268 }
269 return Ice::Opt_2;
270 }
271
Ice::Intrinsics::MemoryOrder stdToIceMemoryOrder(std::memory_order memoryOrder)
273 {
274 switch(memoryOrder)
275 {
276 case std::memory_order_relaxed: return Ice::Intrinsics::MemoryOrderRelaxed;
277 case std::memory_order_consume: return Ice::Intrinsics::MemoryOrderConsume;
278 case std::memory_order_acquire: return Ice::Intrinsics::MemoryOrderAcquire;
279 case std::memory_order_release: return Ice::Intrinsics::MemoryOrderRelease;
280 case std::memory_order_acq_rel: return Ice::Intrinsics::MemoryOrderAcquireRelease;
281 case std::memory_order_seq_cst: return Ice::Intrinsics::MemoryOrderSequentiallyConsistent;
282 }
283 return Ice::Intrinsics::MemoryOrderInvalid;
284 }
285
286 class CPUID
287 {
288 public:
289 const static bool ARM;
290 const static bool SSE4_1;
291
292 private:
static void cpuid(int registers[4], int info)
294 {
295 #if defined(__i386__) || defined(__x86_64__)
296 # if defined(_WIN32)
297 __cpuid(registers, info);
298 # else
299 __asm volatile("cpuid"
300 : "=a"(registers[0]), "=b"(registers[1]), "=c"(registers[2]), "=d"(registers[3])
301 : "a"(info));
302 # endif
303 #else
304 registers[0] = 0;
305 registers[1] = 0;
306 registers[2] = 0;
307 registers[3] = 0;
308 #endif
309 }
310
constexpr static bool detectARM()
312 {
313 #if defined(__arm__) || defined(__aarch64__)
314 return true;
315 #elif defined(__i386__) || defined(__x86_64__)
316 return false;
317 #elif defined(__mips__)
318 return false;
319 #else
320 # error "Unknown architecture"
321 #endif
322 }
323
static bool detectSSE4_1()
325 {
326 #if defined(__i386__) || defined(__x86_64__)
327 int registers[4];
328 cpuid(registers, 1);
329 return (registers[2] & 0x00080000) != 0;
330 #else
331 return false;
332 #endif
333 }
334 };
335
336 constexpr bool CPUID::ARM = CPUID::detectARM();
337 const bool CPUID::SSE4_1 = CPUID::detectSSE4_1();
338 constexpr bool emulateIntrinsics = false;
339 constexpr bool emulateMismatchedBitCast = CPUID::ARM;
340
341 constexpr bool subzeroDumpEnabled = false;
342 constexpr bool subzeroEmitTextAsm = false;
343
344 #if !ALLOW_DUMP
345 static_assert(!subzeroDumpEnabled, "Compile Subzero with ALLOW_DUMP=1 for subzeroDumpEnabled");
346 static_assert(!subzeroEmitTextAsm, "Compile Subzero with ALLOW_DUMP=1 for subzeroEmitTextAsm");
347 #endif
348
349 } // anonymous namespace
350
351 namespace rr {
352
std::string Caps::backendName()
354 {
355 return "Subzero";
356 }
357
bool Caps::coroutinesSupported()
359 {
360 return true;
361 }
362
bool Caps::fmaIsFast()
364 {
365 // TODO(b/214591655): Subzero currently never emits FMA instructions. std::fma() is called instead.
366 return false;
367 }
368
369 enum EmulatedType
370 {
371 EmulatedShift = 16,
372 EmulatedV2 = 2 << EmulatedShift,
373 EmulatedV4 = 4 << EmulatedShift,
374 EmulatedV8 = 8 << EmulatedShift,
375 EmulatedBits = EmulatedV2 | EmulatedV4 | EmulatedV8,
376
377 Type_v2i32 = Ice::IceType_v4i32 | EmulatedV2,
378 Type_v4i16 = Ice::IceType_v8i16 | EmulatedV4,
379 Type_v2i16 = Ice::IceType_v8i16 | EmulatedV2,
380 Type_v8i8 = Ice::IceType_v16i8 | EmulatedV8,
381 Type_v4i8 = Ice::IceType_v16i8 | EmulatedV4,
382 Type_v2f32 = Ice::IceType_v4f32 | EmulatedV2,
383 };
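// An EmulatedType packs a native Ice::Type into the low bits and an emulated element
// count into the bits above EmulatedShift. For example, Type_v2i32 is backed by a full
// IceType_v4i32 register of which only the low two lanes are meaningful, so typeSize()
// below reports 8 bytes rather than 16.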
384
385 class Value : public Ice::Operand
386 {};
387 class SwitchCases : public Ice::InstSwitch
388 {};
389 class BasicBlock : public Ice::CfgNode
390 {};
391
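// T(), V() and B() bridge between Reactor's opaque Type*, Value* and BasicBlock* handles
// and Subzero's Ice::Type, Ice::Operand and Ice::CfgNode. They are plain reinterpret-casts:
// a Value* or BasicBlock* wraps the Ice object directly, while a Type* encodes an Ice::Type
// enum value (optionally tagged with EmulatedBits) in the pointer itself.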
Ice::Type T(Type *t)
393 {
394 static_assert(static_cast<unsigned int>(Ice::IceType_NUM) < static_cast<unsigned int>(EmulatedBits), "Ice::Type overlaps with our emulated types!");
395 return (Ice::Type)(reinterpret_cast<std::intptr_t>(t) & ~EmulatedBits);
396 }
397
Type *T(Ice::Type t)
399 {
400 return reinterpret_cast<Type *>(t);
401 }
402
Type *T(EmulatedType t)
404 {
405 return reinterpret_cast<Type *>(t);
406 }
407
std::vector<Ice::Type> T(const std::vector<Type *> &types)
409 {
410 std::vector<Ice::Type> result;
411 result.reserve(types.size());
412 for(auto &t : types)
413 {
414 result.push_back(T(t));
415 }
416 return result;
417 }
418
Value *V(Ice::Operand *v)
420 {
421 return reinterpret_cast<Value *>(v);
422 }
423
Ice::Operand *V(Value *v)
425 {
426 return reinterpret_cast<Ice::Operand *>(v);
427 }
428
std::vector<Ice::Operand *> V(const std::vector<Value *> &values)
430 {
431 std::vector<Ice::Operand *> result;
432 result.reserve(values.size());
433 for(auto &v : values)
434 {
435 result.push_back(V(v));
436 }
437 return result;
438 }
439
BasicBlock *B(Ice::CfgNode *b)
441 {
442 return reinterpret_cast<BasicBlock *>(b);
443 }
444
static size_t typeSize(Type *type)
446 {
447 if(reinterpret_cast<std::intptr_t>(type) & EmulatedBits)
448 {
449 switch(reinterpret_cast<std::intptr_t>(type))
450 {
451 case Type_v2i32: return 8;
452 case Type_v4i16: return 8;
453 case Type_v2i16: return 4;
454 case Type_v8i8: return 8;
455 case Type_v4i8: return 4;
456 case Type_v2f32: return 8;
457 default: ASSERT(false);
458 }
459 }
460
461 return Ice::typeWidthInBytes(T(type));
462 }
463
static void finalizeFunction()
465 {
466 // Create a return if none was added
467 if(::basicBlock->getInsts().empty() || ::basicBlock->getInsts().back().getKind() != Ice::Inst::Ret)
468 {
469 Nucleus::createRetVoid();
470 }
471
472 // Connect the entry block to the top of the initial basic block
473 auto br = Ice::InstBr::create(::function, ::basicBlockTop);
474 ::entryBlock->appendInst(br);
475 }
476
477 using ElfHeader = std::conditional<sizeof(void *) == 8, Elf64_Ehdr, Elf32_Ehdr>::type;
478 using SectionHeader = std::conditional<sizeof(void *) == 8, Elf64_Shdr, Elf32_Shdr>::type;
479
inline const SectionHeader *sectionHeader(const ElfHeader *elfHeader)
481 {
482 return reinterpret_cast<const SectionHeader *>((intptr_t)elfHeader + elfHeader->e_shoff);
483 }
484
inline const SectionHeader *elfSection(const ElfHeader *elfHeader, int index)
486 {
return &sectionHeader(elfHeader)[index];
488 }
489
static void *relocateSymbol(const ElfHeader *elfHeader, const Elf32_Rel &relocation, const SectionHeader &relocationTable)
491 {
492 const SectionHeader *target = elfSection(elfHeader, relocationTable.sh_info);
493
494 uint32_t index = relocation.getSymbol();
495 int table = relocationTable.sh_link;
496 void *symbolValue = nullptr;
497
498 if(index != SHN_UNDEF)
499 {
500 if(table == SHN_UNDEF) return nullptr;
501 const SectionHeader *symbolTable = elfSection(elfHeader, table);
502
503 uint32_t symtab_entries = symbolTable->sh_size / symbolTable->sh_entsize;
504 if(index >= symtab_entries)
505 {
506 ASSERT(index < symtab_entries && "Symbol Index out of range");
507 return nullptr;
508 }
509
510 intptr_t symbolAddress = (intptr_t)elfHeader + symbolTable->sh_offset;
511 Elf32_Sym &symbol = ((Elf32_Sym *)symbolAddress)[index];
512 uint16_t section = symbol.st_shndx;
513
514 if(section != SHN_UNDEF && section < SHN_LORESERVE)
515 {
516 const SectionHeader *target = elfSection(elfHeader, symbol.st_shndx);
517 symbolValue = reinterpret_cast<void *>((intptr_t)elfHeader + symbol.st_value + target->sh_offset);
518 }
519 else
520 {
521 return nullptr;
522 }
523 }
524
525 intptr_t address = (intptr_t)elfHeader + target->sh_offset;
526 unaligned_ptr<int32_t> patchSite = (int32_t *)(address + relocation.r_offset);
527
528 if(CPUID::ARM)
529 {
530 switch(relocation.getType())
531 {
532 case R_ARM_NONE:
533 // No relocation
534 break;
535 case R_ARM_MOVW_ABS_NC:
536 {
537 uint32_t thumb = 0; // Calls to Thumb code not supported.
538 uint32_t lo = (uint32_t)(intptr_t)symbolValue | thumb;
539 *patchSite = (*patchSite & 0xFFF0F000) | ((lo & 0xF000) << 4) | (lo & 0x0FFF);
540 }
541 break;
542 case R_ARM_MOVT_ABS:
543 {
544 uint32_t hi = (uint32_t)(intptr_t)(symbolValue) >> 16;
545 *patchSite = (*patchSite & 0xFFF0F000) | ((hi & 0xF000) << 4) | (hi & 0x0FFF);
546 }
547 break;
548 default:
549 ASSERT(false && "Unsupported relocation type");
550 return nullptr;
551 }
552 }
553 else
554 {
555 switch(relocation.getType())
556 {
557 case R_386_NONE:
558 // No relocation
559 break;
560 case R_386_32:
561 *patchSite = (int32_t)((intptr_t)symbolValue + *patchSite);
562 break;
563 case R_386_PC32:
564 *patchSite = (int32_t)((intptr_t)symbolValue + *patchSite - (intptr_t)patchSite);
565 break;
566 default:
567 ASSERT(false && "Unsupported relocation type");
568 return nullptr;
569 }
570 }
571
572 return symbolValue;
573 }
574
static void *relocateSymbol(const ElfHeader *elfHeader, const Elf64_Rela &relocation, const SectionHeader &relocationTable)
576 {
577 const SectionHeader *target = elfSection(elfHeader, relocationTable.sh_info);
578
579 uint32_t index = relocation.getSymbol();
580 int table = relocationTable.sh_link;
581 void *symbolValue = nullptr;
582
583 if(index != SHN_UNDEF)
584 {
585 if(table == SHN_UNDEF) return nullptr;
586 const SectionHeader *symbolTable = elfSection(elfHeader, table);
587
588 uint32_t symtab_entries = symbolTable->sh_size / symbolTable->sh_entsize;
589 if(index >= symtab_entries)
590 {
591 ASSERT(index < symtab_entries && "Symbol Index out of range");
592 return nullptr;
593 }
594
595 intptr_t symbolAddress = (intptr_t)elfHeader + symbolTable->sh_offset;
596 Elf64_Sym &symbol = ((Elf64_Sym *)symbolAddress)[index];
597 uint16_t section = symbol.st_shndx;
598
599 if(section != SHN_UNDEF && section < SHN_LORESERVE)
600 {
601 const SectionHeader *target = elfSection(elfHeader, symbol.st_shndx);
602 symbolValue = reinterpret_cast<void *>((intptr_t)elfHeader + symbol.st_value + target->sh_offset);
603 }
604 else
605 {
606 return nullptr;
607 }
608 }
609
610 intptr_t address = (intptr_t)elfHeader + target->sh_offset;
611 unaligned_ptr<int32_t> patchSite32 = (int32_t *)(address + relocation.r_offset);
612 unaligned_ptr<int64_t> patchSite64 = (int64_t *)(address + relocation.r_offset);
613
614 switch(relocation.getType())
615 {
616 case R_X86_64_NONE:
617 // No relocation
618 break;
619 case R_X86_64_64:
620 *patchSite64 = (int64_t)((intptr_t)symbolValue + *patchSite64 + relocation.r_addend);
621 break;
622 case R_X86_64_PC32:
623 *patchSite32 = (int32_t)((intptr_t)symbolValue + *patchSite32 - (intptr_t)patchSite32 + relocation.r_addend);
624 break;
625 case R_X86_64_32S:
626 *patchSite32 = (int32_t)((intptr_t)symbolValue + *patchSite32 + relocation.r_addend);
627 break;
628 default:
629 ASSERT(false && "Unsupported relocation type");
630 return nullptr;
631 }
632
633 return symbolValue;
634 }
635
636 struct EntryPoint
637 {
638 const void *entry;
639 size_t codeSize = 0;
640 };
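// loadImage() resolves the in-memory ELF object emitted by Subzero: executable
// SHT_PROGBITS sections are matched by section name against the requested function
// names to obtain entry points, and SHT_REL/SHT_RELA sections are applied in place
// through relocateSymbol() before the code is made executable.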
641
std::vector<EntryPoint> loadImage(uint8_t *const elfImage, const std::vector<const char *> &functionNames)
643 {
644 ASSERT(functionNames.size() > 0);
645 std::vector<EntryPoint> entryPoints(functionNames.size());
646
647 ElfHeader *elfHeader = (ElfHeader *)elfImage;
648
649 // TODO: assert?
650 if(!elfHeader->checkMagic())
651 {
652 return {};
653 }
654
655 // Expect ELF bitness to match platform
656 ASSERT(sizeof(void *) == 8 ? elfHeader->getFileClass() == ELFCLASS64 : elfHeader->getFileClass() == ELFCLASS32);
657 #if defined(__i386__)
658 ASSERT(sizeof(void *) == 4 && elfHeader->e_machine == EM_386);
659 #elif defined(__x86_64__)
660 ASSERT(sizeof(void *) == 8 && elfHeader->e_machine == EM_X86_64);
661 #elif defined(__arm__)
662 ASSERT(sizeof(void *) == 4 && elfHeader->e_machine == EM_ARM);
663 #elif defined(__aarch64__)
664 ASSERT(sizeof(void *) == 8 && elfHeader->e_machine == EM_AARCH64);
665 #elif defined(__mips__)
666 ASSERT(sizeof(void *) == 4 && elfHeader->e_machine == EM_MIPS);
667 #else
668 # error "Unsupported platform"
669 #endif
670
671 SectionHeader *sectionHeader = (SectionHeader *)(elfImage + elfHeader->e_shoff);
672
673 for(int i = 0; i < elfHeader->e_shnum; i++)
674 {
675 if(sectionHeader[i].sh_type == SHT_PROGBITS)
676 {
677 if(sectionHeader[i].sh_flags & SHF_EXECINSTR)
678 {
679 auto findSectionNameEntryIndex = [&]() -> size_t {
680 auto sectionNameOffset = sectionHeader[elfHeader->e_shstrndx].sh_offset + sectionHeader[i].sh_name;
681 const char *sectionName = reinterpret_cast<const char *>(elfImage + sectionNameOffset);
682
683 for(size_t j = 0; j < functionNames.size(); ++j)
684 {
685 if(strstr(sectionName, functionNames[j]) != nullptr)
686 {
687 return j;
688 }
689 }
690
691 UNREACHABLE("Failed to find executable section that matches input function names");
692 return static_cast<size_t>(-1);
693 };
694
695 size_t index = findSectionNameEntryIndex();
696 entryPoints[index].entry = elfImage + sectionHeader[i].sh_offset;
697 entryPoints[index].codeSize = sectionHeader[i].sh_size;
698 }
699 }
700 else if(sectionHeader[i].sh_type == SHT_REL)
701 {
702 ASSERT(sizeof(void *) == 4 && "UNIMPLEMENTED"); // Only expected/implemented for 32-bit code
703
704 for(Elf32_Word index = 0; index < sectionHeader[i].sh_size / sectionHeader[i].sh_entsize; index++)
705 {
706 const Elf32_Rel &relocation = ((const Elf32_Rel *)(elfImage + sectionHeader[i].sh_offset))[index];
707 relocateSymbol(elfHeader, relocation, sectionHeader[i]);
708 }
709 }
710 else if(sectionHeader[i].sh_type == SHT_RELA)
711 {
712 ASSERT(sizeof(void *) == 8 && "UNIMPLEMENTED"); // Only expected/implemented for 64-bit code
713
714 for(Elf32_Word index = 0; index < sectionHeader[i].sh_size / sectionHeader[i].sh_entsize; index++)
715 {
716 const Elf64_Rela &relocation = ((const Elf64_Rela *)(elfImage + sectionHeader[i].sh_offset))[index];
717 relocateSymbol(elfHeader, relocation, sectionHeader[i]);
718 }
719 }
720 }
721
722 return entryPoints;
723 }
724
725 template<typename T>
726 struct ExecutableAllocator
727 {
ExecutableAllocator() {}
729 template<class U>
ExecutableAllocator(const ExecutableAllocator<U> &other)
731 {}
732
733 using value_type = T;
734 using size_type = std::size_t;
735
T *allocate(size_type n)
737 {
738 return (T *)allocateMemoryPages(
739 sizeof(T) * n, PERMISSION_READ | PERMISSION_WRITE, true);
740 }
741
void deallocate(T *p, size_type n)
743 {
744 deallocateMemoryPages(p, sizeof(T) * n);
745 }
746 };
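// ELFMemoryStreamer accumulates the ELF image in a buffer backed by ExecutableAllocator,
// so finalize() can later flip those same pages to read+execute with protectMemoryPages()
// instead of copying the generated code into a separate executable mapping.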
747
748 class ELFMemoryStreamer : public Ice::ELFStreamer, public Routine
749 {
750 ELFMemoryStreamer(const ELFMemoryStreamer &) = delete;
751 ELFMemoryStreamer &operator=(const ELFMemoryStreamer &) = delete;
752
753 public:
ELFMemoryStreamer()
755 : Routine()
756 {
757 position = 0;
758 buffer.reserve(0x1000);
759 }
760
~ELFMemoryStreamer() override
762 {
763 }
764
void write8(uint8_t Value) override
766 {
767 if(position == (uint64_t)buffer.size())
768 {
769 buffer.push_back(Value);
770 position++;
771 }
772 else if(position < (uint64_t)buffer.size())
773 {
774 buffer[position] = Value;
775 position++;
776 }
777 else
778 ASSERT(false && "UNIMPLEMENTED");
779 }
780
void writeBytes(llvm::StringRef Bytes) override
782 {
783 std::size_t oldSize = buffer.size();
784 buffer.resize(oldSize + Bytes.size());
785 memcpy(&buffer[oldSize], Bytes.begin(), Bytes.size());
786 position += Bytes.size();
787 }
788
uint64_t tell() const override
790 {
791 return position;
792 }
793
void seek(uint64_t Off) override
795 {
796 position = Off;
797 }
798
std::vector<EntryPoint> loadImageAndGetEntryPoints(const std::vector<const char *> &functionNames)
800 {
801 auto entryPoints = loadImage(&buffer[0], functionNames);
802
803 #if defined(_WIN32)
804 FlushInstructionCache(GetCurrentProcess(), NULL, 0);
805 #else
806 for(auto &entryPoint : entryPoints)
807 {
808 __builtin___clear_cache((char *)entryPoint.entry, (char *)entryPoint.entry + entryPoint.codeSize);
809 }
810 #endif
811
812 return entryPoints;
813 }
814
void finalize()
816 {
817 position = std::numeric_limits<std::size_t>::max(); // Can't stream more data after this
818
819 protectMemoryPages(&buffer[0], buffer.size(), PERMISSION_READ | PERMISSION_EXECUTE);
820 }
821
void setEntry(int index, const void *func)
823 {
824 ASSERT(func);
825 funcs[index] = func;
826 }
827
const void *getEntry(int index) const override
829 {
830 ASSERT(funcs[index]);
831 return funcs[index];
832 }
833
const void *addConstantData(const void *data, size_t size, size_t alignment = 1)
835 {
836 // Check if we already have a suitable constant.
837 for(const auto &c : constantsPool)
838 {
839 void *ptr = c.data.get();
840 size_t space = c.space;
841
842 void *alignedPtr = std::align(alignment, size, ptr, space);
843
844 if(space < size)
845 {
846 continue;
847 }
848
849 if(memcmp(data, alignedPtr, size) == 0)
850 {
851 return alignedPtr;
852 }
853 }
854
855 // TODO(b/148086935): Replace with a buffer allocator.
856 size_t space = size + alignment;
857 auto buf = std::unique_ptr<uint8_t[]>(new uint8_t[space]);
858 void *ptr = buf.get();
859 void *alignedPtr = std::align(alignment, size, ptr, space);
860 ASSERT(alignedPtr);
861 memcpy(alignedPtr, data, size);
862 constantsPool.emplace_back(std::move(buf), space);
863
864 return alignedPtr;
865 }
866
867 private:
868 struct Constant
869 {
Constant(std::unique_ptr<uint8_t[]> data, size_t space)
871 : data(std::move(data))
872 , space(space)
873 {}
874
875 std::unique_ptr<uint8_t[]> data;
876 size_t space;
877 };
878
879 std::array<const void *, Nucleus::CoroutineEntryCount> funcs = {};
880 std::vector<uint8_t, ExecutableAllocator<uint8_t>> buffer;
881 std::size_t position;
882 std::vector<Constant> constantsPool;
883 };
884
885 #ifdef ENABLE_RR_PRINT
void VPrintf(const std::vector<Value *> &vals)
887 {
888 sz::Call(::function, ::basicBlock, Ice::IceType_i32, reinterpret_cast<const void *>(rr::DebugPrintf), V(vals), true);
889 }
890 #endif // ENABLE_RR_PRINT
891
Nucleus::Nucleus()
893 {
894 ::codegenMutex.lock(); // SubzeroReactor is currently not thread safe
895
896 Ice::ClFlags &Flags = Ice::ClFlags::Flags;
897 Ice::ClFlags::getParsedClFlags(Flags);
898
899 #if defined(__arm__)
900 Flags.setTargetArch(Ice::Target_ARM32);
901 Flags.setTargetInstructionSet(Ice::ARM32InstructionSet_HWDivArm);
902 #elif defined(__mips__)
903 Flags.setTargetArch(Ice::Target_MIPS32);
904 Flags.setTargetInstructionSet(Ice::BaseInstructionSet);
905 #else // x86
906 Flags.setTargetArch(sizeof(void *) == 8 ? Ice::Target_X8664 : Ice::Target_X8632);
907 Flags.setTargetInstructionSet(CPUID::SSE4_1 ? Ice::X86InstructionSet_SSE4_1 : Ice::X86InstructionSet_SSE2);
908 #endif
909 Flags.setOutFileType(Ice::FT_Elf);
910 Flags.setOptLevel(toIce(getDefaultConfig().getOptimization().getLevel()));
911 Flags.setVerbose(subzeroDumpEnabled ? Ice::IceV_Most : Ice::IceV_None);
912 Flags.setDisableHybridAssembly(true);
913
914 // Emit functions into separate sections in the ELF so we can find them by name
915 Flags.setFunctionSections(true);
916
917 static llvm::raw_os_ostream cout(std::cout);
918 static llvm::raw_os_ostream cerr(std::cerr);
919
920 if(subzeroEmitTextAsm)
921 {
922 // Decorate text asm with liveness info
923 Flags.setDecorateAsm(true);
924 }
925
926 if(false) // Write out to a file
927 {
928 std::error_code errorCode;
929 ::out = new Ice::Fdstream("out.o", errorCode, llvm::sys::fs::F_None);
930 ::elfFile = new Ice::ELFFileStreamer(*out);
931 ::context = new Ice::GlobalContext(&cout, &cout, &cerr, elfFile);
932 }
933 else
934 {
935 ELFMemoryStreamer *elfMemory = new ELFMemoryStreamer();
936 ::context = new Ice::GlobalContext(&cout, &cout, &cerr, elfMemory);
937 ::routine = elfMemory;
938 }
939
940 #if !__has_feature(memory_sanitizer)
941 // thread_local variables in shared libraries are initialized at load-time,
942 // but this is not observed by MemorySanitizer if the loader itself was not
943 // instrumented, leading to false-positive uninitialized variable errors.
944 ASSERT(Variable::unmaterializedVariables == nullptr);
945 #endif
946 Variable::unmaterializedVariables = new Variable::UnmaterializedVariables{};
947 }
948
Nucleus::~Nucleus()
950 {
951 delete Variable::unmaterializedVariables;
952 Variable::unmaterializedVariables = nullptr;
953
954 delete ::routine;
955 ::routine = nullptr;
956
957 delete ::allocator;
958 ::allocator = nullptr;
959
960 delete ::function;
961 ::function = nullptr;
962
963 delete ::context;
964 ::context = nullptr;
965
966 delete ::elfFile;
967 ::elfFile = nullptr;
968
969 delete ::out;
970 ::out = nullptr;
971
972 ::entryBlock = nullptr;
973 ::basicBlock = nullptr;
974 ::basicBlockTop = nullptr;
975
976 ::codegenMutex.unlock();
977 }
978
void Nucleus::setDefaultConfig(const Config &cfg)
980 {
981 std::unique_lock<std::mutex> lock(::defaultConfigLock);
982 ::defaultConfig() = cfg;
983 }
984
void Nucleus::adjustDefaultConfig(const Config::Edit &cfgEdit)
986 {
987 std::unique_lock<std::mutex> lock(::defaultConfigLock);
988 auto &config = ::defaultConfig();
989 config = cfgEdit.apply(config);
990 }
991
Config Nucleus::getDefaultConfig()
993 {
994 std::unique_lock<std::mutex> lock(::defaultConfigLock);
995 return ::defaultConfig();
996 }
997
998 // This function lowers and produces executable binary code in memory for the input functions,
999 // and returns a Routine with the entry points to these functions.
1000 template<size_t Count>
static std::shared_ptr<Routine> acquireRoutine(Ice::Cfg *const (&functions)[Count], const char *const (&names)[Count], const Config::Edit *cfgEdit)
1002 {
1003 // This logic is modeled after the IceCompiler, as well as GlobalContext::translateFunctions
1004 // and GlobalContext::emitItems.
1005
1006 if(subzeroDumpEnabled)
1007 {
1008 // Output dump strings immediately, rather than once buffer is full. Useful for debugging.
1009 ::context->getStrDump().SetUnbuffered();
1010 }
1011
1012 ::context->emitFileHeader();
1013
1014 // Translate
1015
1016 for(size_t i = 0; i < Count; ++i)
1017 {
1018 Ice::Cfg *currFunc = functions[i];
1019
1020 // Install function allocator in TLS for Cfg-specific container allocators
1021 Ice::CfgLocalAllocatorScope allocScope(currFunc);
1022
1023 currFunc->setFunctionName(Ice::GlobalString::createWithString(::context, names[i]));
1024
1025 if(::optimizerCallback)
1026 {
1027 Nucleus::OptimizerReport report;
1028 rr::optimize(currFunc, &report);
1029 ::optimizerCallback(&report);
1030 ::optimizerCallback = nullptr;
1031 }
1032 else
1033 {
1034 rr::optimize(currFunc);
1035 }
1036
1037 currFunc->computeInOutEdges();
1038 ASSERT_MSG(!currFunc->hasError(), "%s", currFunc->getError().c_str());
1039
1040 currFunc->translate();
1041 ASSERT_MSG(!currFunc->hasError(), "%s", currFunc->getError().c_str());
1042
1043 currFunc->getAssembler<>()->setInternal(currFunc->getInternal());
1044
1045 if(subzeroEmitTextAsm)
1046 {
1047 currFunc->emit();
1048 }
1049
1050 currFunc->emitIAS();
1051
1052 if(currFunc->hasError())
1053 {
1054 return nullptr;
1055 }
1056 }
1057
1058 // Emit items
1059
1060 ::context->lowerGlobals("");
1061
1062 auto objectWriter = ::context->getObjectWriter();
1063
1064 for(size_t i = 0; i < Count; ++i)
1065 {
1066 Ice::Cfg *currFunc = functions[i];
1067
1068 // Accumulate globals from functions to emit into the "last" section at the end
1069 auto globals = currFunc->getGlobalInits();
1070 if(globals && !globals->empty())
1071 {
1072 ::context->getGlobals()->merge(globals.get());
1073 }
1074
1075 auto assembler = currFunc->releaseAssembler();
1076 assembler->alignFunction();
1077 objectWriter->writeFunctionCode(currFunc->getFunctionName(), currFunc->getInternal(), assembler.get());
1078 }
1079
1080 ::context->lowerGlobals("last");
1081 ::context->lowerConstants();
1082 ::context->lowerJumpTables();
1083
1084 objectWriter->setUndefinedSyms(::context->getConstantExternSyms());
1085 ::context->emitTargetRODataSections();
1086 objectWriter->writeNonUserSections();
1087
1088 // Done compiling functions, get entry pointers to each of them
1089 auto entryPoints = ::routine->loadImageAndGetEntryPoints({ names, names + Count });
1090 ASSERT(entryPoints.size() == Count);
1091 for(size_t i = 0; i < entryPoints.size(); ++i)
1092 {
1093 ::routine->setEntry(i, entryPoints[i].entry);
1094 }
1095
1096 ::routine->finalize();
1097
1098 Routine *handoffRoutine = ::routine;
1099 ::routine = nullptr;
1100
1101 return std::shared_ptr<Routine>(handoffRoutine);
1102 }
1103
std::shared_ptr<Routine> Nucleus::acquireRoutine(const char *name, const Config::Edit *cfgEdit /* = nullptr */)
1105 {
1106 finalizeFunction();
1107 return rr::acquireRoutine({ ::function }, { name }, cfgEdit);
1108 }
1109
Value *Nucleus::allocateStackVariable(Type *t, int arraySize)
1111 {
1112 Ice::Type type = T(t);
1113 int typeSize = Ice::typeWidthInBytes(type);
1114 int totalSize = typeSize * (arraySize ? arraySize : 1);
1115
1116 auto bytes = Ice::ConstantInteger32::create(::context, Ice::IceType_i32, totalSize);
1117 auto address = ::function->makeVariable(T(getPointerType(t)));
1118 auto alloca = Ice::InstAlloca::create(::function, address, bytes, typeSize); // SRoA depends on the alignment to match the type size.
1119 ::function->getEntryNode()->getInsts().push_front(alloca);
1120
1121 return V(address);
1122 }
1123
BasicBlock *Nucleus::createBasicBlock()
1125 {
1126 return B(::function->makeNode());
1127 }
1128
BasicBlock *Nucleus::getInsertBlock()
1130 {
1131 return B(::basicBlock);
1132 }
1133
void Nucleus::setInsertBlock(BasicBlock *basicBlock)
1135 {
1136 // ASSERT(::basicBlock->getInsts().back().getTerminatorEdges().size() >= 0 && "Previous basic block must have a terminator");
1137
1138 ::basicBlock = basicBlock;
1139 }
1140
void Nucleus::createFunction(Type *returnType, const std::vector<Type *> &paramTypes)
1142 {
1143 ASSERT(::function == nullptr);
1144 ASSERT(::allocator == nullptr);
1145 ASSERT(::entryBlock == nullptr);
1146 ASSERT(::basicBlock == nullptr);
1147 ASSERT(::basicBlockTop == nullptr);
1148
1149 ::function = sz::createFunction(::context, T(returnType), T(paramTypes));
1150
// NOTE: The scoped allocator sets the TLS allocator to the one owned by the function. This
// global becomes invalid whenever another scoped allocator is created (for example, while
// building the await and destroy functions for coroutines), so a new scoped allocator for
// ::function must be created again afterwards.
// TODO: Get rid of this as a global, and create scoped allocs in every Nucleus function instead.
1155 ::allocator = new Ice::CfgLocalAllocatorScope(::function);
1156
1157 ::entryBlock = ::function->getEntryNode();
1158 ::basicBlock = ::function->makeNode();
1159 ::basicBlockTop = ::basicBlock;
1160 }
1161
Value *Nucleus::getArgument(unsigned int index)
1163 {
1164 return V(::function->getArgs()[index]);
1165 }
1166
void Nucleus::createRetVoid()
1168 {
1169 RR_DEBUG_INFO_UPDATE_LOC();
1170
1171 // Code generated after this point is unreachable, so any variables
1172 // being read can safely return an undefined value. We have to avoid
1173 // materializing variables after the terminator ret instruction.
1174 Variable::killUnmaterialized();
1175
1176 Ice::InstRet *ret = Ice::InstRet::create(::function);
1177 ::basicBlock->appendInst(ret);
1178 }
1179
void Nucleus::createRet(Value *v)
1181 {
1182 RR_DEBUG_INFO_UPDATE_LOC();
1183
1184 // Code generated after this point is unreachable, so any variables
1185 // being read can safely return an undefined value. We have to avoid
1186 // materializing variables after the terminator ret instruction.
1187 Variable::killUnmaterialized();
1188
1189 Ice::InstRet *ret = Ice::InstRet::create(::function, v);
1190 ::basicBlock->appendInst(ret);
1191 }
1192
void Nucleus::createBr(BasicBlock *dest)
1194 {
1195 RR_DEBUG_INFO_UPDATE_LOC();
1196 Variable::materializeAll();
1197
1198 auto br = Ice::InstBr::create(::function, dest);
1199 ::basicBlock->appendInst(br);
1200 }
1201
void Nucleus::createCondBr(Value *cond, BasicBlock *ifTrue, BasicBlock *ifFalse)
1203 {
1204 RR_DEBUG_INFO_UPDATE_LOC();
1205 Variable::materializeAll();
1206
1207 auto br = Ice::InstBr::create(::function, cond, ifTrue, ifFalse);
1208 ::basicBlock->appendInst(br);
1209 }
1210
static bool isCommutative(Ice::InstArithmetic::OpKind op)
1212 {
1213 switch(op)
1214 {
1215 case Ice::InstArithmetic::Add:
1216 case Ice::InstArithmetic::Fadd:
1217 case Ice::InstArithmetic::Mul:
1218 case Ice::InstArithmetic::Fmul:
1219 case Ice::InstArithmetic::And:
1220 case Ice::InstArithmetic::Or:
1221 case Ice::InstArithmetic::Xor:
1222 return true;
1223 default:
1224 return false;
1225 }
1226 }
1227
static Value *createArithmetic(Ice::InstArithmetic::OpKind op, Value *lhs, Value *rhs)
1229 {
1230 ASSERT(lhs->getType() == rhs->getType() || llvm::isa<Ice::Constant>(rhs));
1231
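// Keep constants on the right-hand side of commutative operations: the result type is
// taken from the left operand, and the assertion above only tolerates a type mismatch
// when the constant is the right operand.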
1232 bool swapOperands = llvm::isa<Ice::Constant>(lhs) && isCommutative(op);
1233
1234 Ice::Variable *result = ::function->makeVariable(lhs->getType());
1235 Ice::InstArithmetic *arithmetic = Ice::InstArithmetic::create(::function, op, result, swapOperands ? rhs : lhs, swapOperands ? lhs : rhs);
1236 ::basicBlock->appendInst(arithmetic);
1237
1238 return V(result);
1239 }
1240
Value *Nucleus::createAdd(Value *lhs, Value *rhs)
1242 {
1243 RR_DEBUG_INFO_UPDATE_LOC();
1244 return createArithmetic(Ice::InstArithmetic::Add, lhs, rhs);
1245 }
1246
Value *Nucleus::createSub(Value *lhs, Value *rhs)
1248 {
1249 RR_DEBUG_INFO_UPDATE_LOC();
1250 return createArithmetic(Ice::InstArithmetic::Sub, lhs, rhs);
1251 }
1252
Value *Nucleus::createMul(Value *lhs, Value *rhs)
1254 {
1255 RR_DEBUG_INFO_UPDATE_LOC();
1256 return createArithmetic(Ice::InstArithmetic::Mul, lhs, rhs);
1257 }
1258
Value *Nucleus::createUDiv(Value *lhs, Value *rhs)
1260 {
1261 RR_DEBUG_INFO_UPDATE_LOC();
1262 return createArithmetic(Ice::InstArithmetic::Udiv, lhs, rhs);
1263 }
1264
Value *Nucleus::createSDiv(Value *lhs, Value *rhs)
1266 {
1267 RR_DEBUG_INFO_UPDATE_LOC();
1268 return createArithmetic(Ice::InstArithmetic::Sdiv, lhs, rhs);
1269 }
1270
Value *Nucleus::createFAdd(Value *lhs, Value *rhs)
1272 {
1273 RR_DEBUG_INFO_UPDATE_LOC();
1274 return createArithmetic(Ice::InstArithmetic::Fadd, lhs, rhs);
1275 }
1276
Value *Nucleus::createFSub(Value *lhs, Value *rhs)
1278 {
1279 RR_DEBUG_INFO_UPDATE_LOC();
1280 return createArithmetic(Ice::InstArithmetic::Fsub, lhs, rhs);
1281 }
1282
Value *Nucleus::createFMul(Value *lhs, Value *rhs)
1284 {
1285 RR_DEBUG_INFO_UPDATE_LOC();
1286 return createArithmetic(Ice::InstArithmetic::Fmul, lhs, rhs);
1287 }
1288
Value *Nucleus::createFDiv(Value *lhs, Value *rhs)
1290 {
1291 RR_DEBUG_INFO_UPDATE_LOC();
1292 return createArithmetic(Ice::InstArithmetic::Fdiv, lhs, rhs);
1293 }
1294
Value *Nucleus::createURem(Value *lhs, Value *rhs)
1296 {
1297 RR_DEBUG_INFO_UPDATE_LOC();
1298 return createArithmetic(Ice::InstArithmetic::Urem, lhs, rhs);
1299 }
1300
Value *Nucleus::createSRem(Value *lhs, Value *rhs)
1302 {
1303 RR_DEBUG_INFO_UPDATE_LOC();
1304 return createArithmetic(Ice::InstArithmetic::Srem, lhs, rhs);
1305 }
1306
Value *Nucleus::createFRem(Value *lhs, Value *rhs)
1308 {
1309 RR_DEBUG_INFO_UPDATE_LOC();
1310 // TODO(b/148139679) Fix Subzero generating invalid code for FRem on vector types
1311 // createArithmetic(Ice::InstArithmetic::Frem, lhs, rhs);
1312 UNIMPLEMENTED("b/148139679 Nucleus::createFRem");
1313 return nullptr;
1314 }
1315
Value *Nucleus::createShl(Value *lhs, Value *rhs)
1317 {
1318 RR_DEBUG_INFO_UPDATE_LOC();
1319 return createArithmetic(Ice::InstArithmetic::Shl, lhs, rhs);
1320 }
1321
Value *Nucleus::createLShr(Value *lhs, Value *rhs)
1323 {
1324 RR_DEBUG_INFO_UPDATE_LOC();
1325 return createArithmetic(Ice::InstArithmetic::Lshr, lhs, rhs);
1326 }
1327
Value *Nucleus::createAShr(Value *lhs, Value *rhs)
1329 {
1330 RR_DEBUG_INFO_UPDATE_LOC();
1331 return createArithmetic(Ice::InstArithmetic::Ashr, lhs, rhs);
1332 }
1333
Value *Nucleus::createAnd(Value *lhs, Value *rhs)
1335 {
1336 RR_DEBUG_INFO_UPDATE_LOC();
1337 return createArithmetic(Ice::InstArithmetic::And, lhs, rhs);
1338 }
1339
Value *Nucleus::createOr(Value *lhs, Value *rhs)
1341 {
1342 RR_DEBUG_INFO_UPDATE_LOC();
1343 return createArithmetic(Ice::InstArithmetic::Or, lhs, rhs);
1344 }
1345
Value *Nucleus::createXor(Value *lhs, Value *rhs)
1347 {
1348 RR_DEBUG_INFO_UPDATE_LOC();
1349 return createArithmetic(Ice::InstArithmetic::Xor, lhs, rhs);
1350 }
1351
Value *Nucleus::createNeg(Value *v)
1353 {
1354 RR_DEBUG_INFO_UPDATE_LOC();
1355 return createSub(createNullValue(T(v->getType())), v);
1356 }
1357
Value *Nucleus::createFNeg(Value *v)
1359 {
1360 RR_DEBUG_INFO_UPDATE_LOC();
1361 double c[4] = { -0.0, -0.0, -0.0, -0.0 };
1362 Value *negativeZero = Ice::isVectorType(v->getType()) ? createConstantVector(c, T(v->getType())) : V(::context->getConstantFloat(-0.0f));
1363
1364 return createFSub(negativeZero, v);
1365 }
1366
Value *Nucleus::createNot(Value *v)
1368 {
1369 RR_DEBUG_INFO_UPDATE_LOC();
1370 if(Ice::isScalarIntegerType(v->getType()))
1371 {
1372 return createXor(v, V(::context->getConstantInt(v->getType(), -1)));
1373 }
1374 else // Vector
1375 {
1376 int64_t c[16] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 };
1377 return createXor(v, createConstantVector(c, T(v->getType())));
1378 }
1379 }
1380
static void validateAtomicAndMemoryOrderArgs(bool atomic, std::memory_order memoryOrder)
1382 {
1383 #if defined(__i386__) || defined(__x86_64__)
// On x86, atomicity and the strictest memory orders short of seq_cst are already guaranteed
// by ordinary loads and stores. Sequential consistency could be guaranteed by using x86's
// LOCK prefix, and relaxed memory order could be implemented using MOVNTPS and friends.
1387 #else
1388 if(atomic)
1389 {
1390 UNIMPLEMENTED("b/150475088 Atomic load/store not implemented for current platform");
1391 }
1392 if(memoryOrder != std::memory_order_relaxed)
1393 {
1394 UNIMPLEMENTED("b/150475088 Memory order other than memory_order_relaxed not implemented for current platform");
1395 }
1396 #endif
1397
1398 // Vulkan doesn't allow sequential memory order
1399 ASSERT(memoryOrder != std::memory_order_seq_cst);
1400 }
1401
Value *Nucleus::createLoad(Value *ptr, Type *type, bool isVolatile, unsigned int align, bool atomic, std::memory_order memoryOrder)
1403 {
1404 RR_DEBUG_INFO_UPDATE_LOC();
1405 validateAtomicAndMemoryOrderArgs(atomic, memoryOrder);
1406
1407 int valueType = (int)reinterpret_cast<intptr_t>(type);
1408 Ice::Variable *result = nullptr;
1409
1410 if((valueType & EmulatedBits) && (align != 0)) // Narrow vector not stored on stack.
1411 {
1412 if(emulateIntrinsics)
1413 {
1414 if(typeSize(type) == 4)
1415 {
1416 auto pointer = RValue<Pointer<Byte>>(ptr);
1417 Int x = *Pointer<Int>(pointer);
1418
1419 Int4 vector;
1420 vector = Insert(vector, x, 0);
1421
1422 result = ::function->makeVariable(T(type));
1423 auto bitcast = Ice::InstCast::create(::function, Ice::InstCast::Bitcast, result, vector.loadValue());
1424 ::basicBlock->appendInst(bitcast);
1425 }
1426 else if(typeSize(type) == 8)
1427 {
1428 ASSERT_MSG(!atomic, "Emulated 64-bit loads are not atomic");
1429 auto pointer = RValue<Pointer<Byte>>(ptr);
1430 Int x = *Pointer<Int>(pointer);
1431 Int y = *Pointer<Int>(pointer + 4);
1432
1433 Int4 vector;
1434 vector = Insert(vector, x, 0);
1435 vector = Insert(vector, y, 1);
1436
1437 result = ::function->makeVariable(T(type));
1438 auto bitcast = Ice::InstCast::create(::function, Ice::InstCast::Bitcast, result, vector.loadValue());
1439 ::basicBlock->appendInst(bitcast);
1440 }
1441 else
1442 UNREACHABLE("typeSize(type): %d", int(typeSize(type)));
1443 }
1444 else
1445 {
1446 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::LoadSubVector, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
1447 result = ::function->makeVariable(T(type));
1448 auto load = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
1449 load->addArg(ptr);
1450 load->addArg(::context->getConstantInt32(typeSize(type)));
1451 ::basicBlock->appendInst(load);
1452 }
1453 }
1454 else
1455 {
1456 result = sz::createLoad(::function, ::basicBlock, V(ptr), T(type), align);
1457 }
1458
1459 ASSERT(result);
1460 return V(result);
1461 }
1462
Value *Nucleus::createStore(Value *value, Value *ptr, Type *type, bool isVolatile, unsigned int align, bool atomic, std::memory_order memoryOrder)
1464 {
1465 RR_DEBUG_INFO_UPDATE_LOC();
1466 validateAtomicAndMemoryOrderArgs(atomic, memoryOrder);
1467
1468 #if __has_feature(memory_sanitizer)
1469 // Mark all (non-stack) memory writes as initialized by calling __msan_unpoison
1470 if(align != 0)
1471 {
1472 auto call = Ice::InstCall::create(::function, 2, nullptr, ::context->getConstantInt64(reinterpret_cast<intptr_t>(__msan_unpoison)), false);
1473 call->addArg(ptr);
1474 call->addArg(::context->getConstantInt64(typeSize(type)));
1475 ::basicBlock->appendInst(call);
1476 }
1477 #endif
1478
1479 int valueType = (int)reinterpret_cast<intptr_t>(type);
1480
1481 if((valueType & EmulatedBits) && (align != 0)) // Narrow vector not stored on stack.
1482 {
1483 if(emulateIntrinsics)
1484 {
1485 if(typeSize(type) == 4)
1486 {
1487 Ice::Variable *vector = ::function->makeVariable(Ice::IceType_v4i32);
1488 auto bitcast = Ice::InstCast::create(::function, Ice::InstCast::Bitcast, vector, value);
1489 ::basicBlock->appendInst(bitcast);
1490
1491 RValue<Int4> v(V(vector));
1492
1493 auto pointer = RValue<Pointer<Byte>>(ptr);
1494 Int x = Extract(v, 0);
1495 *Pointer<Int>(pointer) = x;
1496 }
1497 else if(typeSize(type) == 8)
1498 {
1499 ASSERT_MSG(!atomic, "Emulated 64-bit stores are not atomic");
1500 Ice::Variable *vector = ::function->makeVariable(Ice::IceType_v4i32);
1501 auto bitcast = Ice::InstCast::create(::function, Ice::InstCast::Bitcast, vector, value);
1502 ::basicBlock->appendInst(bitcast);
1503
1504 RValue<Int4> v(V(vector));
1505
1506 auto pointer = RValue<Pointer<Byte>>(ptr);
1507 Int x = Extract(v, 0);
1508 *Pointer<Int>(pointer) = x;
1509 Int y = Extract(v, 1);
1510 *Pointer<Int>(pointer + 4) = y;
1511 }
1512 else
1513 UNREACHABLE("typeSize(type): %d", int(typeSize(type)));
1514 }
1515 else
1516 {
1517 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::StoreSubVector, Ice::Intrinsics::SideEffects_T, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_T };
1518 auto store = Ice::InstIntrinsic::create(::function, 3, nullptr, intrinsic);
1519 store->addArg(value);
1520 store->addArg(ptr);
1521 store->addArg(::context->getConstantInt32(typeSize(type)));
1522 ::basicBlock->appendInst(store);
1523 }
1524 }
1525 else
1526 {
1527 ASSERT(value->getType() == T(type));
1528
1529 auto store = Ice::InstStore::create(::function, V(value), V(ptr), align);
1530 ::basicBlock->appendInst(store);
1531 }
1532
1533 return value;
1534 }
1535
Value *Nucleus::createGEP(Value *ptr, Type *type, Value *index, bool unsignedIndex)
1537 {
1538 RR_DEBUG_INFO_UPDATE_LOC();
1539 ASSERT(index->getType() == Ice::IceType_i32);
1540
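// Pointers are plain integers here, so the GEP lowers to explicit byte arithmetic:
// a constant index folds into a single immediate offset, otherwise the index is scaled
// by the element size, widened to the pointer width on 64-bit targets, and added to
// the base pointer.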
1541 if(auto *constant = llvm::dyn_cast<Ice::ConstantInteger32>(index))
1542 {
1543 int32_t offset = constant->getValue() * (int)typeSize(type);
1544
1545 if(offset == 0)
1546 {
1547 return ptr;
1548 }
1549
1550 return createAdd(ptr, createConstantInt(offset));
1551 }
1552
1553 if(!Ice::isByteSizedType(T(type)))
1554 {
1555 index = createMul(index, createConstantInt((int)typeSize(type)));
1556 }
1557
1558 if(sizeof(void *) == 8)
1559 {
1560 if(unsignedIndex)
1561 {
1562 index = createZExt(index, T(Ice::IceType_i64));
1563 }
1564 else
1565 {
1566 index = createSExt(index, T(Ice::IceType_i64));
1567 }
1568 }
1569
1570 return createAdd(ptr, index);
1571 }
1572
static Value *createAtomicRMW(Ice::Intrinsics::AtomicRMWOperation rmwOp, Value *ptr, Value *value, std::memory_order memoryOrder)
1574 {
1575 Ice::Variable *result = ::function->makeVariable(value->getType());
1576
1577 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::AtomicRMW, Ice::Intrinsics::SideEffects_T, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_T };
1578 auto inst = Ice::InstIntrinsic::create(::function, 0, result, intrinsic);
1579 auto op = ::context->getConstantInt32(rmwOp);
1580 auto order = ::context->getConstantInt32(stdToIceMemoryOrder(memoryOrder));
1581 inst->addArg(op);
1582 inst->addArg(ptr);
1583 inst->addArg(value);
1584 inst->addArg(order);
1585 ::basicBlock->appendInst(inst);
1586
1587 return V(result);
1588 }
1589
Value *Nucleus::createAtomicAdd(Value *ptr, Value *value, std::memory_order memoryOrder)
1591 {
1592 RR_DEBUG_INFO_UPDATE_LOC();
1593 return createAtomicRMW(Ice::Intrinsics::AtomicAdd, ptr, value, memoryOrder);
1594 }
1595
Value *Nucleus::createAtomicSub(Value *ptr, Value *value, std::memory_order memoryOrder)
1597 {
1598 RR_DEBUG_INFO_UPDATE_LOC();
1599 return createAtomicRMW(Ice::Intrinsics::AtomicSub, ptr, value, memoryOrder);
1600 }
1601
Value *Nucleus::createAtomicAnd(Value *ptr, Value *value, std::memory_order memoryOrder)
1603 {
1604 RR_DEBUG_INFO_UPDATE_LOC();
1605 return createAtomicRMW(Ice::Intrinsics::AtomicAnd, ptr, value, memoryOrder);
1606 }
1607
Value *Nucleus::createAtomicOr(Value *ptr, Value *value, std::memory_order memoryOrder)
1609 {
1610 RR_DEBUG_INFO_UPDATE_LOC();
1611 return createAtomicRMW(Ice::Intrinsics::AtomicOr, ptr, value, memoryOrder);
1612 }
1613
Value *Nucleus::createAtomicXor(Value *ptr, Value *value, std::memory_order memoryOrder)
1615 {
1616 RR_DEBUG_INFO_UPDATE_LOC();
1617 return createAtomicRMW(Ice::Intrinsics::AtomicXor, ptr, value, memoryOrder);
1618 }
1619
Value *Nucleus::createAtomicExchange(Value *ptr, Value *value, std::memory_order memoryOrder)
1621 {
1622 RR_DEBUG_INFO_UPDATE_LOC();
1623 return createAtomicRMW(Ice::Intrinsics::AtomicExchange, ptr, value, memoryOrder);
1624 }
1625
Value *Nucleus::createAtomicCompareExchange(Value *ptr, Value *value, Value *compare, std::memory_order memoryOrderEqual, std::memory_order memoryOrderUnequal)
1627 {
1628 RR_DEBUG_INFO_UPDATE_LOC();
1629 Ice::Variable *result = ::function->makeVariable(value->getType());
1630
1631 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::AtomicCmpxchg, Ice::Intrinsics::SideEffects_T, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_T };
1632 auto inst = Ice::InstIntrinsic::create(::function, 0, result, intrinsic);
1633 auto orderEq = ::context->getConstantInt32(stdToIceMemoryOrder(memoryOrderEqual));
1634 auto orderNeq = ::context->getConstantInt32(stdToIceMemoryOrder(memoryOrderUnequal));
1635 inst->addArg(ptr);
1636 inst->addArg(compare);
1637 inst->addArg(value);
1638 inst->addArg(orderEq);
1639 inst->addArg(orderNeq);
1640 ::basicBlock->appendInst(inst);
1641
1642 return V(result);
1643 }
1644
static Value *createCast(Ice::InstCast::OpKind op, Value *v, Type *destType)
1646 {
1647 if(v->getType() == T(destType))
1648 {
1649 return v;
1650 }
1651
1652 Ice::Variable *result = ::function->makeVariable(T(destType));
1653 Ice::InstCast *cast = Ice::InstCast::create(::function, op, result, v);
1654 ::basicBlock->appendInst(cast);
1655
1656 return V(result);
1657 }
1658
Value *Nucleus::createTrunc(Value *v, Type *destType)
1660 {
1661 RR_DEBUG_INFO_UPDATE_LOC();
1662 return createCast(Ice::InstCast::Trunc, v, destType);
1663 }
1664
Value *Nucleus::createZExt(Value *v, Type *destType)
1666 {
1667 RR_DEBUG_INFO_UPDATE_LOC();
1668 return createCast(Ice::InstCast::Zext, v, destType);
1669 }
1670
createSExt(Value * v,Type * destType)1671 Value *Nucleus::createSExt(Value *v, Type *destType)
1672 {
1673 RR_DEBUG_INFO_UPDATE_LOC();
1674 return createCast(Ice::InstCast::Sext, v, destType);
1675 }
1676
createFPToUI(Value * v,Type * destType)1677 Value *Nucleus::createFPToUI(Value *v, Type *destType)
1678 {
1679 RR_DEBUG_INFO_UPDATE_LOC();
1680 return createCast(Ice::InstCast::Fptoui, v, destType);
1681 }
1682
createFPToSI(Value * v,Type * destType)1683 Value *Nucleus::createFPToSI(Value *v, Type *destType)
1684 {
1685 RR_DEBUG_INFO_UPDATE_LOC();
1686 return createCast(Ice::InstCast::Fptosi, v, destType);
1687 }
1688
createSIToFP(Value * v,Type * destType)1689 Value *Nucleus::createSIToFP(Value *v, Type *destType)
1690 {
1691 RR_DEBUG_INFO_UPDATE_LOC();
1692 return createCast(Ice::InstCast::Sitofp, v, destType);
1693 }
1694
createFPTrunc(Value * v,Type * destType)1695 Value *Nucleus::createFPTrunc(Value *v, Type *destType)
1696 {
1697 RR_DEBUG_INFO_UPDATE_LOC();
1698 return createCast(Ice::InstCast::Fptrunc, v, destType);
1699 }
1700
createFPExt(Value * v,Type * destType)1701 Value *Nucleus::createFPExt(Value *v, Type *destType)
1702 {
1703 RR_DEBUG_INFO_UPDATE_LOC();
1704 return createCast(Ice::InstCast::Fpext, v, destType);
1705 }
1706
createBitCast(Value * v,Type * destType)1707 Value *Nucleus::createBitCast(Value *v, Type *destType)
1708 {
1709 RR_DEBUG_INFO_UPDATE_LOC();
1710 // Bitcasts must normally be between types of the same logical size, but emulated narrow vectors
1711 // require casting between scalars and wide vectors. On platforms without direct support for such
1712 // mismatched bitcasts, emulate them by writing the value to the stack and reading it back as the
// destination type (see the example after this function).
1713 if(emulateMismatchedBitCast)
1714 {
1715 if(!Ice::isVectorType(v->getType()) && Ice::isVectorType(T(destType)))
1716 {
1717 Value *address = allocateStackVariable(destType);
1718 createStore(v, address, T(v->getType()));
1719 return createLoad(address, destType);
1720 }
1721 else if(Ice::isVectorType(v->getType()) && !Ice::isVectorType(T(destType)))
1722 {
1723 Value *address = allocateStackVariable(T(v->getType()));
1724 createStore(v, address, T(v->getType()));
1725 return createLoad(address, destType);
1726 }
1727 }
1728
1729 return createCast(Ice::InstCast::Bitcast, v, destType);
1730 }
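// Illustrative example (hypothetical value): with emulateMismatchedBitCast set, a Reactor-level
// As<Byte4>(Int(0x01020304)) reaches this function with a scalar i32 source and a vector
// destination. The i32 is spilled to a stack slot sized for the destination vector and read back
// as that vector, so on a little-endian target lanes 0..3 hold 0x04, 0x03, 0x02, 0x01.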
1731
createIntCompare(Ice::InstIcmp::ICond condition,Value * lhs,Value * rhs)1732 static Value *createIntCompare(Ice::InstIcmp::ICond condition, Value *lhs, Value *rhs)
1733 {
1734 ASSERT(lhs->getType() == rhs->getType());
1735
1736 auto result = ::function->makeVariable(Ice::isScalarIntegerType(lhs->getType()) ? Ice::IceType_i1 : lhs->getType());
1737 auto cmp = Ice::InstIcmp::create(::function, condition, result, lhs, rhs);
1738 ::basicBlock->appendInst(cmp);
1739
1740 return V(result);
1741 }
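// Note: for scalar operands the comparison result is a single i1 (a boolean); for vector operands
// it keeps the operand's vector type and acts as a per-lane mask (all ones where the condition
// holds, all zeros where it does not), which is what the vector CmpEQ/CmpGT wrappers further down
// rely on.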
1742
createICmpEQ(Value * lhs,Value * rhs)1743 Value *Nucleus::createICmpEQ(Value *lhs, Value *rhs)
1744 {
1745 RR_DEBUG_INFO_UPDATE_LOC();
1746 return createIntCompare(Ice::InstIcmp::Eq, lhs, rhs);
1747 }
1748
createICmpNE(Value * lhs,Value * rhs)1749 Value *Nucleus::createICmpNE(Value *lhs, Value *rhs)
1750 {
1751 RR_DEBUG_INFO_UPDATE_LOC();
1752 return createIntCompare(Ice::InstIcmp::Ne, lhs, rhs);
1753 }
1754
createICmpUGT(Value * lhs,Value * rhs)1755 Value *Nucleus::createICmpUGT(Value *lhs, Value *rhs)
1756 {
1757 RR_DEBUG_INFO_UPDATE_LOC();
1758 return createIntCompare(Ice::InstIcmp::Ugt, lhs, rhs);
1759 }
1760
createICmpUGE(Value * lhs,Value * rhs)1761 Value *Nucleus::createICmpUGE(Value *lhs, Value *rhs)
1762 {
1763 RR_DEBUG_INFO_UPDATE_LOC();
1764 return createIntCompare(Ice::InstIcmp::Uge, lhs, rhs);
1765 }
1766
createICmpULT(Value * lhs,Value * rhs)1767 Value *Nucleus::createICmpULT(Value *lhs, Value *rhs)
1768 {
1769 RR_DEBUG_INFO_UPDATE_LOC();
1770 return createIntCompare(Ice::InstIcmp::Ult, lhs, rhs);
1771 }
1772
createICmpULE(Value * lhs,Value * rhs)1773 Value *Nucleus::createICmpULE(Value *lhs, Value *rhs)
1774 {
1775 RR_DEBUG_INFO_UPDATE_LOC();
1776 return createIntCompare(Ice::InstIcmp::Ule, lhs, rhs);
1777 }
1778
createICmpSGT(Value * lhs,Value * rhs)1779 Value *Nucleus::createICmpSGT(Value *lhs, Value *rhs)
1780 {
1781 RR_DEBUG_INFO_UPDATE_LOC();
1782 return createIntCompare(Ice::InstIcmp::Sgt, lhs, rhs);
1783 }
1784
createICmpSGE(Value * lhs,Value * rhs)1785 Value *Nucleus::createICmpSGE(Value *lhs, Value *rhs)
1786 {
1787 RR_DEBUG_INFO_UPDATE_LOC();
1788 return createIntCompare(Ice::InstIcmp::Sge, lhs, rhs);
1789 }
1790
createICmpSLT(Value * lhs,Value * rhs)1791 Value *Nucleus::createICmpSLT(Value *lhs, Value *rhs)
1792 {
1793 RR_DEBUG_INFO_UPDATE_LOC();
1794 return createIntCompare(Ice::InstIcmp::Slt, lhs, rhs);
1795 }
1796
createICmpSLE(Value * lhs,Value * rhs)1797 Value *Nucleus::createICmpSLE(Value *lhs, Value *rhs)
1798 {
1799 RR_DEBUG_INFO_UPDATE_LOC();
1800 return createIntCompare(Ice::InstIcmp::Sle, lhs, rhs);
1801 }
1802
createFloatCompare(Ice::InstFcmp::FCond condition,Value * lhs,Value * rhs)1803 static Value *createFloatCompare(Ice::InstFcmp::FCond condition, Value *lhs, Value *rhs)
1804 {
1805 ASSERT(lhs->getType() == rhs->getType());
1806 ASSERT(Ice::isScalarFloatingType(lhs->getType()) || lhs->getType() == Ice::IceType_v4f32);
1807
1808 auto result = ::function->makeVariable(Ice::isScalarFloatingType(lhs->getType()) ? Ice::IceType_i1 : Ice::IceType_v4i32);
1809 auto cmp = Ice::InstFcmp::create(::function, condition, result, lhs, rhs);
1810 ::basicBlock->appendInst(cmp);
1811
1812 return V(result);
1813 }
1814
createFCmpOEQ(Value * lhs,Value * rhs)1815 Value *Nucleus::createFCmpOEQ(Value *lhs, Value *rhs)
1816 {
1817 RR_DEBUG_INFO_UPDATE_LOC();
1818 return createFloatCompare(Ice::InstFcmp::Oeq, lhs, rhs);
1819 }
1820
createFCmpOGT(Value * lhs,Value * rhs)1821 Value *Nucleus::createFCmpOGT(Value *lhs, Value *rhs)
1822 {
1823 RR_DEBUG_INFO_UPDATE_LOC();
1824 return createFloatCompare(Ice::InstFcmp::Ogt, lhs, rhs);
1825 }
1826
createFCmpOGE(Value * lhs,Value * rhs)1827 Value *Nucleus::createFCmpOGE(Value *lhs, Value *rhs)
1828 {
1829 RR_DEBUG_INFO_UPDATE_LOC();
1830 return createFloatCompare(Ice::InstFcmp::Oge, lhs, rhs);
1831 }
1832
createFCmpOLT(Value * lhs,Value * rhs)1833 Value *Nucleus::createFCmpOLT(Value *lhs, Value *rhs)
1834 {
1835 RR_DEBUG_INFO_UPDATE_LOC();
1836 return createFloatCompare(Ice::InstFcmp::Olt, lhs, rhs);
1837 }
1838
createFCmpOLE(Value * lhs,Value * rhs)1839 Value *Nucleus::createFCmpOLE(Value *lhs, Value *rhs)
1840 {
1841 RR_DEBUG_INFO_UPDATE_LOC();
1842 return createFloatCompare(Ice::InstFcmp::Ole, lhs, rhs);
1843 }
1844
createFCmpONE(Value * lhs,Value * rhs)1845 Value *Nucleus::createFCmpONE(Value *lhs, Value *rhs)
1846 {
1847 RR_DEBUG_INFO_UPDATE_LOC();
1848 return createFloatCompare(Ice::InstFcmp::One, lhs, rhs);
1849 }
1850
createFCmpORD(Value * lhs,Value * rhs)1851 Value *Nucleus::createFCmpORD(Value *lhs, Value *rhs)
1852 {
1853 RR_DEBUG_INFO_UPDATE_LOC();
1854 return createFloatCompare(Ice::InstFcmp::Ord, lhs, rhs);
1855 }
1856
createFCmpUNO(Value * lhs,Value * rhs)1857 Value *Nucleus::createFCmpUNO(Value *lhs, Value *rhs)
1858 {
1859 RR_DEBUG_INFO_UPDATE_LOC();
1860 return createFloatCompare(Ice::InstFcmp::Uno, lhs, rhs);
1861 }
1862
createFCmpUEQ(Value * lhs,Value * rhs)1863 Value *Nucleus::createFCmpUEQ(Value *lhs, Value *rhs)
1864 {
1865 RR_DEBUG_INFO_UPDATE_LOC();
1866 return createFloatCompare(Ice::InstFcmp::Ueq, lhs, rhs);
1867 }
1868
createFCmpUGT(Value * lhs,Value * rhs)1869 Value *Nucleus::createFCmpUGT(Value *lhs, Value *rhs)
1870 {
1871 RR_DEBUG_INFO_UPDATE_LOC();
1872 return createFloatCompare(Ice::InstFcmp::Ugt, lhs, rhs);
1873 }
1874
createFCmpUGE(Value * lhs,Value * rhs)1875 Value *Nucleus::createFCmpUGE(Value *lhs, Value *rhs)
1876 {
1877 RR_DEBUG_INFO_UPDATE_LOC();
1878 return createFloatCompare(Ice::InstFcmp::Uge, lhs, rhs);
1879 }
1880
createFCmpULT(Value * lhs,Value * rhs)1881 Value *Nucleus::createFCmpULT(Value *lhs, Value *rhs)
1882 {
1883 RR_DEBUG_INFO_UPDATE_LOC();
1884 return createFloatCompare(Ice::InstFcmp::Ult, lhs, rhs);
1885 }
1886
createFCmpULE(Value * lhs,Value * rhs)1887 Value *Nucleus::createFCmpULE(Value *lhs, Value *rhs)
1888 {
1889 RR_DEBUG_INFO_UPDATE_LOC();
1890 return createFloatCompare(Ice::InstFcmp::Ule, lhs, rhs);
1891 }
1892
createFCmpUNE(Value * lhs,Value * rhs)1893 Value *Nucleus::createFCmpUNE(Value *lhs, Value *rhs)
1894 {
1895 RR_DEBUG_INFO_UPDATE_LOC();
1896 return createFloatCompare(Ice::InstFcmp::Une, lhs, rhs);
1897 }
1898
createExtractElement(Value * vector,Type * type,int index)1899 Value *Nucleus::createExtractElement(Value *vector, Type *type, int index)
1900 {
1901 RR_DEBUG_INFO_UPDATE_LOC();
1902 auto result = ::function->makeVariable(T(type));
1903 auto extract = Ice::InstExtractElement::create(::function, result, V(vector), ::context->getConstantInt32(index));
1904 ::basicBlock->appendInst(extract);
1905
1906 return V(result);
1907 }
1908
createInsertElement(Value * vector,Value * element,int index)1909 Value *Nucleus::createInsertElement(Value *vector, Value *element, int index)
1910 {
1911 RR_DEBUG_INFO_UPDATE_LOC();
1912 auto result = ::function->makeVariable(vector->getType());
1913 auto insert = Ice::InstInsertElement::create(::function, result, vector, element, ::context->getConstantInt32(index));
1914 ::basicBlock->appendInst(insert);
1915
1916 return V(result);
1917 }
1918
createShuffleVector(Value * V1,Value * V2,const int * select)1919 Value *Nucleus::createShuffleVector(Value *V1, Value *V2, const int *select)
1920 {
1921 RR_DEBUG_INFO_UPDATE_LOC();
1922 ASSERT(V1->getType() == V2->getType());
1923
1924 int size = Ice::typeNumElements(V1->getType());
1925 auto result = ::function->makeVariable(V1->getType());
1926 auto shuffle = Ice::InstShuffleVector::create(::function, result, V1, V2);
1927
1928 for(int i = 0; i < size; i++)
1929 {
1930 shuffle->addIndex(llvm::cast<Ice::ConstantInteger32>(::context->getConstantInt32(select[i])));
1931 }
1932
1933 ::basicBlock->appendInst(shuffle);
1934
1935 return V(result);
1936 }
1937
createSelect(Value * C,Value * ifTrue,Value * ifFalse)1938 Value *Nucleus::createSelect(Value *C, Value *ifTrue, Value *ifFalse)
1939 {
1940 RR_DEBUG_INFO_UPDATE_LOC();
1941 ASSERT(ifTrue->getType() == ifFalse->getType());
1942
1943 auto result = ::function->makeVariable(ifTrue->getType());
1944 auto *select = Ice::InstSelect::create(::function, result, C, ifTrue, ifFalse);
1945 ::basicBlock->appendInst(select);
1946
1947 return V(result);
1948 }
1949
createSwitch(Value * control,BasicBlock * defaultBranch,unsigned numCases)1950 SwitchCases *Nucleus::createSwitch(Value *control, BasicBlock *defaultBranch, unsigned numCases)
1951 {
1952 RR_DEBUG_INFO_UPDATE_LOC();
1953 auto switchInst = Ice::InstSwitch::create(::function, numCases, control, defaultBranch);
1954 ::basicBlock->appendInst(switchInst);
1955
1956 return reinterpret_cast<SwitchCases *>(switchInst);
1957 }
1958
addSwitchCase(SwitchCases * switchCases,int label,BasicBlock * branch)1959 void Nucleus::addSwitchCase(SwitchCases *switchCases, int label, BasicBlock *branch)
1960 {
1961 RR_DEBUG_INFO_UPDATE_LOC();
1962 switchCases->addBranch(label, label, branch);
1963 }
1964
createUnreachable()1965 void Nucleus::createUnreachable()
1966 {
1967 RR_DEBUG_INFO_UPDATE_LOC();
1968 Ice::InstUnreachable *unreachable = Ice::InstUnreachable::create(::function);
1969 ::basicBlock->appendInst(unreachable);
1970 }
1971
getType(Value * value)1972 Type *Nucleus::getType(Value *value)
1973 {
1974 return T(V(value)->getType());
1975 }
1976
getContainedType(Type * vectorType)1977 Type *Nucleus::getContainedType(Type *vectorType)
1978 {
1979 Ice::Type vecTy = T(vectorType);
1980 switch(vecTy)
1981 {
1982 case Ice::IceType_v4i1: return T(Ice::IceType_i1);
1983 case Ice::IceType_v8i1: return T(Ice::IceType_i1);
1984 case Ice::IceType_v16i1: return T(Ice::IceType_i1);
1985 case Ice::IceType_v16i8: return T(Ice::IceType_i8);
1986 case Ice::IceType_v8i16: return T(Ice::IceType_i16);
1987 case Ice::IceType_v4i32: return T(Ice::IceType_i32);
1988 case Ice::IceType_v4f32: return T(Ice::IceType_f32);
1989 default:
1990 ASSERT_MSG(false, "getContainedType: input type is not a vector type");
1991 return {};
1992 }
1993 }
1994
getPointerType(Type * ElementType)1995 Type *Nucleus::getPointerType(Type *ElementType)
1996 {
1997 return T(sz::getPointerType(T(ElementType)));
1998 }
1999
getNaturalIntType()2000 static constexpr Ice::Type getNaturalIntType()
2001 {
2002 constexpr size_t intSize = sizeof(int);
2003 static_assert(intSize == 4 || intSize == 8, "");
2004 return intSize == 4 ? Ice::IceType_i32 : Ice::IceType_i64;
2005 }
2006
getPrintfStorageType(Type * valueType)2007 Type *Nucleus::getPrintfStorageType(Type *valueType)
2008 {
2009 Ice::Type valueTy = T(valueType);
2010 switch(valueTy)
2011 {
2012 case Ice::IceType_i32:
2013 return T(getNaturalIntType());
2014
2015 case Ice::IceType_f32:
2016 return T(Ice::IceType_f64);
2017
2018 default:
2019 UNIMPLEMENTED_NO_BUG("getPrintfStorageType: add more cases as needed");
2020 return {};
2021 }
2022 }
2023
createNullValue(Type * Ty)2024 Value *Nucleus::createNullValue(Type *Ty)
2025 {
2026 RR_DEBUG_INFO_UPDATE_LOC();
2027 if(Ice::isVectorType(T(Ty)))
2028 {
2029 ASSERT(Ice::typeNumElements(T(Ty)) <= 16);
2030 int64_t c[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2031 return createConstantVector(c, Ty);
2032 }
2033 else
2034 {
2035 return V(::context->getConstantZero(T(Ty)));
2036 }
2037 }
2038
createConstantLong(int64_t i)2039 Value *Nucleus::createConstantLong(int64_t i)
2040 {
2041 RR_DEBUG_INFO_UPDATE_LOC();
2042 return V(::context->getConstantInt64(i));
2043 }
2044
createConstantInt(int i)2045 Value *Nucleus::createConstantInt(int i)
2046 {
2047 RR_DEBUG_INFO_UPDATE_LOC();
2048 return V(::context->getConstantInt32(i));
2049 }
2050
createConstantInt(unsigned int i)2051 Value *Nucleus::createConstantInt(unsigned int i)
2052 {
2053 RR_DEBUG_INFO_UPDATE_LOC();
2054 return V(::context->getConstantInt32(i));
2055 }
2056
createConstantBool(bool b)2057 Value *Nucleus::createConstantBool(bool b)
2058 {
2059 RR_DEBUG_INFO_UPDATE_LOC();
2060 return V(::context->getConstantInt1(b));
2061 }
2062
createConstantByte(signed char i)2063 Value *Nucleus::createConstantByte(signed char i)
2064 {
2065 RR_DEBUG_INFO_UPDATE_LOC();
2066 return V(::context->getConstantInt8(i));
2067 }
2068
createConstantByte(unsigned char i)2069 Value *Nucleus::createConstantByte(unsigned char i)
2070 {
2071 RR_DEBUG_INFO_UPDATE_LOC();
2072 return V(::context->getConstantInt8(i));
2073 }
2074
createConstantShort(short i)2075 Value *Nucleus::createConstantShort(short i)
2076 {
2077 RR_DEBUG_INFO_UPDATE_LOC();
2078 return V(::context->getConstantInt16(i));
2079 }
2080
createConstantShort(unsigned short i)2081 Value *Nucleus::createConstantShort(unsigned short i)
2082 {
2083 RR_DEBUG_INFO_UPDATE_LOC();
2084 return V(::context->getConstantInt16(i));
2085 }
2086
createConstantFloat(float x)2087 Value *Nucleus::createConstantFloat(float x)
2088 {
2089 RR_DEBUG_INFO_UPDATE_LOC();
2090 return V(::context->getConstantFloat(x));
2091 }
2092
createNullPointer(Type * Ty)2093 Value *Nucleus::createNullPointer(Type *Ty)
2094 {
2095 RR_DEBUG_INFO_UPDATE_LOC();
2096 return createNullValue(T(sizeof(void *) == 8 ? Ice::IceType_i64 : Ice::IceType_i32));
2097 }
2098
IceConstantData(void const * data,size_t size,size_t alignment=1)2099 static Ice::Constant *IceConstantData(void const *data, size_t size, size_t alignment = 1)
2100 {
2101 return sz::getConstantPointer(::context, ::routine->addConstantData(data, size, alignment));
2102 }
2103
createConstantVector(const int64_t * constants,Type * type)2104 Value *Nucleus::createConstantVector(const int64_t *constants, Type *type)
2105 {
2106 RR_DEBUG_INFO_UPDATE_LOC();
2107 const int vectorSize = 16;
2108 ASSERT(Ice::typeWidthInBytes(T(type)) == vectorSize);
2109 const int alignment = vectorSize;
2110
2111 const int64_t *i = constants;
2112 const double *f = reinterpret_cast<const double *>(constants);
2113
2114 // TODO(b/148082873): Fix global variable constants when generating multiple functions
2115 Ice::Constant *ptr = nullptr;
2116
2117 switch((int)reinterpret_cast<intptr_t>(type))
2118 {
2119 case Ice::IceType_v4i32:
2120 case Ice::IceType_v4i1:
2121 {
2122 const int initializer[4] = { (int)i[0], (int)i[1], (int)i[2], (int)i[3] };
2123 static_assert(sizeof(initializer) == vectorSize, "!");
2124 ptr = IceConstantData(initializer, vectorSize, alignment);
2125 }
2126 break;
2127 case Ice::IceType_v4f32:
2128 {
2129 const float initializer[4] = { (float)f[0], (float)f[1], (float)f[2], (float)f[3] };
2130 static_assert(sizeof(initializer) == vectorSize, "!");
2131 ptr = IceConstantData(initializer, vectorSize, alignment);
2132 }
2133 break;
2134 case Ice::IceType_v8i16:
2135 case Ice::IceType_v8i1:
2136 {
2137 const short initializer[8] = { (short)i[0], (short)i[1], (short)i[2], (short)i[3], (short)i[4], (short)i[5], (short)i[6], (short)i[7] };
2138 static_assert(sizeof(initializer) == vectorSize, "!");
2139 ptr = IceConstantData(initializer, vectorSize, alignment);
2140 }
2141 break;
2142 case Ice::IceType_v16i8:
2143 case Ice::IceType_v16i1:
2144 {
2145 const char initializer[16] = { (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[4], (char)i[5], (char)i[6], (char)i[7], (char)i[8], (char)i[9], (char)i[10], (char)i[11], (char)i[12], (char)i[13], (char)i[14], (char)i[15] };
2146 static_assert(sizeof(initializer) == vectorSize, "!");
2147 ptr = IceConstantData(initializer, vectorSize, alignment);
2148 }
2149 break;
2150 case Type_v2i32:
2151 {
2152 const int initializer[4] = { (int)i[0], (int)i[1], (int)i[0], (int)i[1] };
2153 static_assert(sizeof(initializer) == vectorSize, "!");
2154 ptr = IceConstantData(initializer, vectorSize, alignment);
2155 }
2156 break;
2157 case Type_v2f32:
2158 {
2159 const float initializer[4] = { (float)f[0], (float)f[1], (float)f[0], (float)f[1] };
2160 static_assert(sizeof(initializer) == vectorSize, "!");
2161 ptr = IceConstantData(initializer, vectorSize, alignment);
2162 }
2163 break;
2164 case Type_v4i16:
2165 {
2166 const short initializer[8] = { (short)i[0], (short)i[1], (short)i[2], (short)i[3], (short)i[0], (short)i[1], (short)i[2], (short)i[3] };
2167 static_assert(sizeof(initializer) == vectorSize, "!");
2168 ptr = IceConstantData(initializer, vectorSize, alignment);
2169 }
2170 break;
2171 case Type_v8i8:
2172 {
2173 const char initializer[16] = { (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[4], (char)i[5], (char)i[6], (char)i[7], (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[4], (char)i[5], (char)i[6], (char)i[7] };
2174 static_assert(sizeof(initializer) == vectorSize, "!");
2175 ptr = IceConstantData(initializer, vectorSize, alignment);
2176 }
2177 break;
2178 case Type_v4i8:
2179 {
2180 const char initializer[16] = { (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[0], (char)i[1], (char)i[2], (char)i[3] };
2181 static_assert(sizeof(initializer) == vectorSize, "!");
2182 ptr = IceConstantData(initializer, vectorSize, alignment);
2183 }
2184 break;
2185 default:
2186 UNREACHABLE("Unknown constant vector type: %d", (int)reinterpret_cast<intptr_t>(type));
2187 }
2188
2189 ASSERT(ptr);
2190
2191 Ice::Variable *result = sz::createLoad(::function, ::basicBlock, ptr, T(type), alignment);
2192 return V(result);
2193 }
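// Note: constants for the emulated narrow vector types above (Type_v2i32, Type_v2f32, Type_v4i16,
// Type_v8i8, Type_v4i8) are replicated to fill the full 16-byte constant pool entry; e.g. a
// Type_v2i32 constant { a, b } is emitted as the datum { a, b, a, b } and then loaded back as a
// 128-bit vector.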
2194
createConstantVector(const double * constants,Type * type)2195 Value *Nucleus::createConstantVector(const double *constants, Type *type)
2196 {
2197 return createConstantVector((const int64_t *)constants, type);
2198 }
2199
createConstantString(const char * v)2200 Value *Nucleus::createConstantString(const char *v)
2201 {
2202 // NOTE: Do not call RR_DEBUG_INFO_UPDATE_LOC() here to avoid recursion when called from rr::Printv
2203 return V(IceConstantData(v, strlen(v) + 1));
2204 }
2205
setOptimizerCallback(OptimizerCallback * callback)2206 void Nucleus::setOptimizerCallback(OptimizerCallback *callback)
2207 {
2208 ::optimizerCallback = callback;
2209 }
2210
type()2211 Type *Void::type()
2212 {
2213 return T(Ice::IceType_void);
2214 }
2215
type()2216 Type *Bool::type()
2217 {
2218 return T(Ice::IceType_i1);
2219 }
2220
type()2221 Type *Byte::type()
2222 {
2223 return T(Ice::IceType_i8);
2224 }
2225
type()2226 Type *SByte::type()
2227 {
2228 return T(Ice::IceType_i8);
2229 }
2230
type()2231 Type *Short::type()
2232 {
2233 return T(Ice::IceType_i16);
2234 }
2235
type()2236 Type *UShort::type()
2237 {
2238 return T(Ice::IceType_i16);
2239 }
2240
type()2241 Type *Byte4::type()
2242 {
2243 return T(Type_v4i8);
2244 }
2245
type()2246 Type *SByte4::type()
2247 {
2248 return T(Type_v4i8);
2249 }
2250
2251 namespace {
SaturateUnsigned(RValue<Short> x)2252 RValue<Byte> SaturateUnsigned(RValue<Short> x)
2253 {
2254 return Byte(IfThenElse(Int(x) > 0xFF, Int(0xFF), IfThenElse(Int(x) < 0, Int(0), Int(x))));
2255 }
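// e.g. SaturateUnsigned(Short(300)) yields Byte(0xFF) and SaturateUnsigned(Short(-5)) yields
// Byte(0x00); the AddSat/SubSat emulation below widens each byte lane to Int, adds or subtracts,
// and clamps the result back to the unsigned byte range with this helper.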
2256
Extract(RValue<Byte8> val,int i)2257 RValue<Byte> Extract(RValue<Byte8> val, int i)
2258 {
2259 return RValue<Byte>(Nucleus::createExtractElement(val.value(), Byte::type(), i));
2260 }
2261
Insert(RValue<Byte8> val,RValue<Byte> element,int i)2262 RValue<Byte8> Insert(RValue<Byte8> val, RValue<Byte> element, int i)
2263 {
2264 return RValue<Byte8>(Nucleus::createInsertElement(val.value(), element.value(), i));
2265 }
2266 } // namespace
2267
AddSat(RValue<Byte8> x,RValue<Byte8> y)2268 RValue<Byte8> AddSat(RValue<Byte8> x, RValue<Byte8> y)
2269 {
2270 RR_DEBUG_INFO_UPDATE_LOC();
2271 if(emulateIntrinsics)
2272 {
2273 Byte8 result;
2274 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 0)) + Int(Extract(y, 0)))), 0);
2275 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 1)) + Int(Extract(y, 1)))), 1);
2276 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 2)) + Int(Extract(y, 2)))), 2);
2277 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 3)) + Int(Extract(y, 3)))), 3);
2278 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 4)) + Int(Extract(y, 4)))), 4);
2279 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 5)) + Int(Extract(y, 5)))), 5);
2280 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 6)) + Int(Extract(y, 6)))), 6);
2281 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 7)) + Int(Extract(y, 7)))), 7);
2282
2283 return result;
2284 }
2285 else
2286 {
2287 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v16i8);
2288 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::AddSaturateUnsigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2289 auto paddusb = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2290 paddusb->addArg(x.value());
2291 paddusb->addArg(y.value());
2292 ::basicBlock->appendInst(paddusb);
2293
2294 return RValue<Byte8>(V(result));
2295 }
2296 }
2297
SubSat(RValue<Byte8> x,RValue<Byte8> y)2298 RValue<Byte8> SubSat(RValue<Byte8> x, RValue<Byte8> y)
2299 {
2300 RR_DEBUG_INFO_UPDATE_LOC();
2301 if(emulateIntrinsics)
2302 {
2303 Byte8 result;
2304 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 0)) - Int(Extract(y, 0)))), 0);
2305 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 1)) - Int(Extract(y, 1)))), 1);
2306 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 2)) - Int(Extract(y, 2)))), 2);
2307 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 3)) - Int(Extract(y, 3)))), 3);
2308 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 4)) - Int(Extract(y, 4)))), 4);
2309 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 5)) - Int(Extract(y, 5)))), 5);
2310 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 6)) - Int(Extract(y, 6)))), 6);
2311 result = Insert(result, SaturateUnsigned(Short(Int(Extract(x, 7)) - Int(Extract(y, 7)))), 7);
2312
2313 return result;
2314 }
2315 else
2316 {
2317 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v16i8);
2318 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SubtractSaturateUnsigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2319 auto psubusw = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2320 psubusw->addArg(x.value());
2321 psubusw->addArg(y.value());
2322 ::basicBlock->appendInst(psubusw);
2323
2324 return RValue<Byte8>(V(result));
2325 }
2326 }
2327
Extract(RValue<SByte8> val,int i)2328 RValue<SByte> Extract(RValue<SByte8> val, int i)
2329 {
2330 RR_DEBUG_INFO_UPDATE_LOC();
2331 return RValue<SByte>(Nucleus::createExtractElement(val.value(), SByte::type(), i));
2332 }
2333
Insert(RValue<SByte8> val,RValue<SByte> element,int i)2334 RValue<SByte8> Insert(RValue<SByte8> val, RValue<SByte> element, int i)
2335 {
2336 RR_DEBUG_INFO_UPDATE_LOC();
2337 return RValue<SByte8>(Nucleus::createInsertElement(val.value(), element.value(), i));
2338 }
2339
operator >>(RValue<SByte8> lhs,unsigned char rhs)2340 RValue<SByte8> operator>>(RValue<SByte8> lhs, unsigned char rhs)
2341 {
2342 RR_DEBUG_INFO_UPDATE_LOC();
2343 if(emulateIntrinsics)
2344 {
2345 SByte8 result;
2346 result = Insert(result, Extract(lhs, 0) >> SByte(rhs), 0);
2347 result = Insert(result, Extract(lhs, 1) >> SByte(rhs), 1);
2348 result = Insert(result, Extract(lhs, 2) >> SByte(rhs), 2);
2349 result = Insert(result, Extract(lhs, 3) >> SByte(rhs), 3);
2350 result = Insert(result, Extract(lhs, 4) >> SByte(rhs), 4);
2351 result = Insert(result, Extract(lhs, 5) >> SByte(rhs), 5);
2352 result = Insert(result, Extract(lhs, 6) >> SByte(rhs), 6);
2353 result = Insert(result, Extract(lhs, 7) >> SByte(rhs), 7);
2354
2355 return result;
2356 }
2357 else
2358 {
2359 #if defined(__i386__) || defined(__x86_64__)
2360 // SSE2 doesn't support byte vector shifts, so shift as shorts and recombine.
2361 RValue<Short4> hi = (As<Short4>(lhs) >> rhs) & Short4(0xFF00u);
2362 RValue<Short4> lo = As<Short4>(As<UShort4>((As<Short4>(lhs) << 8) >> rhs) >> 8);
2363
2364 return As<SByte8>(hi | lo);
2365 #else
2366 return RValue<SByte8>(Nucleus::createAShr(lhs.value(), V(::context->getConstantInt32(rhs))));
2367 #endif
2368 }
2369 }
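// Worked example for the SSE2 path of operator>>(SByte8, unsigned char) above (illustrative
// values): take a 16-bit lane whose high byte is 0x80 (-128) and low byte is 0x01, with rhs = 2.
// 0x8001 arithmetic-shifted by 2 is 0xE000, and masking with 0xFF00 keeps the correctly shifted
// high byte 0xE0 (-32). For the low byte, 0x8001 << 8 = 0x0100, >> 2 = 0x0040, and the logical
// >> 8 brings it back to 0x00 (1 >> 2). OR-ing the halves gives 0xE000, i.e. both bytes shifted
// independently with sign extension.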
2370
SignMask(RValue<Byte8> x)2371 RValue<Int> SignMask(RValue<Byte8> x)
2372 {
2373 RR_DEBUG_INFO_UPDATE_LOC();
2374 if(emulateIntrinsics || CPUID::ARM)
2375 {
2376 Byte8 xx = As<Byte8>(As<SByte8>(x) >> 7) & Byte8(0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80);
2377 return Int(Extract(xx, 0)) | Int(Extract(xx, 1)) | Int(Extract(xx, 2)) | Int(Extract(xx, 3)) | Int(Extract(xx, 4)) | Int(Extract(xx, 5)) | Int(Extract(xx, 6)) | Int(Extract(xx, 7));
2378 }
2379 else
2380 {
2381 Ice::Variable *result = ::function->makeVariable(Ice::IceType_i32);
2382 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SignMask, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2383 auto movmsk = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
2384 movmsk->addArg(x.value());
2385 ::basicBlock->appendInst(movmsk);
2386
2387 return RValue<Int>(V(result)) & 0xFF;
2388 }
2389 }
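// In the emulated SignMask path above, the arithmetic shift by 7 turns each lane into 0xFF or
// 0x00 depending on its sign bit, the AND assigns each lane its own bit (0x01 .. 0x80), and
// OR-ing the extracted lanes reproduces the MSB bitmask that x86 movmskb returns; e.g. if only
// lanes 0 and 7 are negative the result is 0x81.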
2390
2391 // RValue<Byte8> CmpGT(RValue<Byte8> x, RValue<Byte8> y)
2392 // {
2393 // return RValue<Byte8>(createIntCompare(Ice::InstIcmp::Ugt, x.value(), y.value()));
2394 // }
2395
CmpEQ(RValue<Byte8> x,RValue<Byte8> y)2396 RValue<Byte8> CmpEQ(RValue<Byte8> x, RValue<Byte8> y)
2397 {
2398 RR_DEBUG_INFO_UPDATE_LOC();
2399 return RValue<Byte8>(Nucleus::createICmpEQ(x.value(), y.value()));
2400 }
2401
type()2402 Type *Byte8::type()
2403 {
2404 return T(Type_v8i8);
2405 }
2406
2407 // RValue<SByte8> operator<<(RValue<SByte8> lhs, unsigned char rhs)
2408 // {
2409 // return RValue<SByte8>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
2410 // }
2411
2412 // RValue<SByte8> operator>>(RValue<SByte8> lhs, unsigned char rhs)
2413 // {
2414 // return RValue<SByte8>(Nucleus::createAShr(lhs.value(), V(::context->getConstantInt32(rhs))));
2415 // }
2416
SaturateSigned(RValue<Short> x)2417 RValue<SByte> SaturateSigned(RValue<Short> x)
2418 {
2419 RR_DEBUG_INFO_UPDATE_LOC();
2420 return SByte(IfThenElse(Int(x) > 0x7F, Int(0x7F), IfThenElse(Int(x) < -0x80, Int(0x80), Int(x))));
2421 }
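// e.g. SaturateSigned(Short(200)) yields SByte(0x7F) (127) and SaturateSigned(Short(-200)) yields
// SByte(0x80) (-128); 0x80 is the two's-complement bit pattern of -128.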
2422
AddSat(RValue<SByte8> x,RValue<SByte8> y)2423 RValue<SByte8> AddSat(RValue<SByte8> x, RValue<SByte8> y)
2424 {
2425 RR_DEBUG_INFO_UPDATE_LOC();
2426 if(emulateIntrinsics)
2427 {
2428 SByte8 result;
2429 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 0)) + Int(Extract(y, 0)))), 0);
2430 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 1)) + Int(Extract(y, 1)))), 1);
2431 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 2)) + Int(Extract(y, 2)))), 2);
2432 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 3)) + Int(Extract(y, 3)))), 3);
2433 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 4)) + Int(Extract(y, 4)))), 4);
2434 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 5)) + Int(Extract(y, 5)))), 5);
2435 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 6)) + Int(Extract(y, 6)))), 6);
2436 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 7)) + Int(Extract(y, 7)))), 7);
2437
2438 return result;
2439 }
2440 else
2441 {
2442 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v16i8);
2443 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::AddSaturateSigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2444 auto paddsb = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2445 paddsb->addArg(x.value());
2446 paddsb->addArg(y.value());
2447 ::basicBlock->appendInst(paddsb);
2448
2449 return RValue<SByte8>(V(result));
2450 }
2451 }
2452
SubSat(RValue<SByte8> x,RValue<SByte8> y)2453 RValue<SByte8> SubSat(RValue<SByte8> x, RValue<SByte8> y)
2454 {
2455 RR_DEBUG_INFO_UPDATE_LOC();
2456 if(emulateIntrinsics)
2457 {
2458 SByte8 result;
2459 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 0)) - Int(Extract(y, 0)))), 0);
2460 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 1)) - Int(Extract(y, 1)))), 1);
2461 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 2)) - Int(Extract(y, 2)))), 2);
2462 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 3)) - Int(Extract(y, 3)))), 3);
2463 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 4)) - Int(Extract(y, 4)))), 4);
2464 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 5)) - Int(Extract(y, 5)))), 5);
2465 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 6)) - Int(Extract(y, 6)))), 6);
2466 result = Insert(result, SaturateSigned(Short(Int(Extract(x, 7)) - Int(Extract(y, 7)))), 7);
2467
2468 return result;
2469 }
2470 else
2471 {
2472 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v16i8);
2473 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SubtractSaturateSigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2474 auto psubsb = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2475 psubsb->addArg(x.value());
2476 psubsb->addArg(y.value());
2477 ::basicBlock->appendInst(psubsb);
2478
2479 return RValue<SByte8>(V(result));
2480 }
2481 }
2482
SignMask(RValue<SByte8> x)2483 RValue<Int> SignMask(RValue<SByte8> x)
2484 {
2485 RR_DEBUG_INFO_UPDATE_LOC();
2486 if(emulateIntrinsics || CPUID::ARM)
2487 {
2488 SByte8 xx = (x >> 7) & SByte8(0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80);
2489 return Int(Extract(xx, 0)) | Int(Extract(xx, 1)) | Int(Extract(xx, 2)) | Int(Extract(xx, 3)) | Int(Extract(xx, 4)) | Int(Extract(xx, 5)) | Int(Extract(xx, 6)) | Int(Extract(xx, 7));
2490 }
2491 else
2492 {
2493 Ice::Variable *result = ::function->makeVariable(Ice::IceType_i32);
2494 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SignMask, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2495 auto movmsk = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
2496 movmsk->addArg(x.value());
2497 ::basicBlock->appendInst(movmsk);
2498
2499 return RValue<Int>(V(result)) & 0xFF;
2500 }
2501 }
2502
CmpGT(RValue<SByte8> x,RValue<SByte8> y)2503 RValue<Byte8> CmpGT(RValue<SByte8> x, RValue<SByte8> y)
2504 {
2505 RR_DEBUG_INFO_UPDATE_LOC();
2506 return RValue<Byte8>(createIntCompare(Ice::InstIcmp::Sgt, x.value(), y.value()));
2507 }
2508
CmpEQ(RValue<SByte8> x,RValue<SByte8> y)2509 RValue<Byte8> CmpEQ(RValue<SByte8> x, RValue<SByte8> y)
2510 {
2511 RR_DEBUG_INFO_UPDATE_LOC();
2512 return RValue<Byte8>(Nucleus::createICmpEQ(x.value(), y.value()));
2513 }
2514
type()2515 Type *SByte8::type()
2516 {
2517 return T(Type_v8i8);
2518 }
2519
type()2520 Type *Byte16::type()
2521 {
2522 return T(Ice::IceType_v16i8);
2523 }
2524
type()2525 Type *SByte16::type()
2526 {
2527 return T(Ice::IceType_v16i8);
2528 }
2529
type()2530 Type *Short2::type()
2531 {
2532 return T(Type_v2i16);
2533 }
2534
type()2535 Type *UShort2::type()
2536 {
2537 return T(Type_v2i16);
2538 }
2539
Short4(RValue<Int4> cast)2540 Short4::Short4(RValue<Int4> cast)
2541 {
2542 int select[8] = { 0, 2, 4, 6, 0, 2, 4, 6 };
2543 Value *short8 = Nucleus::createBitCast(cast.value(), Short8::type());
2544 Value *packed = Nucleus::createShuffleVector(short8, short8, select);
2545
2546 Value *int2 = RValue<Int2>(Int2(As<Int4>(packed))).value();
2547 Value *short4 = Nucleus::createBitCast(int2, Short4::type());
2548
2549 storeValue(short4);
2550 }
2551
2552 // Short4::Short4(RValue<Float> cast)
2553 // {
2554 // }
2555
Short4(RValue<Float4> cast)2556 Short4::Short4(RValue<Float4> cast)
2557 {
2558 // TODO(b/150791192): Generalize and optimize
2559 auto smin = std::numeric_limits<short>::min();
2560 auto smax = std::numeric_limits<short>::max();
2561 *this = Short4(Int4(Max(Min(cast, Float4(smax)), Float4(smin))));
2562 }
2563
operator <<(RValue<Short4> lhs,unsigned char rhs)2564 RValue<Short4> operator<<(RValue<Short4> lhs, unsigned char rhs)
2565 {
2566 RR_DEBUG_INFO_UPDATE_LOC();
2567 if(emulateIntrinsics)
2568 {
2569 Short4 result;
2570 result = Insert(result, Extract(lhs, 0) << Short(rhs), 0);
2571 result = Insert(result, Extract(lhs, 1) << Short(rhs), 1);
2572 result = Insert(result, Extract(lhs, 2) << Short(rhs), 2);
2573 result = Insert(result, Extract(lhs, 3) << Short(rhs), 3);
2574
2575 return result;
2576 }
2577 else
2578 {
2579 return RValue<Short4>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
2580 }
2581 }
2582
operator >>(RValue<Short4> lhs,unsigned char rhs)2583 RValue<Short4> operator>>(RValue<Short4> lhs, unsigned char rhs)
2584 {
2585 RR_DEBUG_INFO_UPDATE_LOC();
2586 if(emulateIntrinsics)
2587 {
2588 Short4 result;
2589 result = Insert(result, Extract(lhs, 0) >> Short(rhs), 0);
2590 result = Insert(result, Extract(lhs, 1) >> Short(rhs), 1);
2591 result = Insert(result, Extract(lhs, 2) >> Short(rhs), 2);
2592 result = Insert(result, Extract(lhs, 3) >> Short(rhs), 3);
2593
2594 return result;
2595 }
2596 else
2597 {
2598 return RValue<Short4>(Nucleus::createAShr(lhs.value(), V(::context->getConstantInt32(rhs))));
2599 }
2600 }
2601
Max(RValue<Short4> x,RValue<Short4> y)2602 RValue<Short4> Max(RValue<Short4> x, RValue<Short4> y)
2603 {
2604 RR_DEBUG_INFO_UPDATE_LOC();
2605 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v8i1);
2606 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Sle, condition, x.value(), y.value());
2607 ::basicBlock->appendInst(cmp);
2608
2609 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2610 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
2611 ::basicBlock->appendInst(select);
2612
2613 return RValue<Short4>(V(result));
2614 }
2615
Min(RValue<Short4> x,RValue<Short4> y)2616 RValue<Short4> Min(RValue<Short4> x, RValue<Short4> y)
2617 {
2618 RR_DEBUG_INFO_UPDATE_LOC();
2619 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v8i1);
2620 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Sgt, condition, x.value(), y.value());
2621 ::basicBlock->appendInst(cmp);
2622
2623 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2624 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
2625 ::basicBlock->appendInst(select);
2626
2627 return RValue<Short4>(V(result));
2628 }
2629
SaturateSigned(RValue<Int> x)2630 RValue<Short> SaturateSigned(RValue<Int> x)
2631 {
2632 RR_DEBUG_INFO_UPDATE_LOC();
2633 return Short(IfThenElse(x > 0x7FFF, Int(0x7FFF), IfThenElse(x < -0x8000, Int(0x8000), x)));
2634 }
2635
AddSat(RValue<Short4> x,RValue<Short4> y)2636 RValue<Short4> AddSat(RValue<Short4> x, RValue<Short4> y)
2637 {
2638 RR_DEBUG_INFO_UPDATE_LOC();
2639 if(emulateIntrinsics)
2640 {
2641 Short4 result;
2642 result = Insert(result, SaturateSigned(Int(Extract(x, 0)) + Int(Extract(y, 0))), 0);
2643 result = Insert(result, SaturateSigned(Int(Extract(x, 1)) + Int(Extract(y, 1))), 1);
2644 result = Insert(result, SaturateSigned(Int(Extract(x, 2)) + Int(Extract(y, 2))), 2);
2645 result = Insert(result, SaturateSigned(Int(Extract(x, 3)) + Int(Extract(y, 3))), 3);
2646
2647 return result;
2648 }
2649 else
2650 {
2651 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2652 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::AddSaturateSigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2653 auto paddsw = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2654 paddsw->addArg(x.value());
2655 paddsw->addArg(y.value());
2656 ::basicBlock->appendInst(paddsw);
2657
2658 return RValue<Short4>(V(result));
2659 }
2660 }
2661
SubSat(RValue<Short4> x,RValue<Short4> y)2662 RValue<Short4> SubSat(RValue<Short4> x, RValue<Short4> y)
2663 {
2664 RR_DEBUG_INFO_UPDATE_LOC();
2665 if(emulateIntrinsics)
2666 {
2667 Short4 result;
2668 result = Insert(result, SaturateSigned(Int(Extract(x, 0)) - Int(Extract(y, 0))), 0);
2669 result = Insert(result, SaturateSigned(Int(Extract(x, 1)) - Int(Extract(y, 1))), 1);
2670 result = Insert(result, SaturateSigned(Int(Extract(x, 2)) - Int(Extract(y, 2))), 2);
2671 result = Insert(result, SaturateSigned(Int(Extract(x, 3)) - Int(Extract(y, 3))), 3);
2672
2673 return result;
2674 }
2675 else
2676 {
2677 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2678 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SubtractSaturateSigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2679 auto psubsw = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2680 psubsw->addArg(x.value());
2681 psubsw->addArg(y.value());
2682 ::basicBlock->appendInst(psubsw);
2683
2684 return RValue<Short4>(V(result));
2685 }
2686 }
2687
MulHigh(RValue<Short4> x,RValue<Short4> y)2688 RValue<Short4> MulHigh(RValue<Short4> x, RValue<Short4> y)
2689 {
2690 RR_DEBUG_INFO_UPDATE_LOC();
2691 if(emulateIntrinsics)
2692 {
2693 Short4 result;
2694 result = Insert(result, Short((Int(Extract(x, 0)) * Int(Extract(y, 0))) >> 16), 0);
2695 result = Insert(result, Short((Int(Extract(x, 1)) * Int(Extract(y, 1))) >> 16), 1);
2696 result = Insert(result, Short((Int(Extract(x, 2)) * Int(Extract(y, 2))) >> 16), 2);
2697 result = Insert(result, Short((Int(Extract(x, 3)) * Int(Extract(y, 3))) >> 16), 3);
2698
2699 return result;
2700 }
2701 else
2702 {
2703 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2704 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::MultiplyHighSigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2705 auto pmulhw = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2706 pmulhw->addArg(x.value());
2707 pmulhw->addArg(y.value());
2708 ::basicBlock->appendInst(pmulhw);
2709
2710 return RValue<Short4>(V(result));
2711 }
2712 }
2713
MulAdd(RValue<Short4> x,RValue<Short4> y)2714 RValue<Int2> MulAdd(RValue<Short4> x, RValue<Short4> y)
2715 {
2716 RR_DEBUG_INFO_UPDATE_LOC();
2717 if(emulateIntrinsics)
2718 {
2719 Int2 result;
2720 result = Insert(result, Int(Extract(x, 0)) * Int(Extract(y, 0)) + Int(Extract(x, 1)) * Int(Extract(y, 1)), 0);
2721 result = Insert(result, Int(Extract(x, 2)) * Int(Extract(y, 2)) + Int(Extract(x, 3)) * Int(Extract(y, 3)), 1);
2722
2723 return result;
2724 }
2725 else
2726 {
2727 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2728 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::MultiplyAddPairs, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2729 auto pmaddwd = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2730 pmaddwd->addArg(x.value());
2731 pmaddwd->addArg(y.value());
2732 ::basicBlock->appendInst(pmaddwd);
2733
2734 return As<Int2>(V(result));
2735 }
2736 }
2737
PackSigned(RValue<Short4> x,RValue<Short4> y)2738 RValue<SByte8> PackSigned(RValue<Short4> x, RValue<Short4> y)
2739 {
2740 RR_DEBUG_INFO_UPDATE_LOC();
2741 if(emulateIntrinsics)
2742 {
2743 SByte8 result;
2744 result = Insert(result, SaturateSigned(Extract(x, 0)), 0);
2745 result = Insert(result, SaturateSigned(Extract(x, 1)), 1);
2746 result = Insert(result, SaturateSigned(Extract(x, 2)), 2);
2747 result = Insert(result, SaturateSigned(Extract(x, 3)), 3);
2748 result = Insert(result, SaturateSigned(Extract(y, 0)), 4);
2749 result = Insert(result, SaturateSigned(Extract(y, 1)), 5);
2750 result = Insert(result, SaturateSigned(Extract(y, 2)), 6);
2751 result = Insert(result, SaturateSigned(Extract(y, 3)), 7);
2752
2753 return result;
2754 }
2755 else
2756 {
2757 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v16i8);
2758 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::VectorPackSigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2759 auto pack = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2760 pack->addArg(x.value());
2761 pack->addArg(y.value());
2762 ::basicBlock->appendInst(pack);
2763
2764 return As<SByte8>(Swizzle(As<Int4>(V(result)), 0x0202));
2765 }
2766 }
2767
PackUnsigned(RValue<Short4> x,RValue<Short4> y)2768 RValue<Byte8> PackUnsigned(RValue<Short4> x, RValue<Short4> y)
2769 {
2770 RR_DEBUG_INFO_UPDATE_LOC();
2771 if(emulateIntrinsics)
2772 {
2773 Byte8 result;
2774 result = Insert(result, SaturateUnsigned(Extract(x, 0)), 0);
2775 result = Insert(result, SaturateUnsigned(Extract(x, 1)), 1);
2776 result = Insert(result, SaturateUnsigned(Extract(x, 2)), 2);
2777 result = Insert(result, SaturateUnsigned(Extract(x, 3)), 3);
2778 result = Insert(result, SaturateUnsigned(Extract(y, 0)), 4);
2779 result = Insert(result, SaturateUnsigned(Extract(y, 1)), 5);
2780 result = Insert(result, SaturateUnsigned(Extract(y, 2)), 6);
2781 result = Insert(result, SaturateUnsigned(Extract(y, 3)), 7);
2782
2783 return result;
2784 }
2785 else
2786 {
2787 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v16i8);
2788 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::VectorPackUnsigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2789 auto pack = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2790 pack->addArg(x.value());
2791 pack->addArg(y.value());
2792 ::basicBlock->appendInst(pack);
2793
2794 return As<Byte8>(Swizzle(As<Int4>(V(result)), 0x0202));
2795 }
2796 }
2797
CmpGT(RValue<Short4> x,RValue<Short4> y)2798 RValue<Short4> CmpGT(RValue<Short4> x, RValue<Short4> y)
2799 {
2800 RR_DEBUG_INFO_UPDATE_LOC();
2801 return RValue<Short4>(createIntCompare(Ice::InstIcmp::Sgt, x.value(), y.value()));
2802 }
2803
CmpEQ(RValue<Short4> x,RValue<Short4> y)2804 RValue<Short4> CmpEQ(RValue<Short4> x, RValue<Short4> y)
2805 {
2806 RR_DEBUG_INFO_UPDATE_LOC();
2807 return RValue<Short4>(Nucleus::createICmpEQ(x.value(), y.value()));
2808 }
2809
type()2810 Type *Short4::type()
2811 {
2812 return T(Type_v4i16);
2813 }
2814
UShort4(RValue<Float4> cast,bool saturate)2815 UShort4::UShort4(RValue<Float4> cast, bool saturate)
2816 {
2817 if(saturate)
2818 {
2819 if(CPUID::SSE4_1)
2820 {
2821 // x86 produces 0x80000000 on 32-bit integer overflow/underflow.
2822 // PackUnsigned takes care of 0x0000 saturation.
2823 Int4 int4(Min(cast, Float4(0xFFFF)));
2824 *this = As<UShort4>(PackUnsigned(int4, int4));
2825 }
2826 else if(CPUID::ARM)
2827 {
2828 // ARM saturates the 32-bit integer result on overflow/underflow.
2829 Int4 int4(cast);
2830 *this = As<UShort4>(PackUnsigned(int4, int4));
2831 }
2832 else
2833 {
2834 *this = Short4(Int4(Max(Min(cast, Float4(0xFFFF)), Float4(0x0000))));
2835 }
2836 }
2837 else
2838 {
2839 *this = Short4(Int4(cast));
2840 }
2841 }
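// Illustrative values for the saturating path: Float4(70000.0f) is clamped to 65535.0f, converted
// to Int4(65535), and packed to 0xFFFF, while Float4(-5.0f) converts to Int4(-5) and PackUnsigned
// clamps it to 0x0000. The pre-clamp with Min() on the SSE4.1 path exists because, as noted above,
// out-of-range float-to-int conversion on x86 would otherwise yield 0x80000000.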
2842
Extract(RValue<UShort4> val,int i)2843 RValue<UShort> Extract(RValue<UShort4> val, int i)
2844 {
2845 return RValue<UShort>(Nucleus::createExtractElement(val.value(), UShort::type(), i));
2846 }
2847
operator <<(RValue<UShort4> lhs,unsigned char rhs)2848 RValue<UShort4> operator<<(RValue<UShort4> lhs, unsigned char rhs)
2849 {
2850 RR_DEBUG_INFO_UPDATE_LOC();
2851 if(emulateIntrinsics)
2853 {
2854 UShort4 result;
2855 result = Insert(result, Extract(lhs, 0) << UShort(rhs), 0);
2856 result = Insert(result, Extract(lhs, 1) << UShort(rhs), 1);
2857 result = Insert(result, Extract(lhs, 2) << UShort(rhs), 2);
2858 result = Insert(result, Extract(lhs, 3) << UShort(rhs), 3);
2859
2860 return result;
2861 }
2862 else
2863 {
2864 return RValue<UShort4>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
2865 }
2866 }
2867
operator >>(RValue<UShort4> lhs,unsigned char rhs)2868 RValue<UShort4> operator>>(RValue<UShort4> lhs, unsigned char rhs)
2869 {
2870 RR_DEBUG_INFO_UPDATE_LOC();
2871 if(emulateIntrinsics)
2872 {
2873 UShort4 result;
2874 result = Insert(result, Extract(lhs, 0) >> UShort(rhs), 0);
2875 result = Insert(result, Extract(lhs, 1) >> UShort(rhs), 1);
2876 result = Insert(result, Extract(lhs, 2) >> UShort(rhs), 2);
2877 result = Insert(result, Extract(lhs, 3) >> UShort(rhs), 3);
2878
2879 return result;
2880 }
2881 else
2882 {
2883 return RValue<UShort4>(Nucleus::createLShr(lhs.value(), V(::context->getConstantInt32(rhs))));
2884 }
2885 }
2886
Max(RValue<UShort4> x,RValue<UShort4> y)2887 RValue<UShort4> Max(RValue<UShort4> x, RValue<UShort4> y)
2888 {
2889 RR_DEBUG_INFO_UPDATE_LOC();
2890 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v8i1);
2891 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Ule, condition, x.value(), y.value());
2892 ::basicBlock->appendInst(cmp);
2893
2894 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2895 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
2896 ::basicBlock->appendInst(select);
2897
2898 return RValue<UShort4>(V(result));
2899 }
2900
Min(RValue<UShort4> x,RValue<UShort4> y)2901 RValue<UShort4> Min(RValue<UShort4> x, RValue<UShort4> y)
2902 {
2903 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v8i1);
2904 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Ugt, condition, x.value(), y.value());
2905 ::basicBlock->appendInst(cmp);
2906
2907 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2908 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
2909 ::basicBlock->appendInst(select);
2910
2911 return RValue<UShort4>(V(result));
2912 }
2913
SaturateUnsigned(RValue<Int> x)2914 RValue<UShort> SaturateUnsigned(RValue<Int> x)
2915 {
2916 RR_DEBUG_INFO_UPDATE_LOC();
2917 return UShort(IfThenElse(x > 0xFFFF, Int(0xFFFF), IfThenElse(x < 0, Int(0), x)));
2918 }
2919
AddSat(RValue<UShort4> x,RValue<UShort4> y)2920 RValue<UShort4> AddSat(RValue<UShort4> x, RValue<UShort4> y)
2921 {
2922 RR_DEBUG_INFO_UPDATE_LOC();
2923 if(emulateIntrinsics)
2924 {
2925 UShort4 result;
2926 result = Insert(result, SaturateUnsigned(Int(Extract(x, 0)) + Int(Extract(y, 0))), 0);
2927 result = Insert(result, SaturateUnsigned(Int(Extract(x, 1)) + Int(Extract(y, 1))), 1);
2928 result = Insert(result, SaturateUnsigned(Int(Extract(x, 2)) + Int(Extract(y, 2))), 2);
2929 result = Insert(result, SaturateUnsigned(Int(Extract(x, 3)) + Int(Extract(y, 3))), 3);
2930
2931 return result;
2932 }
2933 else
2934 {
2935 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2936 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::AddSaturateUnsigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2937 auto paddusw = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2938 paddusw->addArg(x.value());
2939 paddusw->addArg(y.value());
2940 ::basicBlock->appendInst(paddusw);
2941
2942 return RValue<UShort4>(V(result));
2943 }
2944 }
2945
SubSat(RValue<UShort4> x,RValue<UShort4> y)2946 RValue<UShort4> SubSat(RValue<UShort4> x, RValue<UShort4> y)
2947 {
2948 RR_DEBUG_INFO_UPDATE_LOC();
2949 if(emulateIntrinsics)
2950 {
2951 UShort4 result;
2952 result = Insert(result, SaturateUnsigned(Int(Extract(x, 0)) - Int(Extract(y, 0))), 0);
2953 result = Insert(result, SaturateUnsigned(Int(Extract(x, 1)) - Int(Extract(y, 1))), 1);
2954 result = Insert(result, SaturateUnsigned(Int(Extract(x, 2)) - Int(Extract(y, 2))), 2);
2955 result = Insert(result, SaturateUnsigned(Int(Extract(x, 3)) - Int(Extract(y, 3))), 3);
2956
2957 return result;
2958 }
2959 else
2960 {
2961 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2962 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SubtractSaturateUnsigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2963 auto psubusw = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2964 psubusw->addArg(x.value());
2965 psubusw->addArg(y.value());
2966 ::basicBlock->appendInst(psubusw);
2967
2968 return RValue<UShort4>(V(result));
2969 }
2970 }
2971
MulHigh(RValue<UShort4> x,RValue<UShort4> y)2972 RValue<UShort4> MulHigh(RValue<UShort4> x, RValue<UShort4> y)
2973 {
2974 RR_DEBUG_INFO_UPDATE_LOC();
2975 if(emulateIntrinsics)
2976 {
2977 UShort4 result;
2978 result = Insert(result, UShort((UInt(Extract(x, 0)) * UInt(Extract(y, 0))) >> 16), 0);
2979 result = Insert(result, UShort((UInt(Extract(x, 1)) * UInt(Extract(y, 1))) >> 16), 1);
2980 result = Insert(result, UShort((UInt(Extract(x, 2)) * UInt(Extract(y, 2))) >> 16), 2);
2981 result = Insert(result, UShort((UInt(Extract(x, 3)) * UInt(Extract(y, 3))) >> 16), 3);
2982
2983 return result;
2984 }
2985 else
2986 {
2987 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
2988 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::MultiplyHighUnsigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
2989 auto pmulhuw = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
2990 pmulhuw->addArg(x.value());
2991 pmulhuw->addArg(y.value());
2992 ::basicBlock->appendInst(pmulhuw);
2993
2994 return RValue<UShort4>(V(result));
2995 }
2996 }
2997
MulHigh(RValue<Int4> x,RValue<Int4> y)2998 RValue<Int4> MulHigh(RValue<Int4> x, RValue<Int4> y)
2999 {
3000 RR_DEBUG_INFO_UPDATE_LOC();
3001 // TODO: For x86, build an intrinsics version of this which uses shuffles + pmuludq.
3002
3003 // Scalarized implementation.
3004 Int4 result;
3005 result = Insert(result, Int((Long(Extract(x, 0)) * Long(Extract(y, 0))) >> Long(Int(32))), 0);
3006 result = Insert(result, Int((Long(Extract(x, 1)) * Long(Extract(y, 1))) >> Long(Int(32))), 1);
3007 result = Insert(result, Int((Long(Extract(x, 2)) * Long(Extract(y, 2))) >> Long(Int(32))), 2);
3008 result = Insert(result, Int((Long(Extract(x, 3)) * Long(Extract(y, 3))) >> Long(Int(32))), 3);
3009
3010 return result;
3011 }
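// e.g. for a lane with x = 0x40000000 and y = 4 the full 64-bit product is 2^32, so the returned
// high word is 1; for x = -1 and y = -1 the product is 1 and the high word is 0.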
3012
MulHigh(RValue<UInt4> x,RValue<UInt4> y)3013 RValue<UInt4> MulHigh(RValue<UInt4> x, RValue<UInt4> y)
3014 {
3015 RR_DEBUG_INFO_UPDATE_LOC();
3016 // TODO: For x86, build an intrinsics version of this which uses shuffles + pmuludq.
3017
3018 if(false) // Partial product based implementation.
3019 {
3020 auto xh = x >> 16;
3021 auto yh = y >> 16;
3022 auto xl = x & UInt4(0x0000FFFF);
3023 auto yl = y & UInt4(0x0000FFFF);
3024 auto xlyh = xl * yh;
3025 auto xhyl = xh * yl;
3026 auto xlyhh = xlyh >> 16;
3027 auto xhylh = xhyl >> 16;
3028 auto xlyhl = xlyh & UInt4(0x0000FFFF);
3029 auto xhyll = xhyl & UInt4(0x0000FFFF);
3030 auto xlylh = (xl * yl) >> 16;
3031 auto oflow = (xlyhl + xhyll + xlylh) >> 16;
3032
3033 return (xh * yh) + (xlyhh + xhylh) + oflow;
3034 }
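// The disabled branch above follows from splitting each operand into 16-bit halves:
//   x * y = (xh*2^16 + xl) * (yh*2^16 + yl)
//         = xh*yh*2^32 + (xh*yl + xl*yh)*2^16 + xl*yl
// so the upper 32 bits are xh*yh, plus the high halves of the two cross terms, plus the carry
// ('oflow') produced when the low halves of the cross terms and the high half of xl*yl are summed.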
3035
3036 // Scalarized implementation.
3037 Int4 result;
3038 result = Insert(result, Int((Long(UInt(Extract(As<Int4>(x), 0))) * Long(UInt(Extract(As<Int4>(y), 0)))) >> Long(Int(32))), 0);
3039 result = Insert(result, Int((Long(UInt(Extract(As<Int4>(x), 1))) * Long(UInt(Extract(As<Int4>(y), 1)))) >> Long(Int(32))), 1);
3040 result = Insert(result, Int((Long(UInt(Extract(As<Int4>(x), 2))) * Long(UInt(Extract(As<Int4>(y), 2)))) >> Long(Int(32))), 2);
3041 result = Insert(result, Int((Long(UInt(Extract(As<Int4>(x), 3))) * Long(UInt(Extract(As<Int4>(y), 3)))) >> Long(Int(32))), 3);
3042
3043 return As<UInt4>(result);
3044 }
3045
Average(RValue<UShort4> x,RValue<UShort4> y)3046 RValue<UShort4> Average(RValue<UShort4> x, RValue<UShort4> y)
3047 {
3048 RR_DEBUG_INFO_UPDATE_LOC();
3049 UNIMPLEMENTED_NO_BUG("RValue<UShort4> Average(RValue<UShort4> x, RValue<UShort4> y)");
3050 return UShort4(0);
3051 }
3052
type()3053 Type *UShort4::type()
3054 {
3055 return T(Type_v4i16);
3056 }
3057
Extract(RValue<Short8> val,int i)3058 RValue<Short> Extract(RValue<Short8> val, int i)
3059 {
3060 RR_DEBUG_INFO_UPDATE_LOC();
3061 return RValue<Short>(Nucleus::createExtractElement(val.value(), Short::type(), i));
3062 }
3063
Insert(RValue<Short8> val,RValue<Short> element,int i)3064 RValue<Short8> Insert(RValue<Short8> val, RValue<Short> element, int i)
3065 {
3066 RR_DEBUG_INFO_UPDATE_LOC();
3067 return RValue<Short8>(Nucleus::createInsertElement(val.value(), element.value(), i));
3068 }
3069
operator <<(RValue<Short8> lhs,unsigned char rhs)3070 RValue<Short8> operator<<(RValue<Short8> lhs, unsigned char rhs)
3071 {
3072 RR_DEBUG_INFO_UPDATE_LOC();
3073 if(emulateIntrinsics)
3074 {
3075 Short8 result;
3076 result = Insert(result, Extract(lhs, 0) << Short(rhs), 0);
3077 result = Insert(result, Extract(lhs, 1) << Short(rhs), 1);
3078 result = Insert(result, Extract(lhs, 2) << Short(rhs), 2);
3079 result = Insert(result, Extract(lhs, 3) << Short(rhs), 3);
3080 result = Insert(result, Extract(lhs, 4) << Short(rhs), 4);
3081 result = Insert(result, Extract(lhs, 5) << Short(rhs), 5);
3082 result = Insert(result, Extract(lhs, 6) << Short(rhs), 6);
3083 result = Insert(result, Extract(lhs, 7) << Short(rhs), 7);
3084
3085 return result;
3086 }
3087 else
3088 {
3089 return RValue<Short8>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
3090 }
3091 }
3092
operator >>(RValue<Short8> lhs,unsigned char rhs)3093 RValue<Short8> operator>>(RValue<Short8> lhs, unsigned char rhs)
3094 {
3095 RR_DEBUG_INFO_UPDATE_LOC();
3096 if(emulateIntrinsics)
3097 {
3098 Short8 result;
3099 result = Insert(result, Extract(lhs, 0) >> Short(rhs), 0);
3100 result = Insert(result, Extract(lhs, 1) >> Short(rhs), 1);
3101 result = Insert(result, Extract(lhs, 2) >> Short(rhs), 2);
3102 result = Insert(result, Extract(lhs, 3) >> Short(rhs), 3);
3103 result = Insert(result, Extract(lhs, 4) >> Short(rhs), 4);
3104 result = Insert(result, Extract(lhs, 5) >> Short(rhs), 5);
3105 result = Insert(result, Extract(lhs, 6) >> Short(rhs), 6);
3106 result = Insert(result, Extract(lhs, 7) >> Short(rhs), 7);
3107
3108 return result;
3109 }
3110 else
3111 {
3112 return RValue<Short8>(Nucleus::createAShr(lhs.value(), V(::context->getConstantInt32(rhs))));
3113 }
3114 }
3115
MulAdd(RValue<Short8> x,RValue<Short8> y)3116 RValue<Int4> MulAdd(RValue<Short8> x, RValue<Short8> y)
3117 {
3118 RR_DEBUG_INFO_UPDATE_LOC();
3119 UNIMPLEMENTED_NO_BUG("RValue<Int4> MulAdd(RValue<Short8> x, RValue<Short8> y)");
3120 return Int4(0);
3121 }
3122
MulHigh(RValue<Short8> x,RValue<Short8> y)3123 RValue<Short8> MulHigh(RValue<Short8> x, RValue<Short8> y)
3124 {
3125 RR_DEBUG_INFO_UPDATE_LOC();
3126 UNIMPLEMENTED_NO_BUG("RValue<Short8> MulHigh(RValue<Short8> x, RValue<Short8> y)");
3127 return Short8(0);
3128 }
3129
type()3130 Type *Short8::type()
3131 {
3132 return T(Ice::IceType_v8i16);
3133 }
3134
Extract(RValue<UShort8> val,int i)3135 RValue<UShort> Extract(RValue<UShort8> val, int i)
3136 {
3137 RR_DEBUG_INFO_UPDATE_LOC();
3138 return RValue<UShort>(Nucleus::createExtractElement(val.value(), UShort::type(), i));
3139 }
3140
3141 RValue<UShort8> Insert(RValue<UShort8> val, RValue<UShort> element, int i)
3142 {
3143 RR_DEBUG_INFO_UPDATE_LOC();
3144 return RValue<UShort8>(Nucleus::createInsertElement(val.value(), element.value(), i));
3145 }
3146
3147 RValue<UShort8> operator<<(RValue<UShort8> lhs, unsigned char rhs)
3148 {
3149 RR_DEBUG_INFO_UPDATE_LOC();
3150 if(emulateIntrinsics)
3151 {
3152 UShort8 result;
3153 result = Insert(result, Extract(lhs, 0) << UShort(rhs), 0);
3154 result = Insert(result, Extract(lhs, 1) << UShort(rhs), 1);
3155 result = Insert(result, Extract(lhs, 2) << UShort(rhs), 2);
3156 result = Insert(result, Extract(lhs, 3) << UShort(rhs), 3);
3157 result = Insert(result, Extract(lhs, 4) << UShort(rhs), 4);
3158 result = Insert(result, Extract(lhs, 5) << UShort(rhs), 5);
3159 result = Insert(result, Extract(lhs, 6) << UShort(rhs), 6);
3160 result = Insert(result, Extract(lhs, 7) << UShort(rhs), 7);
3161
3162 return result;
3163 }
3164 else
3165 {
3166 return RValue<UShort8>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
3167 }
3168 }
3169
3170 RValue<UShort8> operator>>(RValue<UShort8> lhs, unsigned char rhs)
3171 {
3172 RR_DEBUG_INFO_UPDATE_LOC();
3173 if(emulateIntrinsics)
3174 {
3175 UShort8 result;
3176 result = Insert(result, Extract(lhs, 0) >> UShort(rhs), 0);
3177 result = Insert(result, Extract(lhs, 1) >> UShort(rhs), 1);
3178 result = Insert(result, Extract(lhs, 2) >> UShort(rhs), 2);
3179 result = Insert(result, Extract(lhs, 3) >> UShort(rhs), 3);
3180 result = Insert(result, Extract(lhs, 4) >> UShort(rhs), 4);
3181 result = Insert(result, Extract(lhs, 5) >> UShort(rhs), 5);
3182 result = Insert(result, Extract(lhs, 6) >> UShort(rhs), 6);
3183 result = Insert(result, Extract(lhs, 7) >> UShort(rhs), 7);
3184
3185 return result;
3186 }
3187 else
3188 {
3189 return RValue<UShort8>(Nucleus::createLShr(lhs.value(), V(::context->getConstantInt32(rhs))));
3190 }
3191 }
3192
3193 RValue<UShort8> MulHigh(RValue<UShort8> x, RValue<UShort8> y)
3194 {
3195 RR_DEBUG_INFO_UPDATE_LOC();
3196 UNIMPLEMENTED_NO_BUG("RValue<UShort8> MulHigh(RValue<UShort8> x, RValue<UShort8> y)");
3197 return UShort8(0);
3198 }
3199
3200 Type *UShort8::type()
3201 {
3202 return T(Ice::IceType_v8i16);
3203 }
3204
3205 RValue<Int> operator++(Int &val, int)  // Post-increment
3206 {
3207 RR_DEBUG_INFO_UPDATE_LOC();
3208 RValue<Int> res = val;
3209 val += 1;
3210 return res;
3211 }
3212
3213 const Int &operator++(Int &val)  // Pre-increment
3214 {
3215 RR_DEBUG_INFO_UPDATE_LOC();
3216 val += 1;
3217 return val;
3218 }
3219
3220 RValue<Int> operator--(Int &val, int)  // Post-decrement
3221 {
3222 RR_DEBUG_INFO_UPDATE_LOC();
3223 RValue<Int> res = val;
3224 val -= 1;
3225 return res;
3226 }
3227
3228 const Int &operator--(Int &val)  // Pre-decrement
3229 {
3230 RR_DEBUG_INFO_UPDATE_LOC();
3231 val -= 1;
3232 return val;
3233 }
3234
3235 RValue<Int> RoundInt(RValue<Float> cast)
3236 {
3237 RR_DEBUG_INFO_UPDATE_LOC();
3238 if(emulateIntrinsics || CPUID::ARM)
3239 {
3240 // Push the fractional part off the mantissa. Accurate up to +/-2^22.
3241 return Int((cast + Float(0x00C00000)) - Float(0x00C00000));
3242 }
3243 else
3244 {
3245 Ice::Variable *result = ::function->makeVariable(Ice::IceType_i32);
3246 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Nearbyint, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
3247 auto nearbyint = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
3248 nearbyint->addArg(cast.value());
3249 ::basicBlock->appendInst(nearbyint);
3250
3251 return RValue<Int>(V(result));
3252 }
3253 }
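
// Illustrative note on the emulated RoundInt() above (a sketch, not generated code): adding and
// subtracting Float(0x00C00000), i.e. 12582912.0f == 1.5 * 2^23, forces the FPU to drop the
// fractional bits under round-to-nearest, so for example
//   (2.7f + 12582912.0f) - 12582912.0f == 3.0f
// The trick is only exact for inputs of magnitude up to about 2^22, as the comment above notes.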
3254
3255 Type *Int::type()
3256 {
3257 return T(Ice::IceType_i32);
3258 }
3259
3260 Type *Long::type()
3261 {
3262 return T(Ice::IceType_i64);
3263 }
3264
3265 UInt::UInt(RValue<Float> cast)
3266 {
3267 RR_DEBUG_INFO_UPDATE_LOC();
3268 // Smallest positive value representable in UInt, but not in Int
3269 const unsigned int ustart = 0x80000000u;
3270 const float ustartf = float(ustart);
3271
3272 // If the value is negative, store 0, otherwise store the result of the conversion
3273 storeValue((~(As<Int>(cast) >> 31) &
3274 // Check if the value can be represented as an Int
3275 IfThenElse(cast >= ustartf,
3276 // If the value is too large, subtract ustart and re-add it after conversion.
3277 As<Int>(As<UInt>(Int(cast - Float(ustartf))) + UInt(ustart)),
3278 // Otherwise, just convert normally
3279 Int(cast)))
3280 .value());
3281 }
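
// Sketch of the conversion idea used by UInt(RValue<Float>) above, written as plain C for
// illustration only (hypothetical helper, not part of this file):
//   uint32_t floatToUInt(float f)
//   {
//       if(f < 0.0f) return 0;                      // negative inputs clamp to 0
//       if(f >= 2147483648.0f)                      // too large for a signed conversion
//           return (uint32_t)(int32_t)(f - 2147483648.0f) + 0x80000000u;
//       return (uint32_t)(int32_t)f;                // fits in Int, convert directly
//   }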
3282
3283 RValue<UInt> operator++(UInt &val, int)  // Post-increment
3284 {
3285 RR_DEBUG_INFO_UPDATE_LOC();
3286 RValue<UInt> res = val;
3287 val += 1;
3288 return res;
3289 }
3290
3291 const UInt &operator++(UInt &val)  // Pre-increment
3292 {
3293 RR_DEBUG_INFO_UPDATE_LOC();
3294 val += 1;
3295 return val;
3296 }
3297
3298 RValue<UInt> operator--(UInt &val, int)  // Post-decrement
3299 {
3300 RR_DEBUG_INFO_UPDATE_LOC();
3301 RValue<UInt> res = val;
3302 val -= 1;
3303 return res;
3304 }
3305
3306 const UInt &operator--(UInt &val)  // Pre-decrement
3307 {
3308 RR_DEBUG_INFO_UPDATE_LOC();
3309 val -= 1;
3310 return val;
3311 }
3312
3313 // RValue<UInt> RoundUInt(RValue<Float> cast)
3314 // {
3315 // ASSERT(false && "UNIMPLEMENTED"); return RValue<UInt>(V(nullptr));
3316 // }
3317
3318 Type *UInt::type()
3319 {
3320 return T(Ice::IceType_i32);
3321 }
3322
3323 // Int2::Int2(RValue<Int> cast)
3324 // {
3325 // Value *extend = Nucleus::createZExt(cast.value(), Long::type());
3326 // Value *vector = Nucleus::createBitCast(extend, Int2::type());
3327 //
3328 // Constant *shuffle[2];
3329 // shuffle[0] = Nucleus::createConstantInt(0);
3330 // shuffle[1] = Nucleus::createConstantInt(0);
3331 //
3332 // Value *replicate = Nucleus::createShuffleVector(vector, UndefValue::get(Int2::type()), Nucleus::createConstantVector(shuffle, 2));
3333 //
3334 // storeValue(replicate);
3335 // }
3336
3337 RValue<Int2> operator<<(RValue<Int2> lhs, unsigned char rhs)
3338 {
3339 RR_DEBUG_INFO_UPDATE_LOC();
3340 if(emulateIntrinsics)
3341 {
3342 Int2 result;
3343 result = Insert(result, Extract(lhs, 0) << Int(rhs), 0);
3344 result = Insert(result, Extract(lhs, 1) << Int(rhs), 1);
3345
3346 return result;
3347 }
3348 else
3349 {
3350 return RValue<Int2>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
3351 }
3352 }
3353
3354 RValue<Int2> operator>>(RValue<Int2> lhs, unsigned char rhs)
3355 {
3356 RR_DEBUG_INFO_UPDATE_LOC();
3357 if(emulateIntrinsics)
3358 {
3359 Int2 result;
3360 result = Insert(result, Extract(lhs, 0) >> Int(rhs), 0);
3361 result = Insert(result, Extract(lhs, 1) >> Int(rhs), 1);
3362
3363 return result;
3364 }
3365 else
3366 {
3367 return RValue<Int2>(Nucleus::createAShr(lhs.value(), V(::context->getConstantInt32(rhs))));
3368 }
3369 }
3370
3371 Type *Int2::type()
3372 {
3373 return T(Type_v2i32);
3374 }
3375
3376 RValue<UInt2> operator<<(RValue<UInt2> lhs, unsigned char rhs)
3377 {
3378 RR_DEBUG_INFO_UPDATE_LOC();
3379 if(emulateIntrinsics)
3380 {
3381 UInt2 result;
3382 result = Insert(result, Extract(lhs, 0) << UInt(rhs), 0);
3383 result = Insert(result, Extract(lhs, 1) << UInt(rhs), 1);
3384
3385 return result;
3386 }
3387 else
3388 {
3389 return RValue<UInt2>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
3390 }
3391 }
3392
3393 RValue<UInt2> operator>>(RValue<UInt2> lhs, unsigned char rhs)
3394 {
3395 RR_DEBUG_INFO_UPDATE_LOC();
3396 if(emulateIntrinsics)
3397 {
3398 UInt2 result;
3399 result = Insert(result, Extract(lhs, 0) >> UInt(rhs), 0);
3400 result = Insert(result, Extract(lhs, 1) >> UInt(rhs), 1);
3401
3402 return result;
3403 }
3404 else
3405 {
3406 return RValue<UInt2>(Nucleus::createLShr(lhs.value(), V(::context->getConstantInt32(rhs))));
3407 }
3408 }
3409
3410 Type *UInt2::type()
3411 {
3412 return T(Type_v2i32);
3413 }
3414
3415 Int4::Int4(RValue<Byte4> cast)
3416 : XYZW(this)
3417 {
3418 RR_DEBUG_INFO_UPDATE_LOC();
3419 Value *x = Nucleus::createBitCast(cast.value(), Int::type());
3420 Value *a = Nucleus::createInsertElement(loadValue(), x, 0);
3421
3422 Value *e;
3423 int swizzle[16] = { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 };
3424 Value *b = Nucleus::createBitCast(a, Byte16::type());
3425 Value *c = Nucleus::createShuffleVector(b, Nucleus::createNullValue(Byte16::type()), swizzle);
3426
3427 int swizzle2[8] = { 0, 8, 1, 9, 2, 10, 3, 11 };
3428 Value *d = Nucleus::createBitCast(c, Short8::type());
3429 e = Nucleus::createShuffleVector(d, Nucleus::createNullValue(Short8::type()), swizzle2);
3430
3431 Value *f = Nucleus::createBitCast(e, Int4::type());
3432 storeValue(f);
3433 }
3434
3435 Int4::Int4(RValue<SByte4> cast)
3436 : XYZW(this)
3437 {
3438 RR_DEBUG_INFO_UPDATE_LOC();
3439 Value *x = Nucleus::createBitCast(cast.value(), Int::type());
3440 Value *a = Nucleus::createInsertElement(loadValue(), x, 0);
3441
3442 int swizzle[16] = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7 };
3443 Value *b = Nucleus::createBitCast(a, Byte16::type());
3444 Value *c = Nucleus::createShuffleVector(b, b, swizzle);
3445
3446 int swizzle2[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
3447 Value *d = Nucleus::createBitCast(c, Short8::type());
3448 Value *e = Nucleus::createShuffleVector(d, d, swizzle2);
3449
3450 *this = As<Int4>(e) >> 24;
3451 }
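
// The SByte4 constructor above sign-extends without a dedicated extend instruction: each byte is
// duplicated into both halves of a 16-bit lane, each 16-bit lane into both halves of a 32-bit
// lane, and the arithmetic >> 24 replicates the sign bit. For example the byte 0xFE (-2) becomes
// 0xFEFEFEFE, and 0xFEFEFEFE >> 24 (arithmetic) is 0xFFFFFFFE, i.e. -2 as a 32-bit integer.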
3452
3453 Int4::Int4(RValue<Short4> cast)
3454 : XYZW(this)
3455 {
3456 RR_DEBUG_INFO_UPDATE_LOC();
3457 int swizzle[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
3458 Value *c = Nucleus::createShuffleVector(cast.value(), cast.value(), swizzle);
3459
3460 *this = As<Int4>(c) >> 16;
3461 }
3462
3463 Int4::Int4(RValue<UShort4> cast)
3464 : XYZW(this)
3465 {
3466 RR_DEBUG_INFO_UPDATE_LOC();
3467 int swizzle[8] = { 0, 8, 1, 9, 2, 10, 3, 11 };
3468 Value *c = Nucleus::createShuffleVector(cast.value(), Short8(0, 0, 0, 0, 0, 0, 0, 0).loadValue(), swizzle);
3469 Value *d = Nucleus::createBitCast(c, Int4::type());
3470 storeValue(d);
3471 }
3472
3473 Int4::Int4(RValue<Int> rhs)
3474 : XYZW(this)
3475 {
3476 RR_DEBUG_INFO_UPDATE_LOC();
3477 Value *vector = Nucleus::createBitCast(rhs.value(), Int4::type());
3478
3479 int swizzle[4] = { 0, 0, 0, 0 };
3480 Value *replicate = Nucleus::createShuffleVector(vector, vector, swizzle);
3481
3482 storeValue(replicate);
3483 }
3484
3485 RValue<Int4> operator<<(RValue<Int4> lhs, unsigned char rhs)
3486 {
3487 RR_DEBUG_INFO_UPDATE_LOC();
3488 if(emulateIntrinsics)
3489 {
3490 Int4 result;
3491 result = Insert(result, Extract(lhs, 0) << Int(rhs), 0);
3492 result = Insert(result, Extract(lhs, 1) << Int(rhs), 1);
3493 result = Insert(result, Extract(lhs, 2) << Int(rhs), 2);
3494 result = Insert(result, Extract(lhs, 3) << Int(rhs), 3);
3495
3496 return result;
3497 }
3498 else
3499 {
3500 return RValue<Int4>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
3501 }
3502 }
3503
3504 RValue<Int4> operator>>(RValue<Int4> lhs, unsigned char rhs)
3505 {
3506 RR_DEBUG_INFO_UPDATE_LOC();
3507 if(emulateIntrinsics)
3508 {
3509 Int4 result;
3510 result = Insert(result, Extract(lhs, 0) >> Int(rhs), 0);
3511 result = Insert(result, Extract(lhs, 1) >> Int(rhs), 1);
3512 result = Insert(result, Extract(lhs, 2) >> Int(rhs), 2);
3513 result = Insert(result, Extract(lhs, 3) >> Int(rhs), 3);
3514
3515 return result;
3516 }
3517 else
3518 {
3519 return RValue<Int4>(Nucleus::createAShr(lhs.value(), V(::context->getConstantInt32(rhs))));
3520 }
3521 }
3522
3523 RValue<Int4> CmpEQ(RValue<Int4> x, RValue<Int4> y)
3524 {
3525 RR_DEBUG_INFO_UPDATE_LOC();
3526 return RValue<Int4>(Nucleus::createICmpEQ(x.value(), y.value()));
3527 }
3528
3529 RValue<Int4> CmpLT(RValue<Int4> x, RValue<Int4> y)
3530 {
3531 RR_DEBUG_INFO_UPDATE_LOC();
3532 return RValue<Int4>(Nucleus::createICmpSLT(x.value(), y.value()));
3533 }
3534
3535 RValue<Int4> CmpLE(RValue<Int4> x, RValue<Int4> y)
3536 {
3537 RR_DEBUG_INFO_UPDATE_LOC();
3538 return RValue<Int4>(Nucleus::createICmpSLE(x.value(), y.value()));
3539 }
3540
3541 RValue<Int4> CmpNEQ(RValue<Int4> x, RValue<Int4> y)
3542 {
3543 RR_DEBUG_INFO_UPDATE_LOC();
3544 return RValue<Int4>(Nucleus::createICmpNE(x.value(), y.value()));
3545 }
3546
3547 RValue<Int4> CmpNLT(RValue<Int4> x, RValue<Int4> y)
3548 {
3549 RR_DEBUG_INFO_UPDATE_LOC();
3550 return RValue<Int4>(Nucleus::createICmpSGE(x.value(), y.value()));
3551 }
3552
3553 RValue<Int4> CmpNLE(RValue<Int4> x, RValue<Int4> y)
3554 {
3555 RR_DEBUG_INFO_UPDATE_LOC();
3556 return RValue<Int4>(Nucleus::createICmpSGT(x.value(), y.value()));
3557 }
3558
3559 RValue<Int4> Abs(RValue<Int4> x)
3560 {
3561 // TODO: Optimize.
3562 auto negative = x >> 31;
3563 return (x ^ negative) - negative;
3564 }
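
// Abs() above uses the branchless two's complement identity abs(x) == (x ^ s) - s with
// s = x >> 31 (0 for non-negative x, all ones for negative x). For x = -5: s = -1, and
// (-5 ^ -1) - (-1) == 4 + 1 == 5. INT_MIN maps to itself, the usual wrap-around result.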
3565
3566 RValue<Int4> Max(RValue<Int4> x, RValue<Int4> y)
3567 {
3568 RR_DEBUG_INFO_UPDATE_LOC();
3569 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v4i1);
3570 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Sle, condition, x.value(), y.value());
3571 ::basicBlock->appendInst(cmp);
3572
3573 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4i32);
3574 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
3575 ::basicBlock->appendInst(select);
3576
3577 return RValue<Int4>(V(result));
3578 }
3579
3580 RValue<Int4> Min(RValue<Int4> x, RValue<Int4> y)
3581 {
3582 RR_DEBUG_INFO_UPDATE_LOC();
3583 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v4i1);
3584 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Sgt, condition, x.value(), y.value());
3585 ::basicBlock->appendInst(cmp);
3586
3587 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4i32);
3588 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
3589 ::basicBlock->appendInst(select);
3590
3591 return RValue<Int4>(V(result));
3592 }
3593
3594 RValue<Int4> RoundInt(RValue<Float4> cast)
3595 {
3596 RR_DEBUG_INFO_UPDATE_LOC();
3597 if(emulateIntrinsics || CPUID::ARM)
3598 {
3599 // Push the fractional part off the mantissa. Accurate up to +/-2^22.
3600 return Int4((cast + Float4(0x00C00000)) - Float4(0x00C00000));
3601 }
3602 else
3603 {
3604 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4i32);
3605 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Nearbyint, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
3606 auto nearbyint = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
3607 nearbyint->addArg(cast.value());
3608 ::basicBlock->appendInst(nearbyint);
3609
3610 return RValue<Int4>(V(result));
3611 }
3612 }
3613
3614 RValue<Int4> RoundIntClamped(RValue<Float4> cast)
3615 {
3616 RR_DEBUG_INFO_UPDATE_LOC();
3617
3618 // cvtps2dq produces 0x80000000, a negative value, for input larger than
3619 // 2147483520.0, so clamp to 2147483520. Values less than -2147483520.0
3620 // saturate to 0x80000000.
3621 RValue<Float4> clamped = Min(cast, Float4(0x7FFFFF80));
3622
3623 if(emulateIntrinsics || CPUID::ARM)
3624 {
3625 // Push the fractional part off the mantissa. Accurate up to +/-2^22.
3626 return Int4((clamped + Float4(0x00C00000)) - Float4(0x00C00000));
3627 }
3628 else
3629 {
3630 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4i32);
3631 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Nearbyint, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
3632 auto nearbyint = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
3633 nearbyint->addArg(clamped.value());
3634 ::basicBlock->appendInst(nearbyint);
3635
3636 return RValue<Int4>(V(result));
3637 }
3638 }
3639
3640 RValue<Short8> PackSigned(RValue<Int4> x, RValue<Int4> y)
3641 {
3642 RR_DEBUG_INFO_UPDATE_LOC();
3643 if(emulateIntrinsics)
3644 {
3645 Short8 result;
3646 result = Insert(result, SaturateSigned(Extract(x, 0)), 0);
3647 result = Insert(result, SaturateSigned(Extract(x, 1)), 1);
3648 result = Insert(result, SaturateSigned(Extract(x, 2)), 2);
3649 result = Insert(result, SaturateSigned(Extract(x, 3)), 3);
3650 result = Insert(result, SaturateSigned(Extract(y, 0)), 4);
3651 result = Insert(result, SaturateSigned(Extract(y, 1)), 5);
3652 result = Insert(result, SaturateSigned(Extract(y, 2)), 6);
3653 result = Insert(result, SaturateSigned(Extract(y, 3)), 7);
3654
3655 return result;
3656 }
3657 else
3658 {
3659 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
3660 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::VectorPackSigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
3661 auto pack = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
3662 pack->addArg(x.value());
3663 pack->addArg(y.value());
3664 ::basicBlock->appendInst(pack);
3665
3666 return RValue<Short8>(V(result));
3667 }
3668 }
3669
3670 RValue<UShort8> PackUnsigned(RValue<Int4> x, RValue<Int4> y)
3671 {
3672 RR_DEBUG_INFO_UPDATE_LOC();
3673 if(emulateIntrinsics || !(CPUID::SSE4_1 || CPUID::ARM))
3674 {
3675 RValue<Int4> sx = As<Int4>(x);
3676 RValue<Int4> bx = (sx & ~(sx >> 31)) - Int4(0x8000);
3677
3678 RValue<Int4> sy = As<Int4>(y);
3679 RValue<Int4> by = (sy & ~(sy >> 31)) - Int4(0x8000);
3680
3681 return As<UShort8>(PackSigned(bx, by) + Short8(0x8000u));
3682 }
3683 else
3684 {
3685 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v8i16);
3686 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::VectorPackUnsigned, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
3687 auto pack = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
3688 pack->addArg(x.value());
3689 pack->addArg(y.value());
3690 ::basicBlock->appendInst(pack);
3691
3692 return RValue<UShort8>(V(result));
3693 }
3694 }
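
// The emulated path of PackUnsigned() above reuses the signed saturating pack by biasing into
// signed range: negatives are clamped to 0, 0x8000 is subtracted, the values are packed with
// signed saturation, and 0x8000 is added back. This maps [0, 65535] onto [-32768, 32767] and
// back, so e.g. 70000 -> 37232 -> saturates to 32767 -> 65535, the correct unsigned result.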
3695
3696 RValue<Int> SignMask(RValue<Int4> x)
3697 {
3698 RR_DEBUG_INFO_UPDATE_LOC();
3699 if(emulateIntrinsics || CPUID::ARM)
3700 {
3701 Int4 xx = (x >> 31) & Int4(0x00000001, 0x00000002, 0x00000004, 0x00000008);
3702 return Extract(xx, 0) | Extract(xx, 1) | Extract(xx, 2) | Extract(xx, 3);
3703 }
3704 else
3705 {
3706 Ice::Variable *result = ::function->makeVariable(Ice::IceType_i32);
3707 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SignMask, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
3708 auto movmsk = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
3709 movmsk->addArg(x.value());
3710 ::basicBlock->appendInst(movmsk);
3711
3712 return RValue<Int>(V(result));
3713 }
3714 }
3715
3716 Type *Int4::type()
3717 {
3718 return T(Ice::IceType_v4i32);
3719 }
3720
3721 UInt4::UInt4(RValue<Float4> cast)
3722 : XYZW(this)
3723 {
3724 RR_DEBUG_INFO_UPDATE_LOC();
3725 // Smallest positive value representable in UInt, but not in Int
3726 const unsigned int ustart = 0x80000000u;
3727 const float ustartf = float(ustart);
3728
3729 // Check if the value can be represented as an Int
3730 Int4 uiValue = CmpNLT(cast, Float4(ustartf));
3731 // If the value is too large, subtract ustart and re-add it after conversion.
3732 uiValue = (uiValue & As<Int4>(As<UInt4>(Int4(cast - Float4(ustartf))) + UInt4(ustart))) |
3733 // Otherwise, just convert normally
3734 (~uiValue & Int4(cast));
3735 // If the value is negative, store 0, otherwise store the result of the conversion
3736 storeValue((~(As<Int4>(cast) >> 31) & uiValue).value());
3737 }
3738
3739 UInt4::UInt4(RValue<UInt> rhs)
3740 : XYZW(this)
3741 {
3742 RR_DEBUG_INFO_UPDATE_LOC();
3743 Value *vector = Nucleus::createBitCast(rhs.value(), UInt4::type());
3744
3745 int swizzle[4] = { 0, 0, 0, 0 };
3746 Value *replicate = Nucleus::createShuffleVector(vector, vector, swizzle);
3747
3748 storeValue(replicate);
3749 }
3750
3751 RValue<UInt4> operator<<(RValue<UInt4> lhs, unsigned char rhs)
3752 {
3753 RR_DEBUG_INFO_UPDATE_LOC();
3754 if(emulateIntrinsics)
3755 {
3756 UInt4 result;
3757 result = Insert(result, Extract(lhs, 0) << UInt(rhs), 0);
3758 result = Insert(result, Extract(lhs, 1) << UInt(rhs), 1);
3759 result = Insert(result, Extract(lhs, 2) << UInt(rhs), 2);
3760 result = Insert(result, Extract(lhs, 3) << UInt(rhs), 3);
3761
3762 return result;
3763 }
3764 else
3765 {
3766 return RValue<UInt4>(Nucleus::createShl(lhs.value(), V(::context->getConstantInt32(rhs))));
3767 }
3768 }
3769
3770 RValue<UInt4> operator>>(RValue<UInt4> lhs, unsigned char rhs)
3771 {
3772 RR_DEBUG_INFO_UPDATE_LOC();
3773 if(emulateIntrinsics)
3774 {
3775 UInt4 result;
3776 result = Insert(result, Extract(lhs, 0) >> UInt(rhs), 0);
3777 result = Insert(result, Extract(lhs, 1) >> UInt(rhs), 1);
3778 result = Insert(result, Extract(lhs, 2) >> UInt(rhs), 2);
3779 result = Insert(result, Extract(lhs, 3) >> UInt(rhs), 3);
3780
3781 return result;
3782 }
3783 else
3784 {
3785 return RValue<UInt4>(Nucleus::createLShr(lhs.value(), V(::context->getConstantInt32(rhs))));
3786 }
3787 }
3788
3789 RValue<UInt4> CmpEQ(RValue<UInt4> x, RValue<UInt4> y)
3790 {
3791 RR_DEBUG_INFO_UPDATE_LOC();
3792 return RValue<UInt4>(Nucleus::createICmpEQ(x.value(), y.value()));
3793 }
3794
3795 RValue<UInt4> CmpLT(RValue<UInt4> x, RValue<UInt4> y)
3796 {
3797 RR_DEBUG_INFO_UPDATE_LOC();
3798 return RValue<UInt4>(Nucleus::createICmpULT(x.value(), y.value()));
3799 }
3800
3801 RValue<UInt4> CmpLE(RValue<UInt4> x, RValue<UInt4> y)
3802 {
3803 RR_DEBUG_INFO_UPDATE_LOC();
3804 return RValue<UInt4>(Nucleus::createICmpULE(x.value(), y.value()));
3805 }
3806
3807 RValue<UInt4> CmpNEQ(RValue<UInt4> x, RValue<UInt4> y)
3808 {
3809 RR_DEBUG_INFO_UPDATE_LOC();
3810 return RValue<UInt4>(Nucleus::createICmpNE(x.value(), y.value()));
3811 }
3812
3813 RValue<UInt4> CmpNLT(RValue<UInt4> x, RValue<UInt4> y)
3814 {
3815 RR_DEBUG_INFO_UPDATE_LOC();
3816 return RValue<UInt4>(Nucleus::createICmpUGE(x.value(), y.value()));
3817 }
3818
3819 RValue<UInt4> CmpNLE(RValue<UInt4> x, RValue<UInt4> y)
3820 {
3821 RR_DEBUG_INFO_UPDATE_LOC();
3822 return RValue<UInt4>(Nucleus::createICmpUGT(x.value(), y.value()));
3823 }
3824
3825 RValue<UInt4> Max(RValue<UInt4> x, RValue<UInt4> y)
3826 {
3827 RR_DEBUG_INFO_UPDATE_LOC();
3828 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v4i1);
3829 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Ule, condition, x.value(), y.value());
3830 ::basicBlock->appendInst(cmp);
3831
3832 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4i32);
3833 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
3834 ::basicBlock->appendInst(select);
3835
3836 return RValue<UInt4>(V(result));
3837 }
3838
3839 RValue<UInt4> Min(RValue<UInt4> x, RValue<UInt4> y)
3840 {
3841 RR_DEBUG_INFO_UPDATE_LOC();
3842 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v4i1);
3843 auto cmp = Ice::InstIcmp::create(::function, Ice::InstIcmp::Ugt, condition, x.value(), y.value());
3844 ::basicBlock->appendInst(cmp);
3845
3846 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4i32);
3847 auto select = Ice::InstSelect::create(::function, result, condition, y.value(), x.value());
3848 ::basicBlock->appendInst(select);
3849
3850 return RValue<UInt4>(V(result));
3851 }
3852
3853 Type *UInt4::type()
3854 {
3855 return T(Ice::IceType_v4i32);
3856 }
3857
3858 Type *Half::type()
3859 {
3860 return T(Ice::IceType_i16);
3861 }
3862
3863 RValue<Float> Rcp_pp(RValue<Float> x, bool exactAtPow2)
3864 {
3865 RR_DEBUG_INFO_UPDATE_LOC();
3866 return 1.0f / x;
3867 }
3868
3869 RValue<Float> RcpSqrt_pp(RValue<Float> x)
3870 {
3871 RR_DEBUG_INFO_UPDATE_LOC();
3872 return Rcp_pp(Sqrt(x));
3873 }
3874
3875 RValue<Float> Sqrt(RValue<Float> x)
3876 {
3877 RR_DEBUG_INFO_UPDATE_LOC();
3878 Ice::Variable *result = ::function->makeVariable(Ice::IceType_f32);
3879 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Sqrt, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
3880 auto sqrt = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
3881 sqrt->addArg(x.value());
3882 ::basicBlock->appendInst(sqrt);
3883
3884 return RValue<Float>(V(result));
3885 }
3886
3887 RValue<Float> Round(RValue<Float> x)
3888 {
3889 RR_DEBUG_INFO_UPDATE_LOC();
3890 return Float4(Round(Float4(x))).x;
3891 }
3892
3893 RValue<Float> Trunc(RValue<Float> x)
3894 {
3895 RR_DEBUG_INFO_UPDATE_LOC();
3896 return Float4(Trunc(Float4(x))).x;
3897 }
3898
3899 RValue<Float> Frac(RValue<Float> x)
3900 {
3901 RR_DEBUG_INFO_UPDATE_LOC();
3902 return Float4(Frac(Float4(x))).x;
3903 }
3904
3905 RValue<Float> Floor(RValue<Float> x)
3906 {
3907 RR_DEBUG_INFO_UPDATE_LOC();
3908 return Float4(Floor(Float4(x))).x;
3909 }
3910
3911 RValue<Float> Ceil(RValue<Float> x)
3912 {
3913 RR_DEBUG_INFO_UPDATE_LOC();
3914 return Float4(Ceil(Float4(x))).x;
3915 }
3916
3917 Type *Float::type()
3918 {
3919 return T(Ice::IceType_f32);
3920 }
3921
3922 Type *Float2::type()
3923 {
3924 return T(Type_v2f32);
3925 }
3926
3927 Float4::Float4(RValue<Float> rhs)
3928 : XYZW(this)
3929 {
3930 RR_DEBUG_INFO_UPDATE_LOC();
3931 Value *vector = Nucleus::createBitCast(rhs.value(), Float4::type());
3932
3933 int swizzle[4] = { 0, 0, 0, 0 };
3934 Value *replicate = Nucleus::createShuffleVector(vector, vector, swizzle);
3935
3936 storeValue(replicate);
3937 }
3938
3939 // Calls a single-argument function on each lane of a vector type
3940 template<typename Func, typename T>
3941 static RValue<T> call4(Func func, const RValue<T> &x)
3942 {
3943 T result;
3944 result = Insert(result, Call(func, Extract(x, 0)), 0);
3945 result = Insert(result, Call(func, Extract(x, 1)), 1);
3946 result = Insert(result, Call(func, Extract(x, 2)), 2);
3947 result = Insert(result, Call(func, Extract(x, 3)), 3);
3948 return result;
3949 }
3950
3951 // Calls a two-argument function on each lane of a vector type
3952 template<typename Func, typename T>
3953 static RValue<T> call4(Func func, const RValue<T> &x, const RValue<T> &y)
3954 {
3955 T result;
3956 result = Insert(result, Call(func, Extract(x, 0), Extract(y, 0)), 0);
3957 result = Insert(result, Call(func, Extract(x, 1), Extract(y, 1)), 1);
3958 result = Insert(result, Call(func, Extract(x, 2), Extract(y, 2)), 2);
3959 result = Insert(result, Call(func, Extract(x, 3), Extract(y, 3)), 3);
3960 return result;
3961 }
3962
3963 // Calls a three-argument function on each lane of a vector type
3964 template<typename Func, typename T>
3965 static RValue<T> call4(Func func, const RValue<T> &x, const RValue<T> &y, const RValue<T> &z)
3966 {
3967 T result;
3968 result = Insert(result, Call(func, Extract(x, 0), Extract(y, 0), Extract(z, 0)), 0);
3969 result = Insert(result, Call(func, Extract(x, 1), Extract(y, 1), Extract(z, 1)), 1);
3970 result = Insert(result, Call(func, Extract(x, 2), Extract(y, 2), Extract(z, 2)), 2);
3971 result = Insert(result, Call(func, Extract(x, 3), Extract(y, 3), Extract(z, 3)), 3);
3972 return result;
3973 }
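
// Illustrative use of the call4() helpers above (a sketch, mirroring the math functions further
// down): a vector operation is scalarized into one libm call per lane, e.g.
//   RValue<Float4> s = call4(sinf, x);   // same approach as Sin(x) below
// This keeps the Subzero backend simple at the cost of four scalar calls per vector.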
3974
3975 RValue<Float4> operator%(RValue<Float4> lhs, RValue<Float4> rhs)
3976 {
3977 return call4(fmodf, lhs, rhs);
3978 }
3979
3980 RValue<Float4> MulAdd(RValue<Float4> x, RValue<Float4> y, RValue<Float4> z)
3981 {
3982 // TODO(b/214591655): Use FMA when available.
3983 return x * y + z;
3984 }
3985
3986 RValue<Float4> FMA(RValue<Float4> x, RValue<Float4> y, RValue<Float4> z)
3987 {
3988 // TODO(b/214591655): Use FMA instructions when available.
3989 return call4(fmaf, x, y, z);
3990 }
3991
3992 RValue<Float4> Abs(RValue<Float4> x)
3993 {
3994 // TODO: Optimize.
3995 Value *vector = Nucleus::createBitCast(x.value(), Int4::type());
3996 int64_t constantVector[4] = { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
3997 Value *result = Nucleus::createAnd(vector, Nucleus::createConstantVector(constantVector, Int4::type()));
3998
3999 return As<Float4>(result);
4000 }
4001
4002 RValue<Float4> Max(RValue<Float4> x, RValue<Float4> y)
4003 {
4004 RR_DEBUG_INFO_UPDATE_LOC();
4005 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v4i1);
4006 auto cmp = Ice::InstFcmp::create(::function, Ice::InstFcmp::Ogt, condition, x.value(), y.value());
4007 ::basicBlock->appendInst(cmp);
4008
4009 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4f32);
4010 auto select = Ice::InstSelect::create(::function, result, condition, x.value(), y.value());
4011 ::basicBlock->appendInst(select);
4012
4013 return RValue<Float4>(V(result));
4014 }
4015
4016 RValue<Float4> Min(RValue<Float4> x, RValue<Float4> y)
4017 {
4018 RR_DEBUG_INFO_UPDATE_LOC();
4019 Ice::Variable *condition = ::function->makeVariable(Ice::IceType_v4i1);
4020 auto cmp = Ice::InstFcmp::create(::function, Ice::InstFcmp::Olt, condition, x.value(), y.value());
4021 ::basicBlock->appendInst(cmp);
4022
4023 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4f32);
4024 auto select = Ice::InstSelect::create(::function, result, condition, x.value(), y.value());
4025 ::basicBlock->appendInst(select);
4026
4027 return RValue<Float4>(V(result));
4028 }
4029
4030 RValue<Float4> Rcp_pp(RValue<Float4> x, bool exactAtPow2)
4031 {
4032 RR_DEBUG_INFO_UPDATE_LOC();
4033 return Float4(1.0f) / x;
4034 }
4035
4036 RValue<Float4> RcpSqrt_pp(RValue<Float4> x)
4037 {
4038 RR_DEBUG_INFO_UPDATE_LOC();
4039 return Rcp_pp(Sqrt(x));
4040 }
4041
4042 bool HasRcpApprox()
4043 {
4044 // TODO(b/175612820): Update once we implement x86 SSE rcp_ss and rsqrt_ss intrinsics in Subzero
4045 return false;
4046 }
4047
4048 RValue<Float4> RcpApprox(RValue<Float4> x, bool exactAtPow2)
4049 {
4050 // TODO(b/175612820): Update once we implement x86 SSE rcp_ss and rsqrt_ss intrinsics in Subzero
4051 UNREACHABLE("RValue<Float4> RcpApprox()");
4052 return { 0.0f };
4053 }
4054
4055 RValue<Float> RcpApprox(RValue<Float> x, bool exactAtPow2)
4056 {
4057 // TODO(b/175612820): Update once we implement x86 SSE rcp_ss and rsqrt_ss intrinsics in Subzero
4058 UNREACHABLE("RValue<Float> RcpApprox()");
4059 return { 0.0f };
4060 }
4061
4062 bool HasRcpSqrtApprox()
4063 {
4064 return false;
4065 }
4066
4067 RValue<Float4> RcpSqrtApprox(RValue<Float4> x)
4068 {
4069 // TODO(b/175612820): Update once we implement x86 SSE rcp_ss and rsqrt_ss intrinsics in Subzero
4070 UNREACHABLE("RValue<Float4> RcpSqrtApprox()");
4071 return { 0.0f };
4072 }
4073
4074 RValue<Float> RcpSqrtApprox(RValue<Float> x)
4075 {
4076 // TODO(b/175612820): Update once we implement x86 SSE rcp_ss and rsqrt_ss intrinsics in Subzero
4077 UNREACHABLE("RValue<Float> RcpSqrtApprox()");
4078 return { 0.0f };
4079 }
4080
4081 RValue<Float4> Sqrt(RValue<Float4> x)
4082 {
4083 RR_DEBUG_INFO_UPDATE_LOC();
4084 if(emulateIntrinsics || CPUID::ARM)
4085 {
4086 Float4 result;
4087 result.x = Sqrt(Float(Float4(x).x));
4088 result.y = Sqrt(Float(Float4(x).y));
4089 result.z = Sqrt(Float(Float4(x).z));
4090 result.w = Sqrt(Float(Float4(x).w));
4091
4092 return result;
4093 }
4094 else
4095 {
4096 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4f32);
4097 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Sqrt, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4098 auto sqrt = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
4099 sqrt->addArg(x.value());
4100 ::basicBlock->appendInst(sqrt);
4101
4102 return RValue<Float4>(V(result));
4103 }
4104 }
4105
4106 RValue<Int> SignMask(RValue<Float4> x)
4107 {
4108 RR_DEBUG_INFO_UPDATE_LOC();
4109 if(emulateIntrinsics || CPUID::ARM)
4110 {
4111 Int4 xx = (As<Int4>(x) >> 31) & Int4(0x00000001, 0x00000002, 0x00000004, 0x00000008);
4112 return Extract(xx, 0) | Extract(xx, 1) | Extract(xx, 2) | Extract(xx, 3);
4113 }
4114 else
4115 {
4116 Ice::Variable *result = ::function->makeVariable(Ice::IceType_i32);
4117 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::SignMask, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4118 auto movmsk = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
4119 movmsk->addArg(x.value());
4120 ::basicBlock->appendInst(movmsk);
4121
4122 return RValue<Int>(V(result));
4123 }
4124 }
4125
4126 RValue<Int4> CmpEQ(RValue<Float4> x, RValue<Float4> y)
4127 {
4128 RR_DEBUG_INFO_UPDATE_LOC();
4129 return RValue<Int4>(Nucleus::createFCmpOEQ(x.value(), y.value()));
4130 }
4131
4132 RValue<Int4> CmpLT(RValue<Float4> x, RValue<Float4> y)
4133 {
4134 RR_DEBUG_INFO_UPDATE_LOC();
4135 return RValue<Int4>(Nucleus::createFCmpOLT(x.value(), y.value()));
4136 }
4137
4138 RValue<Int4> CmpLE(RValue<Float4> x, RValue<Float4> y)
4139 {
4140 RR_DEBUG_INFO_UPDATE_LOC();
4141 return RValue<Int4>(Nucleus::createFCmpOLE(x.value(), y.value()));
4142 }
4143
4144 RValue<Int4> CmpNEQ(RValue<Float4> x, RValue<Float4> y)
4145 {
4146 RR_DEBUG_INFO_UPDATE_LOC();
4147 return RValue<Int4>(Nucleus::createFCmpONE(x.value(), y.value()));
4148 }
4149
4150 RValue<Int4> CmpNLT(RValue<Float4> x, RValue<Float4> y)
4151 {
4152 RR_DEBUG_INFO_UPDATE_LOC();
4153 return RValue<Int4>(Nucleus::createFCmpOGE(x.value(), y.value()));
4154 }
4155
4156 RValue<Int4> CmpNLE(RValue<Float4> x, RValue<Float4> y)
4157 {
4158 RR_DEBUG_INFO_UPDATE_LOC();
4159 return RValue<Int4>(Nucleus::createFCmpOGT(x.value(), y.value()));
4160 }
4161
4162 RValue<Int4> CmpUEQ(RValue<Float4> x, RValue<Float4> y)
4163 {
4164 RR_DEBUG_INFO_UPDATE_LOC();
4165 return RValue<Int4>(Nucleus::createFCmpUEQ(x.value(), y.value()));
4166 }
4167
4168 RValue<Int4> CmpULT(RValue<Float4> x, RValue<Float4> y)
4169 {
4170 RR_DEBUG_INFO_UPDATE_LOC();
4171 return RValue<Int4>(Nucleus::createFCmpULT(x.value(), y.value()));
4172 }
4173
4174 RValue<Int4> CmpULE(RValue<Float4> x, RValue<Float4> y)
4175 {
4176 RR_DEBUG_INFO_UPDATE_LOC();
4177 return RValue<Int4>(Nucleus::createFCmpULE(x.value(), y.value()));
4178 }
4179
4180 RValue<Int4> CmpUNEQ(RValue<Float4> x, RValue<Float4> y)
4181 {
4182 RR_DEBUG_INFO_UPDATE_LOC();
4183 return RValue<Int4>(Nucleus::createFCmpUNE(x.value(), y.value()));
4184 }
4185
4186 RValue<Int4> CmpUNLT(RValue<Float4> x, RValue<Float4> y)
4187 {
4188 RR_DEBUG_INFO_UPDATE_LOC();
4189 return RValue<Int4>(Nucleus::createFCmpUGE(x.value(), y.value()));
4190 }
4191
4192 RValue<Int4> CmpUNLE(RValue<Float4> x, RValue<Float4> y)
4193 {
4194 RR_DEBUG_INFO_UPDATE_LOC();
4195 return RValue<Int4>(Nucleus::createFCmpUGT(x.value(), y.value()));
4196 }
4197
4198 RValue<Float4> Round(RValue<Float4> x)
4199 {
4200 RR_DEBUG_INFO_UPDATE_LOC();
4201 if(emulateIntrinsics || CPUID::ARM)
4202 {
4203 // Push the fractional part off the mantissa. Accurate up to +/-2^22.
4204 return (x + Float4(0x00C00000)) - Float4(0x00C00000);
4205 }
4206 else if(CPUID::SSE4_1)
4207 {
4208 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4f32);
4209 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Round, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4210 auto round = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
4211 round->addArg(x.value());
4212 round->addArg(::context->getConstantInt32(0));
4213 ::basicBlock->appendInst(round);
4214
4215 return RValue<Float4>(V(result));
4216 }
4217 else
4218 {
4219 return Float4(RoundInt(x));
4220 }
4221 }
4222
4223 RValue<Float4> Trunc(RValue<Float4> x)
4224 {
4225 RR_DEBUG_INFO_UPDATE_LOC();
4226 if(CPUID::SSE4_1)
4227 {
4228 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4f32);
4229 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Round, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4230 auto round = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
4231 round->addArg(x.value());
4232 round->addArg(::context->getConstantInt32(3));
4233 ::basicBlock->appendInst(round);
4234
4235 return RValue<Float4>(V(result));
4236 }
4237 else
4238 {
4239 return Float4(Int4(x));
4240 }
4241 }
4242
4243 RValue<Float4> Frac(RValue<Float4> x)
4244 {
4245 RR_DEBUG_INFO_UPDATE_LOC();
4246 Float4 frc;
4247
4248 if(CPUID::SSE4_1)
4249 {
4250 frc = x - Floor(x);
4251 }
4252 else
4253 {
4254 frc = x - Float4(Int4(x)); // Signed fractional part.
4255
4256 frc += As<Float4>(As<Int4>(CmpNLE(Float4(0.0f), frc)) & As<Int4>(Float4(1, 1, 1, 1))); // Add 1.0 if negative.
4257 }
4258
4259 // x - floor(x) can be 1.0 for very small negative x.
4260 // Clamp against the value just below 1.0.
4261 return Min(frc, As<Float4>(Int4(0x3F7FFFFF)));
4262 }
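
// Note on the clamp above: 0x3F7FFFFF is the bit pattern of the largest float strictly below
// 1.0 (about 0.99999994f), so Frac() never returns exactly 1.0 even when x - floor(x) rounds
// up to 1.0 for tiny negative inputs.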
4263
4264 RValue<Float4> Floor(RValue<Float4> x)
4265 {
4266 RR_DEBUG_INFO_UPDATE_LOC();
4267 if(CPUID::SSE4_1)
4268 {
4269 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4f32);
4270 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Round, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4271 auto round = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
4272 round->addArg(x.value());
4273 round->addArg(::context->getConstantInt32(1));
4274 ::basicBlock->appendInst(round);
4275
4276 return RValue<Float4>(V(result));
4277 }
4278 else
4279 {
4280 return x - Frac(x);
4281 }
4282 }
4283
4284 RValue<Float4> Ceil(RValue<Float4> x)
4285 {
4286 RR_DEBUG_INFO_UPDATE_LOC();
4287 if(CPUID::SSE4_1)
4288 {
4289 Ice::Variable *result = ::function->makeVariable(Ice::IceType_v4f32);
4290 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Round, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4291 auto round = Ice::InstIntrinsic::create(::function, 2, result, intrinsic);
4292 round->addArg(x.value());
4293 round->addArg(::context->getConstantInt32(2));
4294 ::basicBlock->appendInst(round);
4295
4296 return RValue<Float4>(V(result));
4297 }
4298 else
4299 {
4300 return -Floor(-x);
4301 }
4302 }
4303
4304 Type *Float4::type()
4305 {
4306 return T(Ice::IceType_v4f32);
4307 }
4308
4309 RValue<Long> Ticks()
4310 {
4311 RR_DEBUG_INFO_UPDATE_LOC();
4312 UNIMPLEMENTED_NO_BUG("RValue<Long> Ticks()");
4313 return Long(Int(0));
4314 }
4315
4316 RValue<Pointer<Byte>> ConstantPointer(void const *ptr)
4317 {
4318 RR_DEBUG_INFO_UPDATE_LOC();
4319 return RValue<Pointer<Byte>>{ V(sz::getConstantPointer(::context, ptr)) };
4320 }
4321
4322 RValue<Pointer<Byte>> ConstantData(void const *data, size_t size)
4323 {
4324 RR_DEBUG_INFO_UPDATE_LOC();
4325 return RValue<Pointer<Byte>>{ V(IceConstantData(data, size)) };
4326 }
4327
4328 Value *Call(RValue<Pointer<Byte>> fptr, Type *retTy, std::initializer_list<Value *> args, std::initializer_list<Type *> argTys)
4329 {
4330 RR_DEBUG_INFO_UPDATE_LOC();
4331 return V(sz::Call(::function, ::basicBlock, T(retTy), V(fptr.value()), V(args), false));
4332 }
4333
4334 void Breakpoint()
4335 {
4336 RR_DEBUG_INFO_UPDATE_LOC();
4337 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Trap, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4338 auto trap = Ice::InstIntrinsic::create(::function, 0, nullptr, intrinsic);
4339 ::basicBlock->appendInst(trap);
4340 }
4341
4342 void Nucleus::createFence(std::memory_order memoryOrder)
4343 {
4344 RR_DEBUG_INFO_UPDATE_LOC();
4345 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::AtomicFence, Ice::Intrinsics::SideEffects_T, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4346 auto inst = Ice::InstIntrinsic::create(::function, 0, nullptr, intrinsic);
4347 auto order = ::context->getConstantInt32(stdToIceMemoryOrder(memoryOrder));
4348 inst->addArg(order);
4349 ::basicBlock->appendInst(inst);
4350 }
4351
4352 Value *Nucleus::createMaskedLoad(Value *ptr, Type *elTy, Value *mask, unsigned int alignment, bool zeroMaskedLanes)
4353 {
4354 RR_DEBUG_INFO_UPDATE_LOC();
4355 UNIMPLEMENTED("b/155867273 Subzero createMaskedLoad()");
4356 return nullptr;
4357 }
4358
4359 void Nucleus::createMaskedStore(Value *ptr, Value *val, Value *mask, unsigned int alignment)
4360 {
4361 RR_DEBUG_INFO_UPDATE_LOC();
4362 UNIMPLEMENTED("b/155867273 Subzero createMaskedStore()");
4363 }
4364
4365 template<typename T>
4366 struct UnderlyingType
4367 {
4368 using Type = typename decltype(rr::Extract(std::declval<RValue<T>>(), 0))::rvalue_underlying_type;
4369 };
4370
4371 template<typename T>
4372 using UnderlyingTypeT = typename UnderlyingType<T>::Type;
4373
4374 template<typename T, typename EL = UnderlyingTypeT<T>>
4375 static void gather(T &out, RValue<Pointer<EL>> base, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment, bool zeroMaskedLanes)
4376 {
4377 constexpr bool atomic = false;
4378 constexpr std::memory_order order = std::memory_order_relaxed;
4379
4380 Pointer<Byte> baseBytePtr = base;
4381
4382 out = T(0);
4383 for(int i = 0; i < 4; i++)
4384 {
4385 If(Extract(mask, i) != 0)
4386 {
4387 auto offset = Extract(offsets, i);
4388 auto el = Load(Pointer<EL>(&baseBytePtr[offset]), alignment, atomic, order);
4389 out = Insert(out, el, i);
4390 }
4391 Else If(zeroMaskedLanes)
4392 {
4393 out = Insert(out, EL(0), i);
4394 }
4395 }
4396 }
4397
4398 template<typename T, typename EL = UnderlyingTypeT<T>>
4399 static void scatter(RValue<Pointer<EL>> base, RValue<T> val, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment)
4400 {
4401 constexpr bool atomic = false;
4402 constexpr std::memory_order order = std::memory_order_relaxed;
4403
4404 Pointer<Byte> baseBytePtr = base;
4405
4406 for(int i = 0; i < 4; i++)
4407 {
4408 If(Extract(mask, i) != 0)
4409 {
4410 auto offset = Extract(offsets, i);
4411 Store(Extract(val, i), Pointer<EL>(&baseBytePtr[offset]), alignment, atomic, order);
4412 }
4413 }
4414 }
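
// gather() and scatter() above emulate masked gather/scatter with a per-lane conditional
// load/store loop, since no native masked memory intrinsic is used here. Illustrative use
// through the public wrappers below (a sketch with hypothetical variables, not additional API):
//   Float4 v = Gather(base, offsets, mask, sizeof(float));
//   Scatter(base, v, offsets, mask, sizeof(float));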
4415
4416 RValue<Float4> Gather(RValue<Pointer<Float>> base, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment, bool zeroMaskedLanes /* = false */)
4417 {
4418 RR_DEBUG_INFO_UPDATE_LOC();
4419 Float4 result{};
4420 gather(result, base, offsets, mask, alignment, zeroMaskedLanes);
4421 return result;
4422 }
4423
4424 RValue<Int4> Gather(RValue<Pointer<Int>> base, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment, bool zeroMaskedLanes /* = false */)
4425 {
4426 RR_DEBUG_INFO_UPDATE_LOC();
4427 Int4 result{};
4428 gather(result, base, offsets, mask, alignment, zeroMaskedLanes);
4429 return result;
4430 }
4431
4432 void Scatter(RValue<Pointer<Float>> base, RValue<Float4> val, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment)
4433 {
4434 RR_DEBUG_INFO_UPDATE_LOC();
4435 scatter(base, val, offsets, mask, alignment);
4436 }
4437
4438 void Scatter(RValue<Pointer<Int>> base, RValue<Int4> val, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment)
4439 {
4440 RR_DEBUG_INFO_UPDATE_LOC();
4441 scatter<Int4>(base, val, offsets, mask, alignment);
4442 }
4443
4444 RValue<Float> Exp2(RValue<Float> x)
4445 {
4446 RR_DEBUG_INFO_UPDATE_LOC();
4447 return Call(exp2f, x);
4448 }
4449
4450 RValue<Float> Log2(RValue<Float> x)
4451 {
4452 RR_DEBUG_INFO_UPDATE_LOC();
4453 return Call(log2f, x);
4454 }
4455
4456 RValue<Float4> Sin(RValue<Float4> x)
4457 {
4458 RR_DEBUG_INFO_UPDATE_LOC();
4459 return call4(sinf, x);
4460 }
4461
4462 RValue<Float4> Cos(RValue<Float4> x)
4463 {
4464 RR_DEBUG_INFO_UPDATE_LOC();
4465 return call4(cosf, x);
4466 }
4467
4468 RValue<Float4> Tan(RValue<Float4> x)
4469 {
4470 RR_DEBUG_INFO_UPDATE_LOC();
4471 return call4(tanf, x);
4472 }
4473
4474 RValue<Float4> Asin(RValue<Float4> x)
4475 {
4476 RR_DEBUG_INFO_UPDATE_LOC();
4477 return call4(asinf, x);
4478 }
4479
4480 RValue<Float4> Acos(RValue<Float4> x)
4481 {
4482 RR_DEBUG_INFO_UPDATE_LOC();
4483 return call4(acosf, x);
4484 }
4485
4486 RValue<Float4> Atan(RValue<Float4> x)
4487 {
4488 RR_DEBUG_INFO_UPDATE_LOC();
4489 return call4(atanf, x);
4490 }
4491
4492 RValue<Float4> Sinh(RValue<Float4> x)
4493 {
4494 RR_DEBUG_INFO_UPDATE_LOC();
4495 return call4(sinhf, x);
4496 }
4497
4498 RValue<Float4> Cosh(RValue<Float4> x)
4499 {
4500 RR_DEBUG_INFO_UPDATE_LOC();
4501 return call4(coshf, x);
4502 }
4503
4504 RValue<Float4> Tanh(RValue<Float4> x)
4505 {
4506 RR_DEBUG_INFO_UPDATE_LOC();
4507 return call4(tanhf, x);
4508 }
4509
4510 RValue<Float4> Asinh(RValue<Float4> x)
4511 {
4512 RR_DEBUG_INFO_UPDATE_LOC();
4513 return call4(asinhf, x);
4514 }
4515
4516 RValue<Float4> Acosh(RValue<Float4> x)
4517 {
4518 RR_DEBUG_INFO_UPDATE_LOC();
4519 return call4(acoshf, x);
4520 }
4521
4522 RValue<Float4> Atanh(RValue<Float4> x)
4523 {
4524 RR_DEBUG_INFO_UPDATE_LOC();
4525 return call4(atanhf, x);
4526 }
4527
4528 RValue<Float4> Atan2(RValue<Float4> x, RValue<Float4> y)
4529 {
4530 RR_DEBUG_INFO_UPDATE_LOC();
4531 return call4(atan2f, x, y);
4532 }
4533
4534 RValue<Float4> Pow(RValue<Float4> x, RValue<Float4> y)
4535 {
4536 RR_DEBUG_INFO_UPDATE_LOC();
4537 return call4(powf, x, y);
4538 }
4539
4540 RValue<Float4> Exp(RValue<Float4> x)
4541 {
4542 RR_DEBUG_INFO_UPDATE_LOC();
4543 return call4(expf, x);
4544 }
4545
4546 RValue<Float4> Log(RValue<Float4> x)
4547 {
4548 RR_DEBUG_INFO_UPDATE_LOC();
4549 return call4(logf, x);
4550 }
4551
4552 RValue<Float4> Exp2(RValue<Float4> x)
4553 {
4554 RR_DEBUG_INFO_UPDATE_LOC();
4555 return call4(exp2f, x);
4556 }
4557
4558 RValue<Float4> Log2(RValue<Float4> x)
4559 {
4560 RR_DEBUG_INFO_UPDATE_LOC();
4561 return call4(log2f, x);
4562 }
4563
4564 RValue<UInt> Ctlz(RValue<UInt> x, bool isZeroUndef)
4565 {
4566 RR_DEBUG_INFO_UPDATE_LOC();
4567 if(emulateIntrinsics)
4568 {
4569 UNIMPLEMENTED_NO_BUG("Subzero Ctlz()");
4570 return UInt(0);
4571 }
4572 else
4573 {
4574 Ice::Variable *result = ::function->makeVariable(Ice::IceType_i32);
4575 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Ctlz, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4576 auto ctlz = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
4577 ctlz->addArg(x.value());
4578 ::basicBlock->appendInst(ctlz);
4579
4580 return RValue<UInt>(V(result));
4581 }
4582 }
4583
4584 RValue<UInt4> Ctlz(RValue<UInt4> x, bool isZeroUndef)
4585 {
4586 RR_DEBUG_INFO_UPDATE_LOC();
4587 if(emulateIntrinsics)
4588 {
4589 UNIMPLEMENTED_NO_BUG("Subzero Ctlz()");
4590 return UInt4(0);
4591 }
4592 else
4593 {
4594 // TODO: implement vectorized version in Subzero
4595 UInt4 result;
4596 result = Insert(result, Ctlz(Extract(x, 0), isZeroUndef), 0);
4597 result = Insert(result, Ctlz(Extract(x, 1), isZeroUndef), 1);
4598 result = Insert(result, Ctlz(Extract(x, 2), isZeroUndef), 2);
4599 result = Insert(result, Ctlz(Extract(x, 3), isZeroUndef), 3);
4600 return result;
4601 }
4602 }
4603
4604 RValue<UInt> Cttz(RValue<UInt> x, bool isZeroUndef)
4605 {
4606 RR_DEBUG_INFO_UPDATE_LOC();
4607 if(emulateIntrinsics)
4608 {
4609 UNIMPLEMENTED_NO_BUG("Subzero Cttz()");
4610 return UInt(0);
4611 }
4612 else
4613 {
4614 Ice::Variable *result = ::function->makeVariable(Ice::IceType_i32);
4615 const Ice::Intrinsics::IntrinsicInfo intrinsic = { Ice::Intrinsics::Cttz, Ice::Intrinsics::SideEffects_F, Ice::Intrinsics::ReturnsTwice_F, Ice::Intrinsics::MemoryWrite_F };
4616 auto ctlz = Ice::InstIntrinsic::create(::function, 1, result, intrinsic);
4617 ctlz->addArg(x.value());
4618 ::basicBlock->appendInst(ctlz);
4619
4620 return RValue<UInt>(V(result));
4621 }
4622 }
4623
4624 RValue<UInt4> Cttz(RValue<UInt4> x, bool isZeroUndef)
4625 {
4626 RR_DEBUG_INFO_UPDATE_LOC();
4627 if(emulateIntrinsics)
4628 {
4629 UNIMPLEMENTED_NO_BUG("Subzero Cttz()");
4630 return UInt4(0);
4631 }
4632 else
4633 {
4634 // TODO: implement vectorized version in Subzero
4635 UInt4 result;
4636 result = Insert(result, Cttz(Extract(x, 0), isZeroUndef), 0);
4637 result = Insert(result, Cttz(Extract(x, 1), isZeroUndef), 1);
4638 result = Insert(result, Cttz(Extract(x, 2), isZeroUndef), 2);
4639 result = Insert(result, Cttz(Extract(x, 3), isZeroUndef), 3);
4640 return result;
4641 }
4642 }
4643
4644 // TODO(b/148276653): Both atomicMin and atomicMax use a static (global) mutex that makes all min/max
4645 // operations for a given T mutually exclusive, rather than only the ones on the value pointed to
4646 // by ptr. Use a CAS loop instead, as is done for LLVMReactor's min/max atomics on Android.
4647 // TODO(b/148207274): Or, move this down into Subzero as a CAS-based operation.
4648 template<typename T>
4649 static T atomicMin(T *ptr, T value)
4650 {
4651 static std::mutex m;
4652
4653 std::lock_guard<std::mutex> lock(m);
4654 T origValue = *ptr;
4655 *ptr = std::min(origValue, value);
4656 return origValue;
4657 }
4658
4659 template<typename T>
4660 static T atomicMax(T *ptr, T value)
4661 {
4662 static std::mutex m;
4663
4664 std::lock_guard<std::mutex> lock(m);
4665 T origValue = *ptr;
4666 *ptr = std::max(origValue, value);
4667 return origValue;
4668 }
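
// atomicMin()/atomicMax() above are plain host functions reached through Call(); the static
// mutex serializes every min/max of a given element type, as the TODO notes. A lock-free
// alternative would be a compare-exchange loop (sketch only, assuming a std::atomic-based
// implementation were adopted; hypothetical helper, not used in this file):
//   static int32_t atomicMinCAS(std::atomic<int32_t> *p, int32_t value)
//   {
//       int32_t old = p->load();
//       while(value < old && !p->compare_exchange_weak(old, value)) {}
//       return old;  // previous value, like the mutex-based version
//   }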
4669
4670 RValue<Int> MinAtomic(RValue<Pointer<Int>> x, RValue<Int> y, std::memory_order memoryOrder)
4671 {
4672 RR_DEBUG_INFO_UPDATE_LOC();
4673 return Call(atomicMin<int32_t>, x, y);
4674 }
4675
4676 RValue<UInt> MinAtomic(RValue<Pointer<UInt>> x, RValue<UInt> y, std::memory_order memoryOrder)
4677 {
4678 RR_DEBUG_INFO_UPDATE_LOC();
4679 return Call(atomicMin<uint32_t>, x, y);
4680 }
4681
4682 RValue<Int> MaxAtomic(RValue<Pointer<Int>> x, RValue<Int> y, std::memory_order memoryOrder)
4683 {
4684 RR_DEBUG_INFO_UPDATE_LOC();
4685 return Call(atomicMax<int32_t>, x, y);
4686 }
4687
4688 RValue<UInt> MaxAtomic(RValue<Pointer<UInt>> x, RValue<UInt> y, std::memory_order memoryOrder)
4689 {
4690 RR_DEBUG_INFO_UPDATE_LOC();
4691 return Call(atomicMax<uint32_t>, x, y);
4692 }
4693
4694 void EmitDebugLocation()
4695 {
4696 #ifdef ENABLE_RR_DEBUG_INFO
4697 emitPrintLocation(getCallerBacktrace());
4698 #endif // ENABLE_RR_DEBUG_INFO
4699 }
4700 void EmitDebugVariable(Value *value) {}
4701 void FlushDebug() {}
4702
4703 namespace {
4704 namespace coro {
4705
4706 // Instance data per generated coroutine
4707 // This is the "handle" type used for Coroutine functions
4708 // Lifetime: from yield to when CoroutineEntryDestroy generated function is called.
4709 struct CoroutineData
4710 {
4711 bool useInternalScheduler = false;
4712 bool done = false; // the coroutine should stop at the next yield()
4713 bool terminated = false; // the coroutine has finished.
4714 bool inRoutine = false; // is the coroutine currently executing?
4715 marl::Scheduler::Fiber *mainFiber = nullptr;
4716 marl::Scheduler::Fiber *routineFiber = nullptr;
4717 void *promisePtr = nullptr;
4718 };
4719
4720 CoroutineData *createCoroutineData()
4721 {
4722 return new CoroutineData{};
4723 }
4724
4725 void destroyCoroutineData(CoroutineData *coroData)
4726 {
4727 delete coroData;
4728 }
4729
4730 // suspend() pauses execution of the coroutine, and resumes execution from the
4731 // caller's call to await().
4732 // Returns true if await() is called again, or false if coroutine_destroy()
4733 // is called.
4734 bool suspend(Nucleus::CoroutineHandle handle)
4735 {
4736 auto *coroData = reinterpret_cast<CoroutineData *>(handle);
4737 ASSERT(marl::Scheduler::Fiber::current() == coroData->routineFiber);
4738 ASSERT(coroData->inRoutine);
4739 coroData->inRoutine = false;
4740 coroData->mainFiber->notify();
4741 while(!coroData->inRoutine)
4742 {
4743 coroData->routineFiber->wait();
4744 }
4745 return !coroData->done;
4746 }
4747
4748 // resume() is called by await(), blocking until the coroutine calls yield()
4749 // or the coroutine terminates.
resume(Nucleus::CoroutineHandle handle)4750 void resume(Nucleus::CoroutineHandle handle)
4751 {
4752 auto *coroData = reinterpret_cast<CoroutineData *>(handle);
4753 ASSERT(marl::Scheduler::Fiber::current() == coroData->mainFiber);
4754 ASSERT(!coroData->inRoutine);
4755 coroData->inRoutine = true;
4756 coroData->routineFiber->notify();
4757 while(coroData->inRoutine)
4758 {
4759 coroData->mainFiber->wait();
4760 }
4761 }

// stop() is called by coroutine_destroy(): it signals the coroutine to finish,
// blocks until it terminates, and then deletes the coroutine data.
void stop(Nucleus::CoroutineHandle handle)
{
	auto *coroData = reinterpret_cast<CoroutineData *>(handle);
	ASSERT(marl::Scheduler::Fiber::current() == coroData->mainFiber);
	ASSERT(!coroData->inRoutine);
	if(!coroData->terminated)
	{
		coroData->done = true;
		coroData->inRoutine = true;
		coroData->routineFiber->notify();
		while(!coroData->terminated)
		{
			coroData->mainFiber->wait();
		}
	}
	if(coroData->useInternalScheduler)
	{
		::getOrCreateScheduler().unbind();
	}
	coro::destroyCoroutineData(coroData);  // free the coroutine data.
}

namespace detail {
thread_local rr::Nucleus::CoroutineHandle coroHandle{};
}  // namespace detail

void setHandleParam(Nucleus::CoroutineHandle handle)
{
	ASSERT(!detail::coroHandle);
	detail::coroHandle = handle;
}

Nucleus::CoroutineHandle getHandleParam()
{
	ASSERT(detail::coroHandle);
	auto handle = detail::coroHandle;
	detail::coroHandle = {};
	return handle;
}
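
// The generated coroutine_begin() function has the same signature as the
// Reactor function it wraps, so the coroutine handle cannot be passed to it as
// an argument. It is handed over through the thread_local above instead:
// invokeCoroutineBegin() calls setHandleParam() on the same thread right before
// the generated code starts executing, and coroutine_begin() retrieves it with
// getHandleParam() at the top of its entry block (see generateCoroutineBegin()
// below).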

bool isDone(Nucleus::CoroutineHandle handle)
{
	auto *coroData = reinterpret_cast<CoroutineData *>(handle);
	return coroData->done;
}

void setPromisePtr(Nucleus::CoroutineHandle handle, void *promisePtr)
{
	auto *coroData = reinterpret_cast<CoroutineData *>(handle);
	coroData->promisePtr = promisePtr;
}

void *getPromisePtr(Nucleus::CoroutineHandle handle)
{
	auto *coroData = reinterpret_cast<CoroutineData *>(handle);
	return coroData->promisePtr;
}

}  // namespace coro
}  // namespace

// Used to generate coroutines.
// Lifetime: from yield to acquireCoroutine
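//
// The generator assembles three functions per coroutine (bundled together by
// acquireCoroutine() further down):
//
//     CoroutineHandle coroutine_begin(<Arguments>);           // the Reactor function itself, augmented below
//     bool coroutine_await(CoroutineHandle, YieldType *out);  // see generateAwaitFunction()
//     void coroutine_destroy(CoroutineHandle);                // see generateDestroyFunction()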
class CoroutineGenerator
{
public:
	CoroutineGenerator()
	{
	}

	// Inserts instructions at the top of the current function to make it a coroutine.
	void generateCoroutineBegin()
	{
		// Begin building the main coroutine_begin() function.
		// We insert these instructions at the top of the entry node,
		// before existing reactor-generated instructions.

		// CoroutineHandle coroutine_begin(<Arguments>)
		// {
		//     this->handle = coro::getHandleParam();
		//
		//     YieldType promise;
		//     coro::setPromisePtr(handle, &promise); // For await
		//
		//     ... <REACTOR CODE> ...
		//

		// this->handle = coro::getHandleParam();
		this->handle = sz::Call(::function, ::entryBlock, coro::getHandleParam);

		// YieldType promise;
		// coro::setPromisePtr(handle, &promise); // For await
		this->promise = sz::allocateStackVariable(::function, T(::coroYieldType));
		sz::Call(::function, ::entryBlock, coro::setPromisePtr, this->handle, this->promise);
	}

	// Adds instructions for Yield() calls at the current location of the main coroutine function.
	void generateYield(Value *val)
	{
		// ... <REACTOR CODE> ...
		//
		// promise = val;
		// if (!coro::suspend(handle)) {
		//     return false; // coroutine has been stopped by the caller.
		// }
		//
		// ... <REACTOR CODE> ...

		// promise = val;
		Nucleus::createStore(val, V(this->promise), ::coroYieldType);

		// if (!coro::suspend(handle)) {
		auto result = sz::Call(::function, ::basicBlock, coro::suspend, this->handle);
		auto doneBlock = Nucleus::createBasicBlock();
		auto resumeBlock = Nucleus::createBasicBlock();
		Nucleus::createCondBr(V(result), resumeBlock, doneBlock);

		// return false; // coroutine has been stopped by the caller.
		::basicBlock = doneBlock;
		Nucleus::createRetVoid();  // coroutine return value is ignored.

		// ... <REACTOR CODE> ...
		::basicBlock = resumeBlock;
	}

	using FunctionUniquePtr = std::unique_ptr<Ice::Cfg>;

	// Generates the await function for the current coroutine.
	// Cannot use Nucleus functions that modify ::function and ::basicBlock.
	static FunctionUniquePtr generateAwaitFunction()
	{
		// bool coroutine_await(CoroutineHandle handle, YieldType* out)
		// {
		//     if (coro::isDone())
		//     {
		//         return false;
		//     }
		//     else // resume
		//     {
		//         YieldType* promise = coro::getPromisePtr(handle);
		//         *out = *promise;
		//         coro::resume(handle);
		//         return true;
		//     }
		// }

		// Subzero doesn't support bool types (IceType_i1) as a return type
		const Ice::Type ReturnType = Ice::IceType_i32;
		const Ice::Type YieldPtrType = sz::getPointerType(T(::coroYieldType));
		const Ice::Type HandleType = sz::getPointerType(Ice::IceType_void);

		Ice::Cfg *awaitFunc = sz::createFunction(::context, ReturnType, std::vector<Ice::Type>{ HandleType, YieldPtrType });
		Ice::CfgLocalAllocatorScope scopedAlloc{ awaitFunc };

		Ice::Variable *handle = awaitFunc->getArgs()[0];
		Ice::Variable *outPtr = awaitFunc->getArgs()[1];

		auto doneBlock = awaitFunc->makeNode();
		{
			// return false;
			Ice::InstRet *ret = Ice::InstRet::create(awaitFunc, ::context->getConstantInt32(0));
			doneBlock->appendInst(ret);
		}

		auto resumeBlock = awaitFunc->makeNode();
		{
			// YieldType* promise = coro::getPromisePtr(handle);
			Ice::Variable *promise = sz::Call(awaitFunc, resumeBlock, coro::getPromisePtr, handle);

			// *out = *promise;
			// Load promise value
			Ice::Variable *promiseVal = awaitFunc->makeVariable(T(::coroYieldType));
			auto load = Ice::InstLoad::create(awaitFunc, promiseVal, promise);
			resumeBlock->appendInst(load);
			// Then store it in output param
			auto store = Ice::InstStore::create(awaitFunc, promiseVal, outPtr);
			resumeBlock->appendInst(store);

			// coro::resume(handle);
			sz::Call(awaitFunc, resumeBlock, coro::resume, handle);

			// return true;
			Ice::InstRet *ret = Ice::InstRet::create(awaitFunc, ::context->getConstantInt32(1));
			resumeBlock->appendInst(ret);
		}

		// if (coro::isDone())
		// {
		//     <doneBlock>
		// }
		// else // resume
		// {
		//     <resumeBlock>
		// }
		Ice::CfgNode *bb = awaitFunc->getEntryNode();
		Ice::Variable *done = sz::Call(awaitFunc, bb, coro::isDone, handle);
		auto br = Ice::InstBr::create(awaitFunc, done, doneBlock, resumeBlock);
		bb->appendInst(br);

		return FunctionUniquePtr{ awaitFunc };
	}

	// Generates the destroy function for the current coroutine.
	// Cannot use Nucleus functions that modify ::function and ::basicBlock.
	static FunctionUniquePtr generateDestroyFunction()
	{
		// void coroutine_destroy(Nucleus::CoroutineHandle handle)
		// {
		//     coro::stop(handle); // signal and wait for coroutine to stop, and delete coroutine data
		//     return;
		// }

		const Ice::Type ReturnType = Ice::IceType_void;
		const Ice::Type HandleType = sz::getPointerType(Ice::IceType_void);

		Ice::Cfg *destroyFunc = sz::createFunction(::context, ReturnType, std::vector<Ice::Type>{ HandleType });
		Ice::CfgLocalAllocatorScope scopedAlloc{ destroyFunc };

		Ice::Variable *handle = destroyFunc->getArgs()[0];

		auto *bb = destroyFunc->getEntryNode();

		// coro::stop(handle); // signal and wait for coroutine to stop, and delete coroutine data
		sz::Call(destroyFunc, bb, coro::stop, handle);

		// return;
		Ice::InstRet *ret = Ice::InstRet::create(destroyFunc);
		bb->appendInst(ret);

		return FunctionUniquePtr{ destroyFunc };
	}

private:
	Ice::Variable *handle{};
	Ice::Variable *promise{};
};

static Nucleus::CoroutineHandle invokeCoroutineBegin(std::function<Nucleus::CoroutineHandle()> beginFunc)
{
	// This doubles up as our coroutine handle
	auto coroData = coro::createCoroutineData();

	coroData->useInternalScheduler = (marl::Scheduler::get() == nullptr);
	if(coroData->useInternalScheduler)
	{
		::getOrCreateScheduler().bind();
	}

	auto run = [=] {
		// Store handle in TLS so that the coroutine can grab it right away, before
		// any fiber switch occurs.
		coro::setHandleParam(coroData);

		ASSERT(!coroData->routineFiber);
		coroData->routineFiber = marl::Scheduler::Fiber::current();

		beginFunc();

		ASSERT(coroData->inRoutine);
		coroData->done = true;        // coroutine is done.
		coroData->terminated = true;  // signal that the coroutine data is ready for freeing.
		coroData->inRoutine = false;
		coroData->mainFiber->notify();
	};

	ASSERT(!coroData->mainFiber);
	coroData->mainFiber = marl::Scheduler::Fiber::current();

	// block until the first yield or coroutine end
	ASSERT(!coroData->inRoutine);
	coroData->inRoutine = true;
	marl::schedule(marl::Task(run, marl::Task::Flags::SameThread));
	while(coroData->inRoutine)
	{
		coroData->mainFiber->wait();
	}

	return coroData;
}

void Nucleus::createCoroutine(Type *yieldType, const std::vector<Type *> &params)
{
	// Start by creating a regular function
	createFunction(yieldType, params);

	// Save the yield type in case yield() is called
	ASSERT(::coroYieldType == nullptr);  // Only one coroutine can be generated at a time
	::coroYieldType = yieldType;
}

void Nucleus::yield(Value *val)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	Variable::materializeAll();

	// On first yield, we start generating coroutine functions
	if(!::coroGen)
	{
		::coroGen = std::make_shared<CoroutineGenerator>();
		::coroGen->generateCoroutineBegin();
	}

	ASSERT(::coroGen);
	::coroGen->generateYield(val);
}

static bool coroutineEntryAwaitStub(Nucleus::CoroutineHandle, void *yieldValue)
{
	return false;
}

static void coroutineEntryDestroyStub(Nucleus::CoroutineHandle handle)
{
}

std::shared_ptr<Routine> Nucleus::acquireCoroutine(const char *name, const Config::Edit *cfgEdit /* = nullptr */)
{
	if(::coroGen)
	{
		// Finish generating coroutine functions
		{
			Ice::CfgLocalAllocatorScope scopedAlloc{ ::function };
			finalizeFunction();
		}

		auto awaitFunc = ::coroGen->generateAwaitFunction();
		auto destroyFunc = ::coroGen->generateDestroyFunction();

		// At this point, we no longer need the CoroutineGenerator.
		::coroGen.reset();
		::coroYieldType = nullptr;

		auto routine = rr::acquireRoutine({ ::function, awaitFunc.get(), destroyFunc.get() },
		                                  { name, "await", "destroy" },
		                                  cfgEdit);

		return routine;
	}
	else
	{
		{
			Ice::CfgLocalAllocatorScope scopedAlloc{ ::function };
			finalizeFunction();
		}

		::coroYieldType = nullptr;

		// Not an actual coroutine (no yields), so return stubs for await and destroy
		auto routine = rr::acquireRoutine({ ::function }, { name }, cfgEdit);

		auto routineImpl = std::static_pointer_cast<ELFMemoryStreamer>(routine);
		routineImpl->setEntry(Nucleus::CoroutineEntryAwait, reinterpret_cast<const void *>(&coroutineEntryAwaitStub));
		routineImpl->setEntry(Nucleus::CoroutineEntryDestroy, reinterpret_cast<const void *>(&coroutineEntryDestroyStub));
		return routine;
	}
}
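
// A rough sketch of how a caller consumes the routine returned above (the casts
// and the name `begin` are illustrative assumptions; the real consumer is the
// Coroutine<> template in Reactor.hpp):
//
//     auto await = (bool (*)(Nucleus::CoroutineHandle, YieldType *))routine->getEntry(Nucleus::CoroutineEntryAwait);
//     auto destroy = (void (*)(Nucleus::CoroutineHandle))routine->getEntry(Nucleus::CoroutineEntryDestroy);
//
//     auto handle = Nucleus::invokeCoroutineBegin(*routine, [&] { return begin(args...); });
//     YieldType value;
//     while(await(handle, &value)) { /* consume value */ }
//     destroy(handle);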

Nucleus::CoroutineHandle Nucleus::invokeCoroutineBegin(Routine &routine, std::function<Nucleus::CoroutineHandle()> func)
{
	const bool isCoroutine = routine.getEntry(Nucleus::CoroutineEntryAwait) != reinterpret_cast<const void *>(&coroutineEntryAwaitStub);

	if(isCoroutine)
	{
		return rr::invokeCoroutineBegin(func);
	}
	else
	{
		// For regular routines, just invoke the begin func directly
		return func();
	}
}

}  // namespace rr