/external/swiftshader/third_party/llvm-10.0/llvm/lib/MCA/HardwareUnits/ |
D | Scheduler.cpp |
     40  Scheduler::Status Scheduler::isAvailable(const InstRef &IR) {  in isAvailable() argument
     42  Resources->canBeDispatched(IR.getInstruction()->getUsedBuffers());  in isAvailable()
     55  LSUnit::Status LSS = LSU.isAvailable(IR);  in isAvailable()
     71  InstRef &IR,  in issueInstructionImpl() argument
     73  Instruction *IS = IR.getInstruction();  in issueInstructionImpl()
     82  IS->execute(IR.getSourceIndex());  in issueInstructionImpl()
     87  LSU.onInstructionIssued(IR);  in issueInstructionImpl()
     93  IssuedSet.emplace_back(IR);  in issueInstructionImpl()
     95  LSU.onInstructionExecuted(IR);  in issueInstructionImpl()
    100  InstRef &IR,  in issueInstruction() argument
    [all …]
|
/external/llvm-project/llvm/lib/MCA/HardwareUnits/ |
D | Scheduler.cpp |
     40  Scheduler::Status Scheduler::isAvailable(const InstRef &IR) {  in isAvailable() argument
     42  Resources->canBeDispatched(IR.getInstruction()->getUsedBuffers());  in isAvailable()
     55  LSUnit::Status LSS = LSU.isAvailable(IR);  in isAvailable()
     71  InstRef &IR,  in issueInstructionImpl() argument
     73  Instruction *IS = IR.getInstruction();  in issueInstructionImpl()
     82  IS->execute(IR.getSourceIndex());  in issueInstructionImpl()
     87  LSU.onInstructionIssued(IR);  in issueInstructionImpl()
     93  IssuedSet.emplace_back(IR);  in issueInstructionImpl()
     95  LSU.onInstructionExecuted(IR);  in issueInstructionImpl()
    100  InstRef &IR,  in issueInstruction() argument
    [all …]
|
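The Scheduler.cpp matches above come from LLVM's machine-code analyzer (llvm-mca): isAvailable() reports whether an instruction can be dispatched by checking the buffered resources it needs and the load/store unit, and issueInstructionImpl() drives execution and notifies the LSUnit. Below is a minimal, self-contained C++ sketch of that status-returning hazard check; every type, name, and capacity in it is invented for illustration and is not the real llvm::mca API.

// Status-returning availability check, modeled loosely on the Scheduler
// excerpt above. All types and capacities are made up for this sketch.
#include <cstdint>
#include <iostream>

enum class SchedStatus { Available, BufferUnavailable, LoadStoreQueueFull };

struct FakeInstr {
  uint64_t UsedBuffers;   // bitmask of buffered resources this instruction needs
  bool IsMemOp;           // whether it also needs a load/store queue slot
};

class FakeScheduler {
  uint64_t FreeBuffers = 0xF;   // pretend four buffered resources are free
  unsigned LSQSlots = 2;        // pretend load/store queue capacity
public:
  SchedStatus isAvailable(const FakeInstr &I) const {
    if ((I.UsedBuffers & FreeBuffers) != I.UsedBuffers)
      return SchedStatus::BufferUnavailable;   // structural hazard on a buffer
    if (I.IsMemOp && LSQSlots == 0)
      return SchedStatus::LoadStoreQueueFull;  // load/store unit hazard
    return SchedStatus::Available;
  }
};

int main() {
  FakeScheduler S;
  FakeInstr Add{0x1, false}, Load{0x2, true}, Wide{0x10, false};
  std::cout << (S.isAvailable(Add) == SchedStatus::Available) << '\n';           // 1
  std::cout << (S.isAvailable(Load) == SchedStatus::Available) << '\n';          // 1
  std::cout << (S.isAvailable(Wide) == SchedStatus::BufferUnavailable) << '\n';  // 1
}

Returning an enum rather than a bool lets the caller (ExecuteStage in the excerpts that follow) map each hazard kind to a distinct stall event.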
/external/swiftshader/third_party/llvm-10.0/llvm/lib/MCA/Stages/ |
D | ExecuteStage.cpp |
    43  bool ExecuteStage::isAvailable(const InstRef &IR) const {  in isAvailable()
    44  if (Scheduler::Status S = HWS.isAvailable(IR)) {  in isAvailable()
    46  notifyEvent<HWStallEvent>(HWStallEvent(ET, IR));  in isAvailable()
    53  Error ExecuteStage::issueInstruction(InstRef &IR) {  in issueInstruction() argument
    58  HWS.issueInstruction(IR, Used, Pending, Ready);  in issueInstruction()
    59  Instruction &IS = *IR.getInstruction();  in issueInstruction()
    62  notifyReservedOrReleasedBuffers(IR, /* Reserved */ false);  in issueInstruction()
    64  notifyInstructionIssued(IR, Used);  in issueInstruction()
    66  notifyInstructionExecuted(IR);  in issueInstruction()
    68  if (Error S = moveToTheNextStage(IR))  in issueInstruction()
    [all …]
|
D | DispatchStage.cpp |
    38  void DispatchStage::notifyInstructionDispatched(const InstRef &IR,  in notifyInstructionDispatched() argument
    41  LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');  in notifyInstructionDispatched()
    43  HWInstructionDispatchedEvent(IR, UsedRegs, UOps));  in notifyInstructionDispatched()
    46  bool DispatchStage::checkPRF(const InstRef &IR) const {  in checkPRF()
    48  for (const WriteState &RegDef : IR.getInstruction()->getDefs())  in checkPRF()
    55  HWStallEvent(HWStallEvent::RegisterFileStall, IR));  in checkPRF()
    62  bool DispatchStage::checkRCU(const InstRef &IR) const {  in checkRCU()
    63  const unsigned NumMicroOps = IR.getInstruction()->getNumMicroOps();  in checkRCU()
    67  HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));  in checkRCU()
    71  bool DispatchStage::canDispatch(const InstRef &IR) const {  in canDispatch()
    [all …]
|
/external/llvm-project/llvm/lib/MCA/Stages/ |
D | ExecuteStage.cpp |
    43  bool ExecuteStage::isAvailable(const InstRef &IR) const {  in isAvailable()
    44  if (Scheduler::Status S = HWS.isAvailable(IR)) {  in isAvailable()
    46  notifyEvent<HWStallEvent>(HWStallEvent(ET, IR));  in isAvailable()
    53  Error ExecuteStage::issueInstruction(InstRef &IR) {  in issueInstruction() argument
    58  HWS.issueInstruction(IR, Used, Pending, Ready);  in issueInstruction()
    59  Instruction &IS = *IR.getInstruction();  in issueInstruction()
    62  notifyReservedOrReleasedBuffers(IR, /* Reserved */ false);  in issueInstruction()
    64  notifyInstructionIssued(IR, Used);  in issueInstruction()
    66  notifyInstructionExecuted(IR);  in issueInstruction()
    68  if (Error S = moveToTheNextStage(IR))  in issueInstruction()
    [all …]
|
D | DispatchStage.cpp |
    38  void DispatchStage::notifyInstructionDispatched(const InstRef &IR,  in notifyInstructionDispatched() argument
    41  LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');  in notifyInstructionDispatched()
    43  HWInstructionDispatchedEvent(IR, UsedRegs, UOps));  in notifyInstructionDispatched()
    46  bool DispatchStage::checkPRF(const InstRef &IR) const {  in checkPRF()
    48  for (const WriteState &RegDef : IR.getInstruction()->getDefs())  in checkPRF()
    55  HWStallEvent(HWStallEvent::RegisterFileStall, IR));  in checkPRF()
    62  bool DispatchStage::checkRCU(const InstRef &IR) const {  in checkRCU()
    63  const unsigned NumMicroOps = IR.getInstruction()->getNumMicroOps();  in checkRCU()
    67  HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));  in checkRCU()
    71  bool DispatchStage::canDispatch(const InstRef &IR) const {  in canDispatch()
    [all …]
|
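DispatchStage.cpp above shows the other half of the same pattern: before an instruction is dispatched, checkPRF() and checkRCU() verify that the register file and the retire control unit have capacity, and a HWStallEvent is broadcast to listeners when they do not. The sketch below models that gate-and-notify flow; the types, listener interface, and capacities are invented for illustration and are not the llvm::mca implementation.

// Dispatch gating with stall notifications, loosely mirroring the
// checkPRF()/checkRCU()/canDispatch() matches above. Toy types only.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct FakeInstRef { unsigned NumDefs; unsigned NumMicroOps; };

class FakeDispatchStage {
  unsigned FreePhysRegs = 4;   // pretend register-file headroom
  unsigned FreeROBSlots = 8;   // pretend retire-control-unit headroom
  std::vector<std::function<void(const std::string &)>> Listeners;

  void notifyStall(const std::string &Kind) const {
    for (const auto &L : Listeners) L(Kind);   // broadcast to all listeners
  }
public:
  void addListener(std::function<void(const std::string &)> L) {
    Listeners.push_back(std::move(L));
  }
  bool checkPRF(const FakeInstRef &IR) const {
    if (IR.NumDefs <= FreePhysRegs) return true;
    notifyStall("RegisterFileStall");
    return false;
  }
  bool checkRCU(const FakeInstRef &IR) const {
    if (IR.NumMicroOps <= FreeROBSlots) return true;
    notifyStall("RetireControlUnitStall");
    return false;
  }
  bool canDispatch(const FakeInstRef &IR) const {
    return checkRCU(IR) && checkPRF(IR);   // dispatch only if both units have room
  }
};

int main() {
  FakeDispatchStage D;
  D.addListener([](const std::string &S) { std::cout << "stall: " << S << '\n'; });
  std::cout << D.canDispatch({2, 3}) << '\n';   // 1: fits both units
  std::cout << D.canDispatch({6, 3}) << '\n';   // 0: register-file stall reported first
}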
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | srem64.ll |
      3  … -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
    127  ; GCN-IR-LABEL: s_test_srem:
    128  ; GCN-IR: ; %bb.0: ; %_udiv-special-cases
    129  ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
    130  ; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
    131  ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
    132  ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
    133  ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2
    134  ; GCN-IR-NEXT: s_add_i32 s10, s10, 32
    135  ; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3
    [all …]
|
D | urem64.ll |
      3  … -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
    127  ; GCN-IR-LABEL: s_test_urem_i64:
    128  ; GCN-IR: ; %bb.0: ; %_udiv-special-cases
    129  ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
    130  ; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
    131  ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
    132  ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
    133  ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2
    134  ; GCN-IR-NEXT: s_add_i32 s10, s10, 32
    135  ; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3
    [all …]
|
D | sdiv64.ll |
      3  … -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
    144  ; GCN-IR-LABEL: s_test_sdiv:
    145  ; GCN-IR: ; %bb.0: ; %_udiv-special-cases
    146  ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
    147  ; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
    148  ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
    149  ; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31
    150  ; GCN-IR-NEXT: s_mov_b32 s3, s2
    151  ; GCN-IR-NEXT: s_ashr_i32 s8, s1, 31
    152  ; GCN-IR-NEXT: s_xor_b64 s[6:7], s[2:3], s[6:7]
    [all …]
|
D | udiv64.ll |
      3  … -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
    128  ; GCN-IR-LABEL: s_test_udiv_i64:
    129  ; GCN-IR: ; %bb.0: ; %_udiv-special-cases
    130  ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
    131  ; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
    132  ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
    133  ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
    134  ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2
    135  ; GCN-IR-NEXT: s_add_i32 s10, s10, 32
    136  ; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3
    [all …]
|
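The four tests above (srem64.ll, urem64.ll, sdiv64.ll, udiv64.ll) exercise the -amdgpu-codegenprepare-expand-div64 path, which expands 64-bit division in place rather than calling a runtime routine; the %_udiv-special-cases block and the leading-zero counts (s_flbit_i32_b32) are the setup for a shift-subtract long-division loop. As a point of reference only, here is the textbook restoring-division algorithm such expansions are built around; it is a standalone model, not the pass's actual output.

// Restoring shift-subtract long division for 64-bit unsigned values.
// unsigned __int128 (a GCC/Clang extension) keeps the shifted remainder
// from overflowing for divisors above 2^63.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <utility>

std::pair<uint64_t, uint64_t> udiv64(uint64_t Num, uint64_t Den) {
  assert(Den != 0 && "division by zero");
  uint64_t Quot = 0;
  unsigned __int128 Rem = 0;
  for (int Bit = 63; Bit >= 0; --Bit) {
    Rem = (Rem << 1) | ((Num >> Bit) & 1);   // bring in the next dividend bit
    if (Rem >= Den) {                        // restoring trial subtraction
      Rem -= Den;
      Quot |= uint64_t{1} << Bit;
    }
  }
  return {Quot, static_cast<uint64_t>(Rem)}; // {Num / Den, Num % Den}
}

int main() {
  auto [Q, R] = udiv64(1000000007ull * 3 + 5, 1000000007ull);
  std::cout << Q << ' ' << R << '\n';        // prints: 3 5
}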
D | nested-loop-conditions.ll |
     3  …t -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s
    49  ; IR-LABEL: @reduced_nested_loop_conditions(
    50  ; IR-NEXT: bb:
    51  ; IR-NEXT: [[MY_TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x() #4
    52  ; IR-NEXT: [[MY_TMP1:%.*]] = getelementptr inbounds i64, i64 addrspace(3)* [[ARG:%.*]], i32 [[MY…
    53  ; IR-NEXT: [[MY_TMP2:%.*]] = load volatile i64, i64 addrspace(3)* [[MY_TMP1]]
    54  ; IR-NEXT: br label [[BB5:%.*]]
    55  ; IR: bb3:
    56  ; IR-NEXT: br i1 true, label [[BB4:%.*]], label [[BB13:%.*]]
    57  ; IR: bb4:
    [all …]
|
D | multi-divergent-exit-region.ll |
     1  …-nodes -verify -structurizecfg -verify -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s
     4  ; Add an extra verifier runs. There were some cases where invalid IR
    11  ; IR-LABEL: @multi_divergent_region_exit_ret_ret(
    12  ; IR: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %Pivot.inv)
    13  ; IR: %1 = extractvalue { i1, i64 } %0, 0
    14  ; IR: %2 = extractvalue { i1, i64 } %0, 1
    15  ; IR: br i1 %1, label %LeafBlock1, label %Flow
    17  ; IR: Flow:
    18  ; IR: %3 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
    19  ; IR: %4 = phi i1 [ %SwitchLeaf2.inv, %LeafBlock1 ], [ false, %entry ]
    [all …]
|
D | amdgpu-codegenprepare-fold-binop-select.ll |
     2  … -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-codegenprepare %s | FileCheck -check-prefix=IR %s
     6  ; IR-LABEL: @select_sdiv_lhs_const_i32(
     7  ; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], i32 200000, i32 125000
     8  ; IR-NEXT: ret i32 [[OP]]
    25  ; IR-LABEL: @select_sdiv_rhs_const_i32(
    26  ; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], i32 1000, i32 10000
    27  ; IR-NEXT: ret i32 [[OP]]
    44  ; IR-LABEL: @select_sdiv_lhs_const_v2i32(
    45  ; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], <2 x i32> <i32 666, i32 poison>, <2 x i32> <i32 …
    46  ; IR-NEXT: ret <2 x i32> [[OP]]
    [all …]
|
D | update-phi.ll |
     2  … -mtriple=amdgcn-- -S -amdgpu-unify-divergent-exit-nodes -verify %s | FileCheck -check-prefix=IR %s
     8  ; IR-LABEL: @_amdgpu_ps_main(
     9  ; IR-NEXT: .entry:
    10  ; IR-NEXT: br label [[DOTLOOPEXIT:%.*]]
    11  ; IR: .loopexit:
    12  ; IR-NEXT: br label [[N28:%.*]]
    13  ; IR: n28:
    14  ; IR-NEXT: [[DOT01:%.*]] = phi float [ 0.000000e+00, [[DOTLOOPEXIT]] ], [ [[N29:%.*]], [[TRANSIT…
    15  ; IR-NEXT: [[N29]] = fadd float [[DOT01]], 1.000000e+00
    16  ; IR-NEXT: [[N30:%.*]] = fcmp ogt float [[N29]], 4.000000e+00
    [all …]
|
D | infinite-loop.ll |
     4  … -mtriple=amdgcn-- -S -amdgpu-unify-divergent-exit-nodes -verify %s | FileCheck -check-prefix=IR %s
    18  ; IR-LABEL: @infinite_loop(
    19  ; IR-NEXT: entry:
    20  ; IR-NEXT: br label [[LOOP:%.*]]
    21  ; IR: loop:
    22  ; IR-NEXT: store volatile i32 999, i32 addrspace(1)* [[OUT:%.*]], align 4
    23  ; IR-NEXT: br label [[LOOP]]
    52  ; IR-LABEL: @infinite_loop_ret(
    53  ; IR-NEXT: entry:
    54  ; IR-NEXT: [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
    [all …]
|
/external/llvm-project/polly/test/Isl/CodeGen/OpenMP/ |
D | single_loop.ll |
     2  …allel -polly-parallel-force -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
     5  …y-parallel-force -polly-import-jscop -polly-codegen -S < %s | FileCheck %s -check-prefix=IR-STRIDE4
     7  …eduling-chunksize=43 -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-STATIC-CHUNKED
     8  …LVM -polly-scheduling=static -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-STATIC
     9  …M -polly-scheduling=dynamic -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-DYNAMIC
    10  …scheduling-chunksize=4 -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-DYNAMIC-FOUR
    11  …jscop -polly-codegen -polly-omp-backend=LLVM -S < %s | FileCheck %s -check-prefix=LIBOMP-IR-STRIDE4
    35  ; IR-LABEL: single_parallel_loop()
    36  ; IR-NEXT: entry
    37  ; IR-NEXT: %polly.par.userContext = alloca
    [all …]
|
/external/llvm-project/polly/test/GPGPU/ |
D | host-control-flow.ll |
     5  ; RUN: -polly-acc-dump-kernel-ir < %s | FileCheck %s -check-prefix=KERNEL-IR
     8  ; RUN: -S < %s | FileCheck %s -check-prefix=IR
    30  ; IR-LABEL: polly.loop_header: ; preds = %polly.loop_header, %polly.…
    31  ; IR-NEXT: %polly.indvar = phi i64 [ 0, %polly.loop_preheader ], [ %polly.indvar_next, %polly.loo…
    33  ; IR: store i64 %polly.indvar, i64* %polly_launch_0_param_1
    34  ; IR-NEXT: [[REGA:%.+]] = getelementptr [2 x i8*], [2 x i8*]* %polly_launch_0_params, i64 0, i64 1
    35  ; IR-NEXT: [[REGB:%.+]] = bitcast i64* %polly_launch_0_param_1 to i8*
    36  ; IR-NEXT: store i8* [[REGB]], i8** [[REGA]]
    37  ; IR: call i8* @polly_getKernel
    39  ; IR: call void @polly_freeKernel
    [all …]
|
D | double-parallel-loop.ll |
    11  ; RUN: FileCheck %s -check-prefix=IR
    15  ; RUN: FileCheck %s -check-prefix=KERNEL-IR
    91  ; IR: polly.split_new_and_old:
    92  ; IR-NEXT: %0 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 1024)
    93  ; IR-NEXT: %.obit = extractvalue { i64, i1 } %0, 1
    94  ; IR-NEXT: %polly.overflow.state = or i1 false, %.obit
    95  ; IR-NEXT: %.res = extractvalue { i64, i1 } %0, 0
    96  ; IR-NEXT: %1 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %.res, i64 1024)
    97  ; IR-NEXT: %.obit1 = extractvalue { i64, i1 } %1, 1
    98  ; IR-NEXT: %polly.overflow.state2 = or i1 %polly.overflow.state, %.obit1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | atomic_optimizations_mul_one.ll |
     3  …-mtriple=amdgcn-- -amdgpu-atomic-optimizer -verify-machineinstrs %s | FileCheck -check-prefix=IR %s
    12  ; IR-LABEL: @atomic_add(
    13  ; IR-NEXT: .entry:
    14  ; IR-NEXT: [[TMP0:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
    15  ; IR-NEXT: [[TMP1:%.*]] = bitcast i64 [[TMP0]] to <2 x i32>
    16  ; IR-NEXT: [[TMP2:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
    17  ; IR-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
    18  ; IR-NEXT: [[TMP4:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP2]], i32 0)
    19  ; IR-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP3]], i32 [[TMP4]])
    20  ; IR-NEXT: [[TMP6:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP0]])
    [all …]
|
/external/llvm/test/CodeGen/ARM/ |
D | vector-promotion.ll |
     1  …v7-apple-ios %s -o - -mattr=+neon -S | FileCheck --check-prefix=IR-BOTH --check-prefix=IR-NORMAL %s
     2  …r=+neon -S -stress-cgp-store-extract | FileCheck --check-prefix=IR-BOTH --check-prefix=IR-STRESS %s
     5  ; IR-BOTH-LABEL: @simpleOneInstructionPromotion
     6  ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
     7  ; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], <i32 undef, i32 1>
     8  ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[VECTOR_OR]], i32 1
     9  ; IR-BOTH-NEXT: store i32 [[EXTRACT]], i32* %dest
    10  ; IR-BOTH-NEXT: ret
    26  ; IR-BOTH-LABEL: @unsupportedInstructionForPromotion
    27  ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | vector-promotion.ll |
     1  …v7-apple-ios %s -o - -mattr=+neon -S | FileCheck --check-prefix=IR-BOTH --check-prefix=IR-NORMAL %s
     2  …r=+neon -S -stress-cgp-store-extract | FileCheck --check-prefix=IR-BOTH --check-prefix=IR-STRESS %s
     5  ; IR-BOTH-LABEL: @simpleOneInstructionPromotion
     6  ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
     7  ; IR-BOTH-NEXT: [[VECTOR_OR:%[a-zA-Z_0-9-]+]] = or <2 x i32> [[LOAD]], <i32 undef, i32 1>
     8  ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[VECTOR_OR]], i32 1
     9  ; IR-BOTH-NEXT: store i32 [[EXTRACT]], i32* %dest
    10  ; IR-BOTH-NEXT: ret
    26  ; IR-BOTH-LABEL: @unsupportedInstructionForPromotion
    27  ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1
    [all …]
|
/external/llvm-project/llvm/lib/Passes/ |
D | StandardInstrumentations.cpp |
     92  unwrapModule(Any IR, bool Force = false) {  in unwrapModule() argument
     93  if (any_isa<const Module *>(IR))  in unwrapModule()
     94  return std::make_pair(any_cast<const Module *>(IR), std::string());  in unwrapModule()
     96  if (any_isa<const Function *>(IR)) {  in unwrapModule()
     97  const Function *F = any_cast<const Function *>(IR);  in unwrapModule()
    105  if (any_isa<const LazyCallGraph::SCC *>(IR)) {  in unwrapModule()
    106  const LazyCallGraph::SCC *C = any_cast<const LazyCallGraph::SCC *>(IR);  in unwrapModule()
    118  if (any_isa<const Loop *>(IR)) {  in unwrapModule()
    119  const Loop *L = any_cast<const Loop *>(IR);  in unwrapModule()
    198  void unwrapAndPrint(raw_ostream &OS, Any IR, StringRef Banner,  in unwrapAndPrint() argument
    [all …]
|
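Both copies of StandardInstrumentations.cpp in this list (this one and the llvm-10 copy at the end) match on unwrapModule()/unwrapAndPrint(), where the new pass manager's instrumentation receives the current IR unit as an opaque Any and probes it for a Module, Function, SCC, or Loop so it can recover and print the enclosing module. The sketch below reproduces that probe-and-cast dispatch using std::any and toy structs; it is not LLVM's llvm::Any/any_isa API.

// Probe-and-cast dispatch over an opaque handle, in the spirit of the
// unwrapModule() matches above. Toy IR-unit structs stand in for the real
// Module/Function/Loop types.
#include <any>
#include <iostream>
#include <string>

struct ToyModule   { std::string Name; };
struct ToyFunction { std::string Name; const ToyModule *Parent; };
struct ToyLoop     { std::string Header; const ToyFunction *Func; };

// Walk whatever unit we were handed back up to its module.
const ToyModule *unwrapModule(const std::any &IR) {
  if (auto *M = std::any_cast<const ToyModule *>(&IR))
    return *M;
  if (auto *F = std::any_cast<const ToyFunction *>(&IR))
    return (*F)->Parent;
  if (auto *L = std::any_cast<const ToyLoop *>(&IR))
    return (*L)->Func->Parent;
  return nullptr;   // unknown unit kind
}

int main() {
  ToyModule M{"m"};
  ToyFunction F{"f", &M};
  ToyLoop L{"loop.header", &F};
  const ToyLoop *LP = &L;                              // store a const pointer,
  std::cout << unwrapModule(std::any(LP))->Name << '\n';  // prints "m"
}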
/external/llvm-project/llvm/test/CodeGen/NVPTX/ |
D | lower-aggr-copies.ll |
     2  ; RUN: opt < %s -S -nvptx-lower-aggr-copies | FileCheck %s --check-prefix IR
    19  ; IR-LABEL: @memcpy_caller
    20  ; IR: entry:
    21  ; IR: [[Cond:%[0-9]+]] = icmp ne i64 %n, 0
    22  ; IR: br i1 [[Cond]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion
    24  ; IR: loop-memcpy-expansion:
    25  ; IR: %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %loop-memcpy-expansion ]
    26  ; IR: [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
    27  ; IR: [[Load:%[0-9]+]] = load i8, i8* [[SrcGep]]
    28  ; IR: [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
    [all …]
|
/external/llvm-project/llvm/test/Other/ |
D | change-printer.ll |
     8  ; Check that only the passes that change the IR are printed and that the
    33  ; Check that repeated passes that change the IR are printed and that the
    35  ; instsimplify is run on f, it does not change the IR
    60  ; CHECK-SIMPLE: *** IR Dump At Start: ***
    62  ; CHECK-SIMPLE: *** IR Dump After VerifierPass (module) omitted because no change ***
    63  ; CHECK-SIMPLE: *** IR Dump After InstSimplifyPass *** (function: g)
    65  ; CHECK-SIMPLE: *** IR Pass PassManager{{.*}} (function: g) ignored ***
    66  ; CHECK-SIMPLE: *** IR Dump After InstSimplifyPass *** (function: f)
    68  ; CHECK-SIMPLE: *** IR Pass PassManager{{.*}} (function: f) ignored ***
    69  ; CHECK-SIMPLE: *** IR Pass ModuleToFunctionPassAdaptor (module) ignored ***
    [all …]
|
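change-printer.ll exercises the -print-changed instrumentation, whose banners ("*** IR Dump After ... ***", "... omitted because no change ***") are what the CHECK-SIMPLE lines above match. The underlying idea is snapshot-run-compare: capture a representation of the IR before a pass, re-serialize afterwards, and print only when the two differ. A toy version follows; every type and name in it is invented for illustration, not the real PassInstrumentation hooks.

// "Print only on change" sketch: textual snapshots stand in for hashing or
// re-printing a real module around each pass.
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct ToyIR { std::string Text; };
using Pass = std::function<void(ToyIR &)>;

void runWithChangePrinter(ToyIR &IR,
                          const std::vector<std::pair<std::string, Pass>> &Passes) {
  std::cout << "*** IR Dump At Start: ***\n" << IR.Text << '\n';
  for (const auto &[Name, P] : Passes) {
    std::string Before = IR.Text;   // snapshot before the pass runs
    P(IR);
    if (IR.Text == Before)
      std::cout << "*** IR Dump After " << Name << " omitted because no change ***\n";
    else
      std::cout << "*** IR Dump After " << Name << " ***\n" << IR.Text << '\n';
  }
}

int main() {
  ToyIR IR{"define i32 @g() { ret i32 add (i32 2, i32 2) }"};
  runWithChangePrinter(IR, {
      {"VerifierPass", [](ToyIR &) { /* checks only, never mutates */ }},
      {"InstSimplifyPass", [](ToyIR &M) { M.Text = "define i32 @g() { ret i32 4 }"; }},
  });
}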
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Passes/ |
D | StandardInstrumentations.cpp |
     34  Optional<std::pair<const Module *, std::string>> unwrapModule(Any IR) {  in unwrapModule() argument
     35  if (any_isa<const Module *>(IR))  in unwrapModule()
     36  return std::make_pair(any_cast<const Module *>(IR), std::string());  in unwrapModule()
     38  if (any_isa<const Function *>(IR)) {  in unwrapModule()
     39  const Function *F = any_cast<const Function *>(IR);  in unwrapModule()
     46  if (any_isa<const LazyCallGraph::SCC *>(IR)) {  in unwrapModule()
     47  const LazyCallGraph::SCC *C = any_cast<const LazyCallGraph::SCC *>(IR);  in unwrapModule()
     58  if (any_isa<const Loop *>(IR)) {  in unwrapModule()
     59  const Loop *L = any_cast<const Loop *>(IR);  in unwrapModule()
    106  void unwrapAndPrint(Any IR, StringRef Banner, bool ForceModule = false) {  in unwrapAndPrint() argument
    [all …]
|