/external/oboe/samples/RhythmGame/third_party/glm/simd/
geometric.h
  39 glm_vec4 const add0 = _mm_add_ps(mul0, swp0); in glm_vec4_dot()
  40 glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); in glm_vec4_dot()
  41 glm_vec4 const add1 = _mm_add_ps(add0, swp1); in glm_vec4_dot()
  58 glm_vec4 const add0 = _mm_add_ps(mov0, mul0); in glm_vec1_dot()
  59 glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1); in glm_vec1_dot()
  60 glm_vec4 const add1 = _mm_add_ss(add0, swp1); in glm_vec1_dot()
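The glm_vec4_dot hits above are the classic SSE horizontal-add dot product: multiply lane-wise, swap adjacent lanes and add, then reverse and add again. A minimal scalar-returning sketch of that same shuffle/add sequence (vec4_dot_sse is a made-up helper name, not GLM's API):

    #include <xmmintrin.h>

    /* Sketch of the shuffle/add idiom shown in glm_vec4_dot; not GLM code. */
    static inline float vec4_dot_sse(__m128 a, __m128 b)
    {
        __m128 const mul0 = _mm_mul_ps(a, b);                                    /* ax*bx, ay*by, az*bz, aw*bw */
        __m128 const swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1)); /* swap adjacent lanes        */
        __m128 const add0 = _mm_add_ps(mul0, swp0);                              /* pairwise partial sums      */
        __m128 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); /* reverse the lanes          */
        __m128 const add1 = _mm_add_ps(add0, swp1);                              /* full sum in every lane     */
        return _mm_cvtss_f32(add1);                                              /* extract lane 0             */
    }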
common.h
  118 glm_vec4 const add0 = glm_vec4_add(x, or0); in glm_vec4_round()
  119 glm_vec4 const sub0 = glm_vec4_sub(add0, or0); in glm_vec4_round()
  150 glm_vec4 const add0 = glm_vec4_add(x, or0); in glm_vec4_roundEven() local
  151 glm_vec4 const sub0 = glm_vec4_sub(add0, or0); in glm_vec4_roundEven()
  163 glm_vec4 const add0 = glm_vec4_add(rnd0, and0); in glm_vec4_ceil()
  164 return add0; in glm_vec4_ceil()
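The glm_vec4_round/roundEven hits above look like the add-then-subtract-2^23 rounding trick: or0 appears to be a sign-matched 2^23, so x + or0 loses its fraction bits and subtracting or0 back leaves the rounded value. A scalar sketch under that assumption (round_magic is a hypothetical name, and the trick only holds for |x| < 2^23):

    #include <math.h>

    /* Sketch of the rounding idiom the glm_vec4_round hits suggest; not GLM code. */
    static float round_magic(float x)
    {
        float const or0  = copysignf(8388608.0f, x); /* 2^23, carrying the sign of x          */
        float const add0 = x + or0;                  /* fraction bits are rounded away here   */
        float const sub0 = add0 - or0;               /* the rounded value (ties per FPU mode) */
        return sub0;
    }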
/external/tensorflow/tensorflow/compiler/xla/service/
fusion_node_indexing_evaluation_test.cc
  174 HloInstruction* add0 = fusion->mutable_operand(0); in TEST_F() local
  175 EXPECT_EQ(add0->opcode(), HloOpcode::kAdd); in TEST_F()
  177 EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4); in TEST_F()
  178 instruction_fusion.Fuse(add0, fusion); in TEST_F()
  205 HloInstruction* add0 = fusion->mutable_operand(0); in TEST_F() local
  206 EXPECT_EQ(add0->opcode(), HloOpcode::kAdd); in TEST_F()
  210 EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4); in TEST_F()
bfloat16_propagation_test.cc
  108 HloInstruction* add0 = builder.AddInstruction( in TEST_F() local
  111 HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, b)); in TEST_F()
  133 EXPECT_FALSE(OutputsBF16(add0)); in TEST_F()
  273 HloInstruction* add0 = builder.AddInstruction( in TEST_F() local
  284 builder.AddInstruction(HloInstruction::CreateTuple({add0, add1, add2})); in TEST_F()
  292 add0->shape(), in TEST_F()
  309 EXPECT_TRUE(OutputsBF16(add0)); in TEST_F()
  324 HloInstruction* add0 = builder.AddInstruction( in TEST_F() local
  333 builder.AddInstruction(HloInstruction::CreateTuple({add0, add1})); in TEST_F()
  556 HloInstruction* add0 = builder.AddInstruction( in TEST_F() local
  [all …]
/external/llvm-project/llvm/test/CodeGen/SystemZ/
frame-08.ll
  41 %add0 = add i32 %l0, %l0
  42 %add1 = add i32 %l1, %add0
  54 store volatile i32 %add0, i32 *%ptr
  93 %add0 = add i32 %l0, %l0
  94 %add1 = add i32 %l1, %add0
  98 store volatile i32 %add0, i32 *%ptr
  144 %add0 = add i32 %l0, %l0
  145 %add1 = add i32 %l1, %add0
  157 store volatile i32 %add0, i32 *%ptr
  195 %add0 = add i32 %l0, %l0
  [all …]
frame-04.ll
  49 %add0 = fadd fp128 %l0, %l0
  50 %add1 = fadd fp128 %l1, %add0
  57 store volatile fp128 %add0, fp128 *%ptr
  104 %add0 = fadd fp128 %l0, %l0
  105 %add1 = fadd fp128 %l1, %add0
  111 store volatile fp128 %add0, fp128 *%ptr
  147 %add0 = fadd fp128 %l0, %l0
  148 %add1 = fadd fp128 %l1, %add0
  152 store volatile fp128 %add0, fp128 *%ptr
  178 %add0 = fadd fp128 %l0, %l0
  [all …]
frame-05.ll
  49 %add0 = add i32 %l0, %l0
  50 %add1 = add i32 %l1, %add0
  63 store volatile i32 %add0, i32 *%ptr
  116 %add0 = add i32 %l0, %l0
  117 %add1 = add i32 %l1, %add0
  129 store volatile i32 %add0, i32 *%ptr
  172 %add0 = add i32 %l0, %l0
  173 %add1 = add i32 %l1, %add0
  178 store volatile i32 %add0, i32 *%ptr
  207 %add0 = add i32 %l0, %l0
  [all …]
frame-06.ll
  46 %add0 = add i64 %l0, %l0
  47 %add1 = add i64 %l1, %add0
  60 store volatile i64 %add0, i64 *%ptr
  113 %add0 = add i64 %l0, %l0
  114 %add1 = add i64 %l1, %add0
  126 store volatile i64 %add0, i64 *%ptr
  169 %add0 = add i64 %l0, %l0
  170 %add1 = add i64 %l1, %add0
  175 store volatile i64 %add0, i64 *%ptr
  204 %add0 = add i64 %l0, %l0
  [all …]
frame-02.ll
  56 %add0 = fadd float %l0, %l0
  57 %add1 = fadd float %l1, %add0
  72 store volatile float %add0, float *%ptr
  137 %add0 = fadd float %l0, %l0
  138 %add1 = fadd float %l1, %add0
  152 store volatile float %add0, float *%ptr
  197 %add0 = fadd float %l0, %l0
  198 %add1 = fadd float %l1, %add0
  206 store volatile float %add0, float *%ptr
  240 %add0 = fadd float %l0, %l0
  [all …]
frame-03.ll
  58 %add0 = fadd double %l0, %l0
  59 %add1 = fadd double %l1, %add0
  74 store volatile double %add0, double *%ptr
  139 %add0 = fadd double %l0, %l0
  140 %add1 = fadd double %l1, %add0
  154 store volatile double %add0, double *%ptr
  199 %add0 = fadd double %l0, %l0
  200 %add1 = fadd double %l1, %add0
  208 store volatile double %add0, double *%ptr
  242 %add0 = fadd double %l0, %l0
  [all …]
cond-move-10.ll
  39 %add0 = add i64 %ret, %val0
  40 %add1 = add i64 %add0, %val1
  85 %add0 = add i32 %ret, %val0
  86 %add1 = add i32 %add0, %val1
/external/llvm/test/CodeGen/SystemZ/
frame-08.ll
  41 %add0 = add i32 %l0, %l0
  42 %add1 = add i32 %l1, %add0
  54 store volatile i32 %add0, i32 *%ptr
  93 %add0 = add i32 %l0, %l0
  94 %add1 = add i32 %l1, %add0
  98 store volatile i32 %add0, i32 *%ptr
  144 %add0 = add i32 %l0, %l0
  145 %add1 = add i32 %l1, %add0
  157 store volatile i32 %add0, i32 *%ptr
  195 %add0 = add i32 %l0, %l0
  [all …]
frame-04.ll
  49 %add0 = fadd fp128 %l0, %l0
  50 %add1 = fadd fp128 %l1, %add0
  57 store volatile fp128 %add0, fp128 *%ptr
  104 %add0 = fadd fp128 %l0, %l0
  105 %add1 = fadd fp128 %l1, %add0
  111 store volatile fp128 %add0, fp128 *%ptr
  147 %add0 = fadd fp128 %l0, %l0
  148 %add1 = fadd fp128 %l1, %add0
  152 store volatile fp128 %add0, fp128 *%ptr
  178 %add0 = fadd fp128 %l0, %l0
  [all …]
frame-05.ll
  49 %add0 = add i32 %l0, %l0
  50 %add1 = add i32 %l1, %add0
  63 store volatile i32 %add0, i32 *%ptr
  116 %add0 = add i32 %l0, %l0
  117 %add1 = add i32 %l1, %add0
  129 store volatile i32 %add0, i32 *%ptr
  172 %add0 = add i32 %l0, %l0
  173 %add1 = add i32 %l1, %add0
  178 store volatile i32 %add0, i32 *%ptr
  207 %add0 = add i32 %l0, %l0
  [all …]
frame-06.ll
  46 %add0 = add i64 %l0, %l0
  47 %add1 = add i64 %l1, %add0
  60 store volatile i64 %add0, i64 *%ptr
  113 %add0 = add i64 %l0, %l0
  114 %add1 = add i64 %l1, %add0
  126 store volatile i64 %add0, i64 *%ptr
  169 %add0 = add i64 %l0, %l0
  170 %add1 = add i64 %l1, %add0
  175 store volatile i64 %add0, i64 *%ptr
  204 %add0 = add i64 %l0, %l0
  [all …]
frame-03.ll
  58 %add0 = fadd double %l0, %l0
  59 %add1 = fadd double %l1, %add0
  74 store volatile double %add0, double *%ptr
  139 %add0 = fadd double %l0, %l0
  140 %add1 = fadd double %l1, %add0
  154 store volatile double %add0, double *%ptr
  199 %add0 = fadd double %l0, %l0
  200 %add1 = fadd double %l1, %add0
  208 store volatile double %add0, double *%ptr
  242 %add0 = fadd double %l0, %l0
  [all …]
frame-02.ll
  56 %add0 = fadd float %l0, %l0
  57 %add1 = fadd float %l1, %add0
  72 store volatile float %add0, float *%ptr
  137 %add0 = fadd float %l0, %l0
  138 %add1 = fadd float %l1, %add0
  152 store volatile float %add0, float *%ptr
  197 %add0 = fadd float %l0, %l0
  198 %add1 = fadd float %l1, %add0
  206 store volatile float %add0, float *%ptr
  240 %add0 = fadd float %l0, %l0
  [all …]
/external/tensorflow/tensorflow/compiler/tf2tensorrt/segment/
segment_test.cc
  147 auto add0 = ops::Add(s.WithOpName("add0"), feed, feed); in TEST_F() local
  149 auto add2 = ops::Add(s.WithOpName("add2"), add0, add1); in TEST_F()
  150 auto add3 = ops::Add(s.WithOpName("add3"), add0, add2); in TEST_F()
  195 auto add0 = ops::Add(s.WithOpName("add0"), feed, feed); in TEST_F() local
  197 auto add2 = ops::Add(s.WithOpName("add2"), add0, add1); in TEST_F()
  198 auto add3 = ops::Add(s.WithOpName("add3"), add0, add2); in TEST_F()
  223 add0.node()->set_assigned_device_name(kGpu0); in TEST_F()
  258 auto add0 = ops::Add(s.WithOpName("add0"), feed, feed); in TEST_F() local
  260 auto add2 = ops::Add(s.WithOpName("add2"), add0, add1); in TEST_F()
  261 auto add3 = ops::Add(s.WithOpName("add3"), add0, add2); in TEST_F()
  [all …]
/external/tensorflow/tensorflow/compiler/mlir/xla/tests/translate/
location.hlotxt
  11 %add0 = f32[4] add(f32[4] %arg0, f32[4] %arg1)
  13 …%add1 = f32[4] add(f32[4] %add0, f32[4] %arg1), metadata={op_type="Add" op_name="embedded_inferenc…
  17 // CHECK: [[LOC0]] = loc("add0")
/external/llvm-project/llvm/test/Transforms/InstCombine/
reassociate-nuw.ll
  9 %add0 = add nuw i32 %x, 4
  10 %add1 = add nuw i32 %add0, 64
  41 %add0 = add i32 %x, 4
  42 %add1 = add nuw i32 %add0, 64
  51 %add0 = add nuw i32 %x, 4
  52 %add1 = add i32 %add0, 64
  62 %add0 = add nuw i32 %x, 4
  64 %add2 = add nuw i32 %add0, %add1
/external/tensorflow/tensorflow/compiler/jit/
cluster_scoping_pass_test.cc
  107 Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0")); in TEST() local
  110 Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0")); in TEST()
  153 Node* add0 = ops::BinaryOp("Add", a, b, in TEST() local
  159 Node* relu0 = ops::UnaryOp("Relu", add0, in TEST()
deadness_analysis_test.cc
  412 Output add0 = ops::Add(root.WithOpName("add0"), m0.output, m1.output); in TEST() local
  415 Output add2 = ops::Add(root.WithOpName("add2"), add0, add1); in TEST()
  434 Output add0 = in TEST() local
  439 ops::Merge m0(root.WithOpName("m0"), {add0, add1}); in TEST()
  440 ops::Merge m1(root.WithOpName("m1"), {add0, add1}); in TEST()
  459 Output add0 = in TEST() local
  463 ops::Merge or2(root.WithOpName("or2"), {add0, add1}); in TEST()
  485 Output add0 = ops::Add(root.WithOpName("add0"), m0.output, sw_2.output_false); in TEST() local
  493 Output add3 = ops::Add(root.WithOpName("add3"), add0, m1.output); in TEST()
  572 Output add0 = ops::Add(root.WithOpName("add0"), iv0, iv1); in TEST() local
  [all …]
/external/llvm-project/llvm/test/Transforms/Inline/AMDGPU/
amdgpu-inline-alloca-argument.ll
  9 %add0 = fadd float %div, 1.0
  10 %add1 = fadd float %add0, 1.0
  28 %add0 = fadd float %div, 1.0
  29 %add1 = fadd float %add0, 1.0
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
fold-imm-f16-f32.mir
  7 %f16.add0 = fadd half %f16.val0, 0xH3C00
  9 store volatile half %f16.add0, half addrspace(1)* undef
  18 %f16.add0 = fadd half %f16.val0, 0xH3C00
  20 store volatile half %f16.add0, half addrspace(1)* undef
  29 %f16.add0 = fadd half %f16.val0, 0xH3C00
  31 store volatile half %f16.add0, half addrspace(1)* undef
  40 %f16.add0 = fadd half %f16.val0, 0xH3C00
  43 store volatile half %f16.add0, half addrspace(1)* undef
  52 %f16.add0 = fadd half %f16.val0, 0xH0001
  54 store volatile half %f16.add0, half addrspace(1)* undef
  [all …]
mad_int24.ll
  39 %add0 = add nsw i32 %mul0, %c
  41 %shl.2 = shl i32 %add0, 8
  63 %add0 = add nsw i32 %mul0, %c
  65 %shl.2 = shl i32 %add0, 8
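The mad_int24.ll hits are the tail of AMDGPU's 24-bit multiply-add pattern: the test presumably sign-extends the operands from 24 bits, multiplies them, and adds %c (the later shl by 8 re-narrows the result). A plain-C sketch of that arithmetic, with made-up helper names sext24/mad_i24:

    #include <stdint.h>

    /* Sketch of a 24-bit signed multiply-add; illustration only, not from the test. */
    static int32_t sext24(int32_t x)
    {
        /* Sign-extend the low 24 bits (portable form of the shl/ashr-by-8 idiom). */
        return (x & 0x007fffff) - (x & 0x00800000);
    }

    static int32_t mad_i24(int32_t a, int32_t b, int32_t c)
    {
        return sext24(a) * sext24(b) + c; /* mirrors "%add0 = add nsw i32 %mul0, %c" */
    }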