/external/swiftshader/third_party/llvm-7.0/llvm/docs/ |
D | AMDGPUAsmGFX7.rst |
      21 …ds_add_rtn_u32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      22 …ds_add_rtn_u64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      25 …ds_add_u32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      26 …ds_add_u64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      27 …ds_and_b32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      28 …ds_and_b64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      29 …ds_and_rtn_b32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      30 …ds_and_rtn_b64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      34 …ds_cmpst_b32 src0, src1, src2 :ref:`ds_offset16<amdgpu_synid_ds_of…
      35 …ds_cmpst_b64 src0, src1, src2 :ref:`ds_offset16<amdgpu_synid_ds_of…
      [all …]
|
D | AMDGPUAsmGFX9.rst |
      21 …ds_add_f32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      22 …ds_add_rtn_f32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      23 …ds_add_rtn_u32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      24 …ds_add_rtn_u64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      28 …ds_add_u32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      29 …ds_add_u64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      30 …ds_and_b32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      31 …ds_and_b64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      32 …ds_and_rtn_b32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      33 …ds_and_rtn_b64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      [all …]
|
D | AMDGPUAsmGFX8.rst |
      21 …ds_add_f32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      22 …ds_add_rtn_f32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      23 …ds_add_rtn_u32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      24 …ds_add_rtn_u64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      28 …ds_add_u32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      29 …ds_add_u64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      30 …ds_and_b32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      31 …ds_and_b64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      32 …ds_and_rtn_b32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      33 …ds_and_rtn_b64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
      [all …]
|
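The rows above document the LDS (local data share) atomics across GFX generations; the "rtn" variants return the pre-operation memory value in dst, while the plain forms discard it. As orientation only, a minimal single-threaded C++ model of ds_add_rtn_u32, assuming the usual DS operand convention (src0 = address, src1 = data) and treating the 16-bit immediate as a byte offset — all names hypothetical:

    #include <cstdint>

    // Hypothetical scalar model of ds_add_rtn_u32 (the real instruction is
    // an atomic read-modify-write on LDS). dst gets the contents *before*
    // the add; the non-rtn ds_add_u32 would simply drop this value.
    uint32_t ds_add_rtn_u32(uint8_t *lds, uint32_t src0_addr,
                            uint32_t src1_data, uint16_t offset16) {
        uint32_t *p = reinterpret_cast<uint32_t *>(lds + src0_addr + offset16);
        uint32_t old = *p;     // previous value, returned in dst
        *p = old + src1_data;  // memory receives the sum
        return old;
    }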
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Hexagon/ |
D | HexagonMapAsm2IntrinV65.gen.td |
      10 …at<(int_hexagon_A6_vcmpbeq_notany DoubleRegs:$src1, DoubleRegs:$src2), (A6_vcmpbeq_notany DoubleRe…
      11 …ef: Pat<(int_hexagon_V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat …
      12 …at<(int_hexagon_V6_vasruwuhsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat …
      13 …ef: Pat<(int_hexagon_V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat …
      14 …at<(int_hexagon_V6_vasruhubsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat …
      15 …Pat<(int_hexagon_V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsa…
      16 …int_hexagon_V6_vasruhubrndsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsa…
      17 def: Pat<(int_hexagon_V6_vabsb HvxVR:$src1), (V6_vabsb HvxVR:$src1)>, Requires<[HasV65, UseHVX]>;
      18 def: Pat<(int_hexagon_V6_vabsb_128B HvxVR:$src1), (V6_vabsb HvxVR:$src1)>, Requires<[HasV65, UseHVX…
      19 def: Pat<(int_hexagon_V6_vabsb_sat HvxVR:$src1), (V6_vabsb_sat HvxVR:$src1)>, Requires<[HasV65, Use…
      [all …]
|
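Every line of this generated map is the same single-pattern shape: a Pat that rewrites one LLVM intrinsic call into the matching HVX machine instruction, plus a "_128B" twin that selects the same instruction for double-width vectors. Taking the vabsb defs at lines 17–19 as the example, the operation being selected is a per-byte absolute value; a scalar C++ sketch of that semantic, with the vector width abstracted to a lane count:

    #include <cstdint>
    #include <cstddef>

    // Per-byte absolute value, as V6_vabsb applies across an HVX register.
    // Note INT8_MIN stays INT8_MIN here (wraparound); the separate
    // V6_vabsb_sat variant exists to clamp that case to +127 instead.
    void vabsb(const int8_t *src1, int8_t *dst, size_t lanes) {
        for (size_t i = 0; i < lanes; ++i)
            dst[i] = (src1[i] < 0) ? (int8_t)-src1[i] : src1[i];
    }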
D | HexagonMapAsm2IntrinV62.gen.td |
      11 def: Pat<(IntID HvxVR:$src1, IntRegs:$src2),
      12 (MI HvxVR:$src1, IntRegs:$src2)>;
      13 def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, IntRegs:$src2),
      14 (MI HvxVR:$src1, IntRegs:$src2)>;
      18 def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
      19 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
      20 def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
      22 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
      26 def: Pat<(IntID HvxVR:$src1, HvxVR:$src2),
      27 (MI HvxVR:$src1, HvxVR:$src2)>;
      [all …]
|
D | HexagonIntrinsicsV60.td |
      16 def : Pat < (v16i32 (int_hexagon_V6_lo (v32i32 HvxWR:$src1))),
      17 (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_lo)) >;
      19 def : Pat < (v16i32 (int_hexagon_V6_hi (v32i32 HvxWR:$src1))),
      20 (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_hi)) >;
      22 def : Pat < (v32i32 (int_hexagon_V6_lo_128B (v64i32 HvxWR:$src1))),
      23 (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_lo)) >;
      25 def : Pat < (v32i32 (int_hexagon_V6_hi_128B (v64i32 HvxWR:$src1))),
      26 (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_hi)) >;
      29 def : Pat <(v512i1 (bitconvert (v16i32 HvxVR:$src1))),
      30 (v512i1 (V6_vandvrt(v16i32 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;
      [all …]
|
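These patterns need no instruction at all: int_hexagon_V6_lo and V6_hi lower to EXTRACT_SUBREG, i.e. the low or high single vector (v16i32) is carved out of a vector-pair register (v32i32 in HvxWR) as a subregister view. A C++ analogue, assuming a pair is simply two halves stored side by side:

    #include <array>
    #include <cstdint>

    // Hypothetical model of an HVX vector pair: lo/hi are views of the
    // vsub_lo / vsub_hi subregisters, not computed values.
    struct HvxPair {
        std::array<int32_t, 16> lo;  // vsub_lo
        std::array<int32_t, 16> hi;  // vsub_hi
    };

    const std::array<int32_t, 16> &v6_lo(const HvxPair &src1) { return src1.lo; }
    const std::array<int32_t, 16> &v6_hi(const HvxPair &src1) { return src1.hi; }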
/external/mesa3d/prebuilt-intermediates/nir/ |
D | nir_builder_opcodes.h |
      39 nir_ball_fequal2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_fequal2() argument
      41 return nir_build_alu(build, nir_op_ball_fequal2, src0, src1, NULL, NULL); in nir_ball_fequal2()
      44 nir_ball_fequal3(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_fequal3() argument
      46 return nir_build_alu(build, nir_op_ball_fequal3, src0, src1, NULL, NULL); in nir_ball_fequal3()
      49 nir_ball_fequal4(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_fequal4() argument
      51 return nir_build_alu(build, nir_op_ball_fequal4, src0, src1, NULL, NULL); in nir_ball_fequal4()
      54 nir_ball_iequal2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_iequal2() argument
      56 return nir_build_alu(build, nir_op_ball_iequal2, src0, src1, NULL, NULL); in nir_ball_iequal2()
      59 nir_ball_iequal3(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_iequal3() argument
      61 return nir_build_alu(build, nir_op_ball_iequal3, src0, src1, NULL, NULL); in nir_ball_iequal3()
      [all …]
|
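Every helper in this generated header has the same two-line body: take the builder plus however many sources the opcode needs, and forward to nir_build_alu with the unused source slots passed as NULL. One helper reconstructed in full from the snippet (the return type, elided above, is nir_ssa_def * throughout this header):

    static inline nir_ssa_def *
    nir_ball_fequal2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
    {
       /* Two-source ALU op, so the third and fourth source slots are NULL. */
       return nir_build_alu(build, nir_op_ball_fequal2, src0, src1, NULL, NULL);
    }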
D | nir_constant_expressions.c |
      438 const struct float16_vec src1 = { in evaluate_ball_fequal2() local
      447 dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y)); in evaluate_ball_fequal2()
      465 const struct float32_vec src1 = { in evaluate_ball_fequal2() local
      474 dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y)); in evaluate_ball_fequal2()
      492 const struct float64_vec src1 = { in evaluate_ball_fequal2() local
      501 dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y)); in evaluate_ball_fequal2()
      533 const struct float16_vec src1 = { in evaluate_ball_fequal3() local
      542 … dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y) && (src0.z == src1.z)); in evaluate_ball_fequal3()
      560 const struct float32_vec src1 = { in evaluate_ball_fequal3() local
      569 … dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y) && (src0.z == src1.z)); in evaluate_ball_fequal3()
      [all …]
|
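This is the constant-folding counterpart of the builders above: for each bit size (float16/32/64_vec), the generated evaluator gathers both sources into a fixed xyzw struct, compares the populated components, and splats the all-equal boolean across every channel of dst. A self-contained sketch of what one such clause computes, with stand-in types for the generated structs:

    #include <stdbool.h>

    /* Stand-ins for the generated per-bit-size vector structs. */
    struct float32_vec { float x, y, z, w; };
    struct bool_vec    { bool  x, y, z, w; };

    static void ball_fequal2(struct bool_vec *dst,
                             const struct float32_vec *src0,
                             const struct float32_vec *src1)
    {
       bool eq = (src0->x == src1->x) && (src0->y == src1->y);
       dst->x = dst->y = dst->z = dst->w = eq;   /* splat, as in the snippet */
    }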
/external/llvm/lib/Target/Hexagon/ |
D | HexagonInstrInfoV60.td |
      60 : V6_LDInst <(outs VectorRegs:$dst), (ins IntRegs:$src1, s4_6Imm:$src2),
      65 : V6_LDInst <(outs VectorRegs128B:$dst), (ins IntRegs:$src1, s4_7Imm:$src2),
      69 def V6_vL32b_ai : T_vload_ai <"$dst = vmem($src1+#$src2)">,
      71 def V6_vL32b_nt_ai : T_vload_ai <"$dst = vmem($src1+#$src2):nt">,
      74 def V6_vL32b_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2)">,
      76 def V6_vL32b_nt_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2):nt">,
      81 def V6_vL32Ub_ai : T_vload_ai <"$dst = vmemu($src1+#$src2)">,
      83 def V6_vL32Ub_ai_128B : T_vload_ai_128B <"$dst = vmemu($src1+#$src2)">,
      89 def V6_vL32b_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2)">,
      91 def V6_vL32b_nt_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2):nt">,
      [all …]
|
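All of these defs are flavors of one aligned vector load, "$dst = vmem($src1+#$src2)": base register plus a small signed immediate counted in whole vectors, with :nt marking a non-temporal hint, vmemu the unaligned form, and .cur a "current" load consumable in the same packet. Reading the immediate types by LLVM's usual sN_MImm convention, s4_6Imm scales the 4-bit immediate by 64 bytes and s4_7Imm by 128 for the _128B forms — an assumption this C++ model bakes in:

    #include <cstdint>
    #include <cstring>

    // Hypothetical model of V6_vL32b_ai addressing in the 64-byte HVX
    // mode: the immediate selects a vector-sized slot relative to base.
    void vL32b_ai(uint8_t dst[64], const uint8_t *src1_base, int src2_imm) {
        std::memcpy(dst, src1_base + (std::int64_t)src2_imm * 64, 64);
    }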
D | HexagonInstrInfoV5.td |
      48 (sra (i64 (add (i64 (sra I64:$src1, u6ImmPred:$src2)), 1)),
      57 : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
      58 "$dst = asrrnd($src1, #$src2)">;
      82 def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1),
      83 "$dst = CONST64(#$src1)",
      84 [(set F64:$dst, fpimm:$src1)]>,
      88 def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
      89 "$dst = CONST32(#$src1)",
      90 [(set F32:$dst, fpimm:$src1)]>,
      102 def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32Ext:$src1),
      [all …]
|
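Line 48's dag spells out what the asrrnd mnemonic at line 58 means: arithmetic shift right by the immediate, add one, shift right once more — a shift with round-to-nearest. In C++ (64-bit, matching the I64/DoubleRegs operands; >> on a negative signed value is taken as arithmetic, which mainstream compilers guarantee):

    #include <cstdint>

    // Rounding arithmetic shift right, per the pattern
    //   (sra (add (sra src1, src2), 1), 1).
    int64_t asrrnd(int64_t src1, unsigned src2) {
        return ((src1 >> src2) + 1) >> 1;
    }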
D | HexagonIntrinsicsV60.td |
      29 (ins VecDblRegs:$src1),
      30 "$dst=vassignp_W($src1)",
      31 [(set VecDblRegs:$dst, (int_hexagon_V6_vassignp VecDblRegs:$src1))]>;
      35 (ins VecDblRegs128B:$src1),
      36 "$dst=vassignp_W_128B($src1)",
      38 VecDblRegs128B:$src1))]>;
      42 (ins VecDblRegs:$src1),
      43 "$dst=lo_W($src1)",
      44 [(set VectorRegs:$dst, (int_hexagon_V6_lo VecDblRegs:$src1))]>;
      48 (ins VecDblRegs:$src1),
      [all …]
|
/external/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
      89 (ins VR128:$src1, VR128:$src2),
      90 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      92 (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
      95 (ins VR128:$src1, i128mem:$src2),
      96 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      98 (vt128 (OpNode (vt128 VR128:$src1),
      102 (ins i128mem:$src1, VR128:$src2),
      103 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      105 (vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))),
      128 (ins VR128:$src1, u8imm:$src2),
      [all …]
|
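The recurring !strconcat string is LLVM's dual-syntax template: the text before "|" is the AT&T operand order ($src2, $src1, $dst), the text after it is Intel order ($dst, $src1, $src2). The rr/rm/mr records describe one logical operation whose 128-bit input comes from a register, or is loaded from memory in either operand position (the mr form at line 102 bitconverts a loadv2i64 of $src1). Roughly, in C++ with a stand-in vector type:

    #include <cstdint>
    #include <cstring>

    struct Vec128 { std::uint64_t lo, hi; };  // stand-in for a VR128 value

    // dst = OpNode(src1, src2): rr takes both operands in registers...
    template <typename Op>
    Vec128 xop_rr(Op op, Vec128 src1, Vec128 src2) { return op(src1, src2); }

    // ...rm first loads src2 from memory (the (loadv2i64 addr:$src2) node).
    template <typename Op>
    Vec128 xop_rm(Op op, Vec128 src1, const void *src2_mem) {
        Vec128 src2;
        std::memcpy(&src2, src2_mem, sizeof src2);
        return op(src1, src2);
    }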
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86InstrSSE.td |
      26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
      29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
      32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
      35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
      44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
      47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      50 RC:$src1, RC:$src2))]>;
      51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
      [all …]
|
/external/pcre/dist2/src/sljit/ |
D | sljitNativePPC_32.c |
      45 sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) in emit_single_op() argument
      52 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
      59 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
      74 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
      86 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
      90 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
      95 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
      101 …return push_inst(compiler, ADD | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2… in emit_single_op()
      109 return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); in emit_single_op()
      112 …FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compi… in emit_single_op()
      [all …]
|
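emit_single_op assembles each 32-bit PowerPC instruction word by OR-ing bitfield helpers into a base opcode — D() places the destination register, A() and B() the two sources, OE()/RC() the overflow and condition-record bits — then hands the word to push_inst. A free-standing C++ sketch of the ADD emission at line 101, with field positions written out from the architectural add rD,rA,rB layout and the flag bits omitted (the real macros live in sljit's PPC common code, so treat this as illustrative):

    #include <cstdint>

    constexpr uint32_t ADD_BASE = (31u << 26) | (266u << 1); // primary opcode 31, XO 266
    constexpr uint32_t D(uint32_t r) { return r << 21; }     // destination register field
    constexpr uint32_t A(uint32_t r) { return r << 16; }     // first source field
    constexpr uint32_t B(uint32_t r) { return r << 11; }     // second source field

    uint32_t encode_add(uint32_t dst, uint32_t src1, uint32_t src2) {
        return ADD_BASE | D(dst) | A(src1) | B(src2);        // add dst, src1, src2
    }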
/external/v8/src/ia32/ |
D | assembler-ia32.h |
      1157 void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd132sd() argument
      1158 vfmadd132sd(dst, src1, Operand(src2)); in vfmadd132sd()
      1160 void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd213sd() argument
      1161 vfmadd213sd(dst, src1, Operand(src2)); in vfmadd213sd()
      1163 void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd231sd() argument
      1164 vfmadd231sd(dst, src1, Operand(src2)); in vfmadd231sd()
      1166 void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) { in vfmadd132sd() argument
      1167 vfmasd(0x99, dst, src1, src2); in vfmadd132sd()
      1169 void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) { in vfmadd213sd() argument
      1170 vfmasd(0xa9, dst, src1, src2); in vfmadd213sd()
      [all …]
|
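The header stamps these FMA emitters out in pairs: the XMMRegister overload merely rewraps src2 as an Operand, and the Operand overload forwards to one shared vfmasd() emitter, selecting the 132/213/231 operand ordering by opcode byte (0x99 and 0xa9 are visible above; the truncated 231 form presumably passes 0xb9). The delegation pattern, reduced to a self-contained sketch with stand-in types:

    #include <cstdint>

    struct XMMRegister { int code; };
    struct Operand { explicit Operand(XMMRegister r) : reg(r) {} XMMRegister reg; };

    struct Assembler {
      void vfmasd(uint8_t opcode, XMMRegister dst, XMMRegister src1, Operand src2) {
        (void)opcode; (void)dst; (void)src1; (void)src2;  // stand-in for the VEX encoder
      }
      void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
        vfmasd(0x99, dst, src1, src2);          // one emitter, keyed by opcode byte
      }
      void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
        vfmadd132sd(dst, src1, Operand(src2));  // register form delegates
      }
    };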
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
      98 (ins VR128:$src1, VR128:$src2),
      99 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      101 (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
      104 (ins VR128:$src1, i128mem:$src2),
      105 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      107 (vt128 (OpNode (vt128 VR128:$src1),
      111 (ins i128mem:$src1, VR128:$src2),
      112 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      114 (vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))),
      120 (ins VR128:$src1, VR128:$src2),
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | mad-mix.ll |
      11 define float @v_mad_mix_f32_f16lo_f16lo_f16lo(half %src0, half %src1, half %src2) #0 {
      13 %src1.ext = fpext half %src1 to float
      15 %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.ext)
      23 define float @v_mad_mix_f32_f16hi_f16hi_f16hi_int(i32 %src0, i32 %src1, i32 %src2) #0 {
      25 %src1.hi = lshr i32 %src1, 16
      28 %src1.i16 = trunc i32 %src1.hi to i16
      31 %src1.fp16 = bitcast i16 %src1.i16 to half
      34 %src1.ext = fpext half %src1.fp16 to float
      36 %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.ext)
      45 define float @v_mad_mix_f32_f16hi_f16hi_f16hi_elt(<2 x half> %src0, <2 x half> %src1, <2 x half> %s…
      [all …]
|
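Each test builds the pattern the GFX9 v_mad_mix_f32 instruction is meant to absorb: half-precision inputs widened with fpext (from a register's low half, or from the high half via lshr/trunc/bitcast in the _int variant) feeding llvm.fmuladd.f32. The computation under test, in plain C++ (leaning on the _Float16 Clang/GCC extension as an assumption):

    #include <cmath>

    // v_mad_mix_f32_f16lo_f16lo_f16lo's dataflow: widen three f16 values
    // to f32, then one fused multiply-add (llvm.fmuladd.f32).
    float mad_mix(_Float16 src0, _Float16 src1, _Float16 src2) {
        return std::fma((float)src0, (float)src1, (float)src2);
    }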
D | fdot2.ll |
      21 define amdgpu_kernel void @dotproduct_f16(<2 x half> addrspace(1)* %src1,
      25 %src1.vec = load <2 x half>, <2 x half> addrspace(1)* %src1
      28 %src1.el1 = extractelement <2 x half> %src1.vec, i64 0
      31 %src1.el2 = extractelement <2 x half> %src1.vec, i64 1
      34 %mul2 = fmul half %src1.el2, %src2.el2
      35 %mul1 = fmul half %src1.el1, %src2.el1
      58 define amdgpu_kernel void @dotproduct_f16_f32(<2 x half> addrspace(1)* %src1,
      62 %src1.vec = load <2 x half>, <2 x half> addrspace(1)* %src1
      65 %src1.el1 = extractelement <2 x half> %src1.vec, i64 0
      66 %csrc1.el1 = fpext half %src1.el1 to float
      [all …]
|
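dotproduct_f16 loads two <2 x half> vectors, multiplies matching lanes, and sums — the shape the backend should fold into a single v_dot2 operation; the dotproduct_f16_f32 variant from line 58 onward widens each half to float before multiplying. The first kernel's arithmetic, scalarized (again assuming the _Float16 extension):

    // Two-lane half-precision dot product, as assembled by the IR above.
    _Float16 dot2_f16(const _Float16 src1[2], const _Float16 src2[2]) {
        _Float16 mul1 = src1[0] * src2[0];   // %mul1: the .el1 lanes
        _Float16 mul2 = src1[1] * src2[1];   // %mul2: the .el2 lanes
        return mul1 + mul2;
    }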
/external/llvm/test/CodeGen/SystemZ/ |
D | int-cmp-38.ll |
      10 define i32 @f1(i32 %src1) {
      17 %cond = icmp slt i32 %src1, %src2
      20 %mul = mul i32 %src1, %src1
      23 %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
      28 define i32 @f2(i32 %src1) {
      35 %cond = icmp ult i32 %src1, %src2
      38 %mul = mul i32 %src1, %src1
      41 %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
      46 define i32 @f3(i32 %src1) {
      53 %cond = icmp eq i32 %src1, %src2
      [all …]
|
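f1, f2, and f3 are one skeleton with the predicate swapped (signed less-than, unsigned less-than, equality): compare %src1 against a value the elided lines load from a global, branch, square %src1 in the mulb block, and merge through a phi. f1 rendered in C, with the global's name and the branch polarity taken as assumptions (both are truncated in the snippet):

    extern int g;   /* hypothetical name for the global %src2 is loaded from */

    int f1(int src1) {
        if (src1 < g)          /* %cond = icmp slt i32 %src1, %src2 */
            return src1;       /* phi takes %src1 along the entry edge */
        return src1 * src1;    /* %mul = mul i32 %src1, %src1 in %mulb */
    }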
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/ |
D | int-cmp-38.ll |
      10 define i32 @f1(i32 %src1) {
      17 %cond = icmp slt i32 %src1, %src2
      20 %mul = mul i32 %src1, %src1
      23 %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
      28 define i32 @f2(i32 %src1) {
      35 %cond = icmp ult i32 %src1, %src2
      38 %mul = mul i32 %src1, %src1
      41 %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
      46 define i32 @f3(i32 %src1) {
      53 %cond = icmp eq i32 %src1, %src2
      [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | sum_squares_msa.c |
      22 uint64_t src0, src1, src2, src3; in vpx_sum_squares_2d_i16_msa() local
      26 LD4(src, src_stride, src0, src1, src2, src3); in vpx_sum_squares_2d_i16_msa()
      27 INSERT_D2_SH(src0, src1, diff0); in vpx_sum_squares_2d_i16_msa()
      35 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
      37 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
      38 DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
      47 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
      49 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
      50 DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
      54 LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
      [all …]
|
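The MSA file specializes one entry point by block size: the 4x4 case packs four doubleword loads and squares them, while 8x8 and larger loop over LD_SH8 row batches reduced with DOTP_SH2_SW dot products (a vector squared against itself is its sum of squares per lane pair). What every variant must compute is a strided sum of squares; a scalar reference against the usual vpx_dsp signature, which is taken here as an assumption:

    #include <stdint.h>

    /* Sum of src[r][c]^2 over a size-by-size block of int16 samples,
     * with rows separated by src_stride elements. */
    uint64_t sum_squares_2d_i16(const int16_t *src, int src_stride, int size) {
        uint64_t ss = 0;
        for (int r = 0; r < size; ++r) {
            for (int c = 0; c < size; ++c) {
                const int32_t v = src[c];    /* widen before squaring */
                ss += (uint64_t)(v * v);
            }
            src += src_stride;
        }
        return ss;
    }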
/external/swiftshader/src/Pipeline/ |
D | ShaderCore.cpp |
      752 void ShaderCore::add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in add() argument
      754 dst.x = src0.x + src1.x; in add()
      755 dst.y = src0.y + src1.y; in add()
      756 dst.z = src0.z + src1.z; in add()
      757 dst.w = src0.w + src1.w; in add()
      760 void ShaderCore::iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in iadd() argument
      762 dst.x = As<Float4>(As<Int4>(src0.x) + As<Int4>(src1.x)); in iadd()
      763 dst.y = As<Float4>(As<Int4>(src0.y) + As<Int4>(src1.y)); in iadd()
      764 dst.z = As<Float4>(As<Int4>(src0.z) + As<Int4>(src1.z)); in iadd()
      765 dst.w = As<Float4>(As<Int4>(src0.w) + As<Int4>(src1.w)); in iadd()
      [all …]
|
D | ShaderCore.hpp |
      251 void add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      252 void iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      253 void sub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      254 void isub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      255 void mad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
      256 void imad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
      257 void mul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      258 void imul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      260 void div(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      261 void idiv(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      [all …]
|
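add() is a plain componentwise float add over the four SIMD channels. iadd() performs the same lane-wise add on integer data, but because the register file is typed as float vectors it must reinterpret each channel's bits as Int4 and cast the sum back — a bit reinterpretation, not a numeric conversion. One lane of that round trip in portable C++, with memcpy standing in for Reactor's As<>:

    #include <cstdint>
    #include <cstring>

    // One lane of iadd(): As<Int4>/As<Float4> modeled with memcpy.
    float iadd_lane(float src0, float src1) {
        int32_t a, b;
        std::memcpy(&a, &src0, sizeof a);   // As<Int4>(src0.x): raw bits
        std::memcpy(&b, &src1, sizeof b);
        const int32_t sum = (int32_t)((uint32_t)a + (uint32_t)b);  // wraparound add
        float out;
        std::memcpy(&out, &sum, sizeof out);
        return out;                         // As<Float4>(...): bits back to float
    }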
/external/swiftshader/src/Shader/ |
D | ShaderCore.cpp |
      752 void ShaderCore::add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in add() argument
      754 dst.x = src0.x + src1.x; in add()
      755 dst.y = src0.y + src1.y; in add()
      756 dst.z = src0.z + src1.z; in add()
      757 dst.w = src0.w + src1.w; in add()
      760 void ShaderCore::iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in iadd() argument
      762 dst.x = As<Float4>(As<Int4>(src0.x) + As<Int4>(src1.x)); in iadd()
      763 dst.y = As<Float4>(As<Int4>(src0.y) + As<Int4>(src1.y)); in iadd()
      764 dst.z = As<Float4>(As<Int4>(src0.z) + As<Int4>(src1.z)); in iadd()
      765 dst.w = As<Float4>(As<Int4>(src0.w) + As<Int4>(src1.w)); in iadd()
      [all …]
|
D | ShaderCore.hpp |
      251 void add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      252 void iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      253 void sub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      254 void isub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      255 void mad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
      256 void imad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
      257 void mul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      258 void imul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      260 void div(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      261 void idiv(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
      [all …]
|