/external/libpng/arm/ |
D | filter_neon_intrinsics.c |
    78   uint8x16_t vtmp = vld1q_u8(rp);   in png_read_filter_row_sub3_neon() local
    79   uint8x8x2_t *vrpt = png_ptr(uint8x8x2_t, &vtmp);   in png_read_filter_row_sub3_neon()
   101   vtmp = vld1q_u8(rp + 12);   in png_read_filter_row_sub3_neon()
   102   vrpt = png_ptr(uint8x8x2_t, &vtmp);   in png_read_filter_row_sub3_neon()
   132   uint32x2x4_t vtmp = vld4_u32(png_ptr(uint32_t,rp));   in png_read_filter_row_sub4_neon() local
   133   uint8x8x4_t *vrpt = png_ptr(uint8x8x4_t,&vtmp);   in png_read_filter_row_sub4_neon()
   158   uint8x16_t vtmp;   in png_read_filter_row_avg3_neon() local
   164   vtmp = vld1q_u8(rp);   in png_read_filter_row_avg3_neon()
   165   vrpt = png_ptr(uint8x8x2_t,&vtmp);   in png_read_filter_row_avg3_neon()
   179   vtmp = vld1q_u8(pp);   in png_read_filter_row_avg3_neon()
   [all …]
|
/external/pdfium/third_party/libpng16/arm/ |
D | filter_neon_intrinsics.c |
    78   uint8x16_t vtmp = vld1q_u8(rp);   in png_read_filter_row_sub3_neon() local
    79   uint8x8x2_t *vrpt = png_ptr(uint8x8x2_t, &vtmp);   in png_read_filter_row_sub3_neon()
   101   vtmp = vld1q_u8(rp + 12);   in png_read_filter_row_sub3_neon()
   102   vrpt = png_ptr(uint8x8x2_t, &vtmp);   in png_read_filter_row_sub3_neon()
   132   uint32x2x4_t vtmp = vld4_u32(png_ptr(uint32_t,rp));   in png_read_filter_row_sub4_neon() local
   133   uint8x8x4_t *vrpt = png_ptr(uint8x8x4_t,&vtmp);   in png_read_filter_row_sub4_neon()
   158   uint8x16_t vtmp;   in png_read_filter_row_avg3_neon() local
   164   vtmp = vld1q_u8(rp);   in png_read_filter_row_avg3_neon()
   165   vrpt = png_ptr(uint8x8x2_t,&vtmp);   in png_read_filter_row_avg3_neon()
   179   vtmp = vld1q_u8(pp);   in png_read_filter_row_avg3_neon()
   [all …]
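
Both copies above (libpng and pdfium's vendored libpng16) load 16 filtered bytes with vld1q_u8 and reconstruct pixels by adding the previous pixel byte-wise. Below is a minimal sketch of the same idea for the 4-bytes-per-pixel "sub" filter; it is a simplified stand-in for png_read_filter_row_sub4_neon(), not the libpng implementation (the helper name and the memcpy-based unaligned access are assumptions for illustration).

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch: PNG "sub" filter for 4-byte pixels. Each reconstructed pixel is
 * the filtered pixel plus the previous reconstructed pixel, modulo 256. */
static void sub4_sketch(uint8_t *row, size_t rowbytes)
{
   uint8x8_t vlast = vdup_n_u8(0);           /* previous pixel in lanes 0-3 */
   for (size_t i = 0; i + 4 <= rowbytes; i += 4)
   {
      uint32_t w;
      memcpy(&w, row + i, 4);                /* unaligned-safe 4-byte load */
      uint8x8_t vtmp = vreinterpret_u8_u64(vcreate_u64(w));
      vtmp = vadd_u8(vtmp, vlast);           /* recon = filt + prev (mod 256) */
      vlast = vtmp;
      w = vget_lane_u32(vreinterpret_u32_u8(vtmp), 0);
      memcpy(row + i, &w, 4);
   }
}

The serial dependency on the previous pixel is inherent to the sub filter, which is why the real kernel still steps one pixel at a time even though it loads 16 bytes per vld1q_u8.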
|
/external/XNNPACK/src/x32-pad/ |
D | wasmsimd.c |
    54   const v128_t vtmp = wasm_v128_load(input);   in xnn_x32_pad_ukernel__wasmsimd() local
    57   wasm_v128_store(output, vtmp);   in xnn_x32_pad_ukernel__wasmsimd()
    61   v128_t vtmp = wasm_v128_load(input);   in xnn_x32_pad_ukernel__wasmsimd() local
    64   *((double*) output) = wasm_f64x2_extract_lane(vtmp, 0);   in xnn_x32_pad_ukernel__wasmsimd()
    67   vtmp = wasm_v32x4_shuffle(vtmp, vtmp, 2, 3, 2, 3);   in xnn_x32_pad_ukernel__wasmsimd()
    70   *((float*) output) = wasm_f32x4_extract_lane(vtmp, 0);   in xnn_x32_pad_ukernel__wasmsimd()
|
D | sse.c |
    56   const __m128 vtmp = _mm_loadu_ps(i);   in xnn_x32_pad_ukernel__sse() local
    59   _mm_storeu_ps(o, vtmp);   in xnn_x32_pad_ukernel__sse()
    63   __m128 vtmp = _mm_loadu_ps(i);   in xnn_x32_pad_ukernel__sse() local
    66   _mm_storel_pi((__m64*) o, vtmp);   in xnn_x32_pad_ukernel__sse()
    69   vtmp = _mm_movehl_ps(vtmp, vtmp);   in xnn_x32_pad_ukernel__sse()
    72   _mm_store_ss(o, vtmp);   in xnn_x32_pad_ukernel__sse()
|
D | neon.c |
    51   const uint32x4_t vtmp = vld1q_u32(input); input += 4;   in xnn_x32_pad_ukernel__neon() local
    52   vst1q_u32(output, vtmp); output += 4;   in xnn_x32_pad_ukernel__neon()
    55   uint32x4_t vtmp = vld1q_u32(input);   in xnn_x32_pad_ukernel__neon() local
    58   uint32x2_t vtmp_lo = vget_low_u32(vtmp);   in xnn_x32_pad_ukernel__neon()
    61   vtmp_lo = vget_high_u32(vtmp);   in xnn_x32_pad_ukernel__neon()
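
The three x32-pad entries above share one remainder idiom: copy full 128-bit vectors while at least four 32-bit words remain, then use the bits of the leftover count to select a 64-bit and/or 32-bit store. A NEON sketch of that tail handling follows; the function name is assumed, and like the original remainder path it assumes a full 16 readable bytes remain at the final load.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: full-vector copies, then 1-3 leftover 32-bit words stored via
 * progressively narrower stores chosen by the bits of the count. */
static void copy_x32_sketch(uint32_t *output, const uint32_t *input, size_t n)
{
   for (; n >= 4; n -= 4)
   {
      const uint32x4_t vtmp = vld1q_u32(input); input += 4;
      vst1q_u32(output, vtmp); output += 4;
   }
   if (n != 0)
   {
      uint32x4_t vtmp = vld1q_u32(input);      /* assumes 16 readable bytes */
      uint32x2_t vtmp_lo = vget_low_u32(vtmp);
      if (n & 2)                               /* two words at once */
      {
         vst1_u32(output, vtmp_lo); output += 2;
         vtmp_lo = vget_high_u32(vtmp);
      }
      if (n & 1)                               /* the final word */
         vst1_lane_u32(output, vtmp_lo, 0);
   }
}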
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | copymem_neon.c |
    17   uint8x8_t vtmp;   in vp8_copy_mem8x4_neon() local
    21   vtmp = vld1_u8(src);   in vp8_copy_mem8x4_neon()
    22   vst1_u8(dst, vtmp);   in vp8_copy_mem8x4_neon()
    30   uint8x8_t vtmp;   in vp8_copy_mem8x8_neon() local
    34   vtmp = vld1_u8(src);   in vp8_copy_mem8x8_neon()
    35   vst1_u8(dst, vtmp);   in vp8_copy_mem8x8_neon()
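
The copymem_neon.c hits are a strided block copy, one 8-byte row per iteration. A compilable sketch of the 8x4 variant, under an assumed name:

#include <arm_neon.h>
#include <stdint.h>

/* Sketch of an 8x4 block copy in the style of vp8_copy_mem8x4_neon(). */
static void copy8x4_sketch(const uint8_t *src, int src_stride,
                           uint8_t *dst, int dst_stride)
{
   for (int r = 0; r < 4; ++r)
   {
      uint8x8_t vtmp = vld1_u8(src);   /* one 8-byte row */
      vst1_u8(dst, vtmp);
      src += src_stride;
      dst += dst_stride;
   }
}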
|
/external/llvm-project/llvm/test/CodeGen/Hexagon/intrinsics/ |
D | v65-gather.ll |
     4   ; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
     5   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
     7   ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
     8   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    10   ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
    11   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    13   ; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
    14   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    16   ; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
    17   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    [all …]
|
D | v65-gather-double.ll |
     4   ; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
     5   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
     7   ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
     8   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    10   ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
    11   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    13   ; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
    14   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    16   ; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
    17   ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    [all …]
|
/external/llvm-project/llvm/test/MC/Hexagon/ |
D | v65_all.s |
    16   if (Q0) vtmp.w=vgather(R0,M0,V0.w).w
    17   # CHECK: 2f00c400 { if (q0) vtmp.w = vgather(r0,m0,v0.w).w }
    36   if (Q0) vtmp.h=vgather(R0,M0,V1:0.w).h
    37   # CHECK: 2f00c600 { if (q0) vtmp.h = vgather(r0,m0,v1:0.w).h }
    46   vtmp.h=vgather(R0,M0,V0.h).h
    47   # CHECK: 2f00c100 { vtmp.h = vgather(r0,m0,v0.h).h }
    81   vtmp.w=vgather(R0,M0,V0.w).w
    82   # CHECK: 2f00c000 { vtmp.w = vgather(r0,m0,v0.w).w }
    96   if (Q0) vtmp.h=vgather(R0,M0,V0.h).h
    97   # CHECK: 2f00c500 { if (q0) vtmp.h = vgather(r0,m0,v0.h).h }
    [all …]
|
D | vgather-new.s |
     9   if (q0) vtmp.h = vgather(r5,m0,v31.h).h
    10   vmem(r1+#0) = vtmp.new
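
In all of the Hexagon tests above, vtmp is the HVX gather temporary: a vgather writes into it, and the result must be stored immediately with `vmem(...) = vtmp.new`, while the predicated forms (`if (q0) vtmp...`) skip lanes whose predicate bit is clear. A scalar C model of the word-gather form, written only to make the semantics concrete; LANES, the memcpy-based loads, and the function name are illustrative, not the HVX programming API.

#include <stdint.h>
#include <string.h>

#define LANES 32   /* illustrative: one 32-bit word per lane */

/* Scalar model of "vtmp.w = vgather(Rt,Mu,Vv.w).w" followed by
 * "vmem(Rx+#0) = vtmp.new": per-lane loads from base + offset,
 * then a store of the gathered vector. */
static void vgather_w_model(uint32_t dst[LANES], const uint8_t *base,
                            const uint32_t offsets[LANES])
{
   uint32_t vtmp[LANES];                      /* the gather temporary */
   for (int i = 0; i < LANES; ++i)
      memcpy(&vtmp[i], base + offsets[i], sizeof(uint32_t));
   memcpy(dst, vtmp, sizeof(vtmp));           /* vmem(...) = vtmp.new */
}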
|
/external/arm-optimized-routines/networking/arm/ |
D | chksum_simd.c |
    42   uint32x2_t vtmp = vreinterpret_u32_u64(vword64);   in __chksum_arm_simd() local
    44   vsum = vpaddl_u32(vtmp);   in __chksum_arm_simd()
    95   uint32x2_t vtmp = vmovn_u64(vsum0);   in __chksum_arm_simd() local
    97   vsum = vpadal_u32(vsum, vtmp);   in __chksum_arm_simd()
   103   uint32x2_t vtmp = vld1_u32(ptr32);   in __chksum_arm_simd() local
   105   vsum = vpadal_u32(vsum, vtmp);   in __chksum_arm_simd()
|
/external/llvm-project/libc/AOR_v20.02/networking/arm/ |
D | chksum_simd.c |
    43   uint32x2_t vtmp = vreinterpret_u32_u64(vword64);   in __chksum_arm_simd() local
    45   vsum = vpaddl_u32(vtmp);   in __chksum_arm_simd()
    96   uint32x2_t vtmp = vmovn_u64(vsum0);   in __chksum_arm_simd() local
    98   vsum = vpadal_u32(vsum, vtmp);   in __chksum_arm_simd()
   104   uint32x2_t vtmp = vld1_u32(ptr32);   in __chksum_arm_simd() local
   106   vsum = vpadal_u32(vsum, vtmp);   in __chksum_arm_simd()
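
Both chksum_simd.c copies above accumulate with widening pairwise adds so 32-bit carries cannot be lost: vpaddl_u32 widens two 32-bit lanes into 64 bits, and vpadal_u32 does the same while accumulating. A trimmed sketch of that core follows; the function name, the word-count interface, and the final fold are assumptions, and the real routine also handles alignment, odd lengths, and byte order.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: sum 32-bit words into a 64-bit lane with pairwise widening adds,
 * then fold with end-around carry as ones'-complement checksums require. */
static uint16_t chksum_sketch(const uint32_t *ptr32, size_t words)
{
   uint64x1_t vsum = vdup_n_u64(0);
   for (; words >= 2; words -= 2)
   {
      uint32x2_t vtmp = vld1_u32(ptr32);     /* two 32-bit words */
      ptr32 += 2;
      vsum = vpadal_u32(vsum, vtmp);         /* pairwise add, accumulate long */
   }
   uint64_t sum = vget_lane_u64(vsum, 0);
   if (words != 0)
      sum += *ptr32;                         /* odd trailing word */
   while (sum >> 16)
      sum = (sum & 0xffff) + (sum >> 16);    /* end-around carry folds */
   return (uint16_t)sum;
}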
|
/external/boringssl/src/crypto/x509v3/ |
D | v3_info.c |
   122   CONF_VALUE *vtmp;   variable
   133   vtmp = sk_CONF_VALUE_value(tret, i);
   135   nlen = strlen(objtmp) + strlen(vtmp->name) + 5;
   141   OPENSSL_strlcat(ntmp, vtmp->name, nlen);
   142   OPENSSL_free(vtmp->name);
   143   vtmp->name = ntmp;
|
D | v3_utl.c |
    94   CONF_VALUE *vtmp = NULL;   in X509V3_add_value() local
   100   if (!(vtmp = CONF_VALUE_new()))   in X509V3_add_value()
   104   vtmp->section = NULL;   in X509V3_add_value()
   105   vtmp->name = tname;   in X509V3_add_value()
   106   vtmp->value = tvalue;   in X509V3_add_value()
   107   if (!sk_CONF_VALUE_push(*extlist, vtmp))   in X509V3_add_value()
   112   if (vtmp)   in X509V3_add_value()
   113   OPENSSL_free(vtmp);   in X509V3_add_value()
   328   char *ntmp, *vtmp;   in STACK_OF() local
   374   vtmp = strip_spaces(q);   in STACK_OF()
   [all …]
|
/external/rust/crates/quiche/deps/boringssl/src/crypto/x509v3/ |
D | v3_info.c |
   122   CONF_VALUE *vtmp;   variable
   133   vtmp = sk_CONF_VALUE_value(tret, i);
   135   nlen = strlen(objtmp) + strlen(vtmp->name) + 5;
   141   OPENSSL_strlcat(ntmp, vtmp->name, nlen);
   142   OPENSSL_free(vtmp->name);
   143   vtmp->name = ntmp;
|
D | v3_utl.c |
    94   CONF_VALUE *vtmp = NULL;   in X509V3_add_value() local
   100   if (!(vtmp = CONF_VALUE_new()))   in X509V3_add_value()
   104   vtmp->section = NULL;   in X509V3_add_value()
   105   vtmp->name = tname;   in X509V3_add_value()
   106   vtmp->value = tvalue;   in X509V3_add_value()
   107   if (!sk_CONF_VALUE_push(*extlist, vtmp))   in X509V3_add_value()
   112   if (vtmp)   in X509V3_add_value()
   113   OPENSSL_free(vtmp);   in X509V3_add_value()
   328   char *ntmp, *vtmp;   in STACK_OF() local
   374   vtmp = strip_spaces(q);   in STACK_OF()
   [all …]
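
The v3_utl.c hits above (in both the standalone BoringSSL and the quiche-vendored copy) show the X509V3_add_value() pattern: allocate a CONF_VALUE, hand it ownership of the duplicated name/value strings, push it onto a stack, and free everything if any step fails. A plain-C model of that ownership pattern follows; the list type and helpers are stand-ins, not the BoringSSL API.

#include <stdlib.h>
#include <string.h>

typedef struct { char *section, *name, *value; } conf_value;
typedef struct { conf_value **items; size_t len, cap; } value_list;

/* Append a node to a growable array; returns 0 on allocation failure. */
static int list_push(value_list *l, conf_value *v)
{
   if (l->len == l->cap)
   {
      size_t cap = l->cap ? l->cap * 2 : 8;
      conf_value **p = realloc(l->items, cap * sizeof(*p));
      if (p == NULL)
         return 0;
      l->items = p;
      l->cap = cap;
   }
   l->items[l->len++] = v;
   return 1;
}

/* Build a name/value node and push it; on any failure, release every
 * partially constructed piece, as X509V3_add_value() does. */
static int add_value(value_list *l, const char *name, const char *value)
{
   conf_value *vtmp = calloc(1, sizeof(*vtmp));
   char *tname = name ? strdup(name) : NULL;
   char *tvalue = value ? strdup(value) : NULL;
   if (vtmp == NULL || (name && tname == NULL) || (value && tvalue == NULL))
      goto err;
   vtmp->section = NULL;
   vtmp->name = tname;
   vtmp->value = tvalue;
   if (!list_push(l, vtmp))
      goto err;
   return 1;
err:
   free(tname);
   free(tvalue);
   free(vtmp);
   return 0;
}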
|
/external/mesa3d/src/amd/compiler/ |
D | aco_lower_to_hw_instr.cpp |
   308   …lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)   in emit_int64_op() argument
   320   assert(vtmp.reg() != 0);   in emit_int64_op()
   321   bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);   in emit_int64_op()
   322   bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);   in emit_int64_op()
   323   src0_reg = vtmp;   in emit_int64_op()
   324   src0[0] = Operand(vtmp, v1);   in emit_int64_op()
   325   src0[1] = Operand(PhysReg{vtmp+1}, v1);   in emit_int64_op()
   326   src0_64 = Operand(vtmp, v2);   in emit_int64_op()
   328   assert(vtmp.reg() != 0);   in emit_int64_op()
   329   bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);   in emit_int64_op()
   [all …]
|
D | aco_reduce_assign.cpp |
    58   Temp vtmp(0, RegClass(RegType::vgpr, maxSize).as_linear());   in setup_reduce_temp() local
    73   end->operands[1] = Operand(vtmp);   in setup_reduce_temp()
   138   vtmp = program->allocateTmp(vtmp.regClass());   in setup_reduce_temp()
   140   create->definitions[0] = Definition(vtmp);   in setup_reduce_temp()
   154   instr->operands[2] = Operand(vtmp);   in setup_reduce_temp()
|
/external/libaom/libaom/aom_dsp/x86/ |
D | sum_squares_sse2.c |
   207   __m128i vtmp = _mm_srli_si128(vec_a, 8);   in mm_accumulate_epi16() local
   208   vec_a = _mm_add_epi16(vec_a, vtmp);   in mm_accumulate_epi16()
   209   vtmp = _mm_srli_si128(vec_a, 4);   in mm_accumulate_epi16()
   210   vec_a = _mm_add_epi16(vec_a, vtmp);   in mm_accumulate_epi16()
   211   vtmp = _mm_srli_si128(vec_a, 2);   in mm_accumulate_epi16()
   212   vec_a = _mm_add_epi16(vec_a, vtmp);   in mm_accumulate_epi16()
   218   __m128i vtmp = _mm_srli_si128(vec_a, 8);   in mm_accumulate_epi32() local
   219   vec_a = _mm_add_epi32(vec_a, vtmp);   in mm_accumulate_epi32()
   220   vtmp = _mm_srli_si128(vec_a, 4);   in mm_accumulate_epi32()
   221   vec_a = _mm_add_epi32(vec_a, vtmp);   in mm_accumulate_epi32()
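
mm_accumulate_epi32() above reduces a vector to a scalar in log2(lanes) steps: shift the upper half down with _mm_srli_si128 and add, halving the active width each time. Here is the 32-bit variant as a standalone sketch; the function name and the final extract are assumptions.

#include <emmintrin.h>
#include <stdint.h>

/* Sketch: horizontal sum of four 32-bit lanes by repeated shift-and-add. */
static uint32_t hsum_epi32_sketch(__m128i vec_a)
{
   __m128i vtmp = _mm_srli_si128(vec_a, 8);    /* bring lanes 2,3 down */
   vec_a = _mm_add_epi32(vec_a, vtmp);
   vtmp = _mm_srli_si128(vec_a, 4);            /* bring lane 1 down */
   vec_a = _mm_add_epi32(vec_a, vtmp);
   return (uint32_t)_mm_cvtsi128_si32(vec_a);  /* lane 0 holds the sum */
}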
|
/external/arm-optimized-routines/networking/aarch64/ |
D | chksum_simd.c |
   120   uint32x4_t vtmp = vld1q_u32(ptr32);   in __chksum_aarch64_simd() local
   121   vsum0 = vpadalq_u32(vsum0, vtmp);   in __chksum_aarch64_simd()
   130   uint32x2_t vtmp = vld1_u32(ptr32);   in __chksum_aarch64_simd() local
   131   vsum0 = vaddw_u32(vsum0, vtmp);   in __chksum_aarch64_simd()
|
/external/llvm-project/libc/AOR_v20.02/networking/aarch64/ |
D | chksum_simd.c |
   121   uint32x4_t vtmp = vld1q_u32(ptr32);   in __chksum_aarch64_simd() local
   122   vsum0 = vpadalq_u32(vsum0, vtmp);   in __chksum_aarch64_simd()
   131   uint32x2_t vtmp = vld1_u32(ptr32);   in __chksum_aarch64_simd() local
   132   vsum0 = vaddw_u32(vsum0, vtmp);   in __chksum_aarch64_simd()
|
/external/llvm-project/llvm/test/CodeGen/Hexagon/ |
D | vgather-packetize.mir |
     7   # CHECK-NEXT: vtmp.h = vgather
     8   # CHECK-NEXT: vmem(r0+#0) = vtmp.new
    24   V6_vgathermhw_pseudo $r0, $r2, $m0, $w0, implicit-def $vtmp
|
/external/llvm-project/polly/lib/External/isl/imath/ |
D | imath.c |
   432   mpz_t vtmp;   in mp_int_init_value() local
   435   s_fake(&vtmp, value, vbuf);   in mp_int_init_value()
   436   return mp_int_init_copy(z, &vtmp);   in mp_int_init_value()
   441   mpz_t vtmp;   in mp_int_init_uvalue() local
   444   s_ufake(&vtmp, uvalue, vbuf);   in mp_int_init_uvalue()
   445   return mp_int_init_copy(z, &vtmp);   in mp_int_init_uvalue()
   450   mpz_t vtmp;   in mp_int_set_value() local
   453   s_fake(&vtmp, value, vbuf);   in mp_int_set_value()
   454   return mp_int_copy(&vtmp, z);   in mp_int_set_value()
   459   mpz_t vtmp;   in mp_int_set_uvalue() local
   [all …]
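
The imath.c entries above all use one trick: s_fake()/s_ufake() dress a native integer up as a stack-allocated mpz_t, with vbuf as its digit storage, so that mp_int_copy()/mp_int_init_copy() can do the rest and the temporary needs no heap allocation. A toy model of the same idea follows; the types and helpers are illustrative, not imath's internals.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
   uint32_t *digits;   /* little-endian digit array */
   int       used;
   int       sign;
} toy_int;

/* Wrap a native value as a toy_int backed by caller storage: no allocation;
 * valid only while buf is alive (the same contract as imath's s_fake()). */
static void toy_fake(toy_int *z, uint32_t value, uint32_t buf[1])
{
   buf[0] = value;
   z->digits = buf;
   z->used = 1;
   z->sign = 0;
}

/* Deep-copy src into a freshly allocated dst (stands in for mp_int_copy). */
static int toy_copy(const toy_int *src, toy_int *dst)
{
   dst->digits = malloc(src->used * sizeof(uint32_t));
   if (dst->digits == NULL)
      return 0;
   memcpy(dst->digits, src->digits, src->used * sizeof(uint32_t));
   dst->used = src->used;
   dst->sign = src->sign;
   return 1;
}

/* mp_int_set_value() in miniature: fake a temporary, then copy it. */
static int toy_set_value(toy_int *z, uint32_t value)
{
   uint32_t vbuf[1];
   toy_int vtmp;
   toy_fake(&vtmp, value, vbuf);
   return toy_copy(&vtmp, z);
}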
|
/external/boringssl/src/crypto/asn1/ |
D | tasn_fre.c |
   173   ASN1_VALUE *vtmp;   in ASN1_template_free() local
   174   vtmp = sk_ASN1_VALUE_value(sk, i);   in ASN1_template_free()
   175   asn1_item_combine_free(&vtmp, ASN1_ITEM_ptr(tt->item), 0);   in ASN1_template_free()
|
/external/rust/crates/quiche/deps/boringssl/src/crypto/asn1/ |
D | tasn_fre.c |
   179   ASN1_VALUE *vtmp;   in ASN1_template_free() local
   180   vtmp = sk_ASN1_VALUE_value(sk, i);   in ASN1_template_free()
   181   asn1_item_combine_free(&vtmp, ASN1_ITEM_ptr(tt->item), 0);   in ASN1_template_free()
|