/external/libpng/arm/ |
D | filter_neon_intrinsics.c |
   78 uint8x16_t vtmp = vld1q_u8(rp); in png_read_filter_row_sub3_neon() local
   79 uint8x8x2_t *vrpt = png_ptr(uint8x8x2_t, &vtmp); in png_read_filter_row_sub3_neon()
  101 vtmp = vld1q_u8(rp + 12); in png_read_filter_row_sub3_neon()
  102 vrpt = png_ptr(uint8x8x2_t, &vtmp); in png_read_filter_row_sub3_neon()
  132 uint32x2x4_t vtmp = vld4_u32(png_ptr(uint32_t,rp)); in png_read_filter_row_sub4_neon() local
  133 uint8x8x4_t *vrpt = png_ptr(uint8x8x4_t,&vtmp); in png_read_filter_row_sub4_neon()
  158 uint8x16_t vtmp; in png_read_filter_row_avg3_neon() local
  164 vtmp = vld1q_u8(rp); in png_read_filter_row_avg3_neon()
  165 vrpt = png_ptr(uint8x8x2_t,&vtmp); in png_read_filter_row_avg3_neon()
  179 vtmp = vld1q_u8(pp); in png_read_filter_row_avg3_neon()
  [all …]
|
/external/pdfium/third_party/libpng16/arm/ |
D | filter_neon_intrinsics.c |
   78 uint8x16_t vtmp = vld1q_u8(rp); in png_read_filter_row_sub3_neon() local
   79 uint8x8x2_t *vrpt = png_ptr(uint8x8x2_t, &vtmp); in png_read_filter_row_sub3_neon()
  101 vtmp = vld1q_u8(rp + 12); in png_read_filter_row_sub3_neon()
  102 vrpt = png_ptr(uint8x8x2_t, &vtmp); in png_read_filter_row_sub3_neon()
  132 uint32x2x4_t vtmp = vld4_u32(png_ptr(uint32_t,rp)); in png_read_filter_row_sub4_neon() local
  133 uint8x8x4_t *vrpt = png_ptr(uint8x8x4_t,&vtmp); in png_read_filter_row_sub4_neon()
  158 uint8x16_t vtmp; in png_read_filter_row_avg3_neon() local
  164 vtmp = vld1q_u8(rp); in png_read_filter_row_avg3_neon()
  165 vrpt = png_ptr(uint8x8x2_t,&vtmp); in png_read_filter_row_avg3_neon()
  179 vtmp = vld1q_u8(pp); in png_read_filter_row_avg3_neon()
  [all …]
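The two filter_neon_intrinsics.c entries above (libpng and pdfium's bundled libpng16) undo PNG row filters with NEON loads. Below is a minimal, hypothetical sketch of the idea for the 4-bytes-per-pixel "Sub" filter; it is not libpng's png_read_filter_row_sub4_neon, and the function name, one-pixel-at-a-time loop, and 4-byte-aligned row assumption are all illustrative.

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch only: Recon(x) = Filt(x) + Recon(x - bpp), bytes wrap mod 256.
     * Assumes bpp == 4 and a 4-byte-aligned row pointer. */
    static void undo_sub4_neon_sketch(uint8_t *row, size_t rowbytes) {
      uint8x8_t vprev = vdup_n_u8(0);     /* pixel before the first is zero */
      for (size_t i = 0; i + 4 <= rowbytes; i += 4) {
        uint32x2_t v32 = vld1_dup_u32((const uint32_t *)(row + i)); /* one pixel */
        uint8x8_t vcur = vreinterpret_u8_u32(v32);
        vcur = vadd_u8(vcur, vprev);      /* Recon = Filt + previous pixel */
        vst1_lane_u32((uint32_t *)(row + i), vreinterpret_u32_u8(vcur), 0);
        vprev = vcur;                     /* carry to the next pixel */
      }
    }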
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | copymem_neon.c |
   17 uint8x8_t vtmp; in vp8_copy_mem8x4_neon() local
   21 vtmp = vld1_u8(src); in vp8_copy_mem8x4_neon()
   22 vst1_u8(dst, vtmp); in vp8_copy_mem8x4_neon()
   30 uint8x8_t vtmp; in vp8_copy_mem8x8_neon() local
   34 vtmp = vld1_u8(src); in vp8_copy_mem8x8_neon()
   35 vst1_u8(dst, vtmp); in vp8_copy_mem8x8_neon()
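The copymem_neon.c hits show the basic NEON block-copy pattern: load one 8-byte row into a d-register, store it to the destination, advance both pointers by their strides. A self-contained sketch of that pattern follows; the function name is illustrative, not vp8's.

    #include <arm_neon.h>

    /* Sketch: copy an 8x8 block of bytes, one 8-byte row per NEON register. */
    static void copy_8x8_neon_sketch(const unsigned char *src, int src_stride,
                                     unsigned char *dst, int dst_stride) {
      for (int r = 0; r < 8; ++r) {
        uint8x8_t vtmp = vld1_u8(src);  /* load 8 source bytes            */
        vst1_u8(dst, vtmp);             /* store them to the destination  */
        src += src_stride;
        dst += dst_stride;
      }
    }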
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/intrinsics/ |
D | v65-gather.ll |
    4 ; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
    5 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    7 ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
    8 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
   10 ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
   11 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
   13 ; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
   14 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
   16 ; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
   17 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
  [all …]
|
D | v65-gather-double.ll |
    4 ; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
    5 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
    7 ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
    8 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
   10 ; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
   11 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
   13 ; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
   14 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
   16 ; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
   17 ; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
  [all …]
|
D | v65-scatter-gather.ll |
    5 ; CHECK: vtmp.h = vgather(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h
    6 ; CHECK-NEXT: vmem(r{{[0-9]+}}+#0) = vtmp.new
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/Hexagon/ |
D | v65_all.s |
   16 if (Q0) vtmp.w=vgather(R0,M0,V0.w).w
   17 # CHECK: 2f00c400 { if (q0) vtmp.w = vgather(r0,m0,v0.w).w }
   36 if (Q0) vtmp.h=vgather(R0,M0,V1:0.w).h
   37 # CHECK: 2f00c600 { if (q0) vtmp.h = vgather(r0,m0,v1:0.w).h }
   46 vtmp.h=vgather(R0,M0,V0.h).h
   47 # CHECK: 2f00c100 { vtmp.h = vgather(r0,m0,v0.h).h }
   81 vtmp.w=vgather(R0,M0,V0.w).w
   82 # CHECK: 2f00c000 { vtmp.w = vgather(r0,m0,v0.w).w }
   96 if (Q0) vtmp.h=vgather(R0,M0,V0.h).h
   97 # CHECK: 2f00c500 { if (q0) vtmp.h = vgather(r0,m0,v0.h).h }
  [all …]
|
D | vgather-new.s |
    9 if (q0) vtmp.h = vgather(r5,m0,v31.h).h
   10 vmem(r1+#0) = vtmp.new
|
/external/boringssl/src/crypto/x509v3/ |
D | v3_info.c |
  123 CONF_VALUE *vtmp; variable
  134 vtmp = sk_CONF_VALUE_value(tret, i);
  136 nlen = strlen(objtmp) + strlen(vtmp->name) + 5;
  142 BUF_strlcat(ntmp, vtmp->name, nlen);
  143 OPENSSL_free(vtmp->name);
  144 vtmp->name = ntmp;
|
D | v3_utl.c |
   95 CONF_VALUE *vtmp = NULL; in X509V3_add_value() local
  101 if (!(vtmp = CONF_VALUE_new())) in X509V3_add_value()
  105 vtmp->section = NULL; in X509V3_add_value()
  106 vtmp->name = tname; in X509V3_add_value()
  107 vtmp->value = tvalue; in X509V3_add_value()
  108 if (!sk_CONF_VALUE_push(*extlist, vtmp)) in X509V3_add_value()
  113 if (vtmp) in X509V3_add_value()
  114 OPENSSL_free(vtmp); in X509V3_add_value()
  329 char *ntmp, *vtmp; in STACK_OF() local
  375 vtmp = strip_spaces(q); in STACK_OF()
  [all …]
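The X509V3_add_value() hits above follow a common shape: allocate a CONF_VALUE, fill in section/name/value, push it onto the output stack, and free the node if the push fails. Below is a hedged sketch of that shape only; the helper name, the use of OPENSSL_malloc instead of CONF_VALUE_new, and the "list takes ownership of the strings" convention are assumptions, not BoringSSL's documented contract.

    #include <openssl/conf.h>
    #include <openssl/mem.h>

    /* Sketch: push a name/value pair onto a STACK_OF(CONF_VALUE). */
    static int push_name_value_sketch(STACK_OF(CONF_VALUE) *list,
                                      char *name, char *value) {
      CONF_VALUE *vtmp = OPENSSL_malloc(sizeof(CONF_VALUE));
      if (vtmp == NULL)
        return 0;
      vtmp->section = NULL;
      vtmp->name = name;    /* assumed: the list now owns these strings */
      vtmp->value = value;
      if (!sk_CONF_VALUE_push(list, vtmp)) {
        OPENSSL_free(vtmp); /* push failed: free the node, caller keeps strings */
        return 0;
      }
      return 1;
    }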
|
/external/mesa3d/src/amd/compiler/ |
D | aco_lower_to_hw_instr.cpp |
  308 …lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op) in emit_int64_op() argument
  320 assert(vtmp.reg() != 0); in emit_int64_op()
  321 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]); in emit_int64_op()
  322 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]); in emit_int64_op()
  323 src0_reg = vtmp; in emit_int64_op()
  324 src0[0] = Operand(vtmp, v1); in emit_int64_op()
  325 src0[1] = Operand(PhysReg{vtmp+1}, v1); in emit_int64_op()
  326 src0_64 = Operand(vtmp, v2); in emit_int64_op()
  328 assert(vtmp.reg() != 0); in emit_int64_op()
  329 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]); in emit_int64_op()
  [all …]
|
D | aco_reduce_assign.cpp |
   58 Temp vtmp(0, RegClass(RegType::vgpr, maxSize).as_linear()); in setup_reduce_temp() local
   73 end->operands[1] = Operand(vtmp); in setup_reduce_temp()
  138 vtmp = program->allocateTmp(vtmp.regClass()); in setup_reduce_temp()
  140 create->definitions[0] = Definition(vtmp); in setup_reduce_temp()
  154 instr->operands[2] = Operand(vtmp); in setup_reduce_temp()
|
/external/libaom/libaom/aom_dsp/x86/ |
D | sum_squares_sse2.c |
  207 __m128i vtmp = _mm_srli_si128(vec_a, 8); in mm_accumulate_epi16() local
  208 vec_a = _mm_add_epi16(vec_a, vtmp); in mm_accumulate_epi16()
  209 vtmp = _mm_srli_si128(vec_a, 4); in mm_accumulate_epi16()
  210 vec_a = _mm_add_epi16(vec_a, vtmp); in mm_accumulate_epi16()
  211 vtmp = _mm_srli_si128(vec_a, 2); in mm_accumulate_epi16()
  212 vec_a = _mm_add_epi16(vec_a, vtmp); in mm_accumulate_epi16()
  218 __m128i vtmp = _mm_srli_si128(vec_a, 8); in mm_accumulate_epi32() local
  219 vec_a = _mm_add_epi32(vec_a, vtmp); in mm_accumulate_epi32()
  220 vtmp = _mm_srli_si128(vec_a, 4); in mm_accumulate_epi32()
  221 vec_a = _mm_add_epi32(vec_a, vtmp); in mm_accumulate_epi32()
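The mm_accumulate_epi16()/mm_accumulate_epi32() hits above reduce a vector to a scalar by repeatedly shifting the register right by half the remaining width and adding it to itself. A minimal sketch of the 16-bit variant; the function name is illustrative, and the result wraps modulo 2^16 exactly as 16-bit lanes do.

    #include <emmintrin.h>

    /* Sketch: sum all eight 16-bit lanes of an SSE2 register. */
    static int hsum_epi16_sketch(__m128i v) {
      __m128i vtmp = _mm_srli_si128(v, 8);  /* fold upper 8 bytes onto lower */
      v = _mm_add_epi16(v, vtmp);
      vtmp = _mm_srli_si128(v, 4);          /* fold 4 bytes */
      v = _mm_add_epi16(v, vtmp);
      vtmp = _mm_srli_si128(v, 2);          /* fold 2 bytes */
      v = _mm_add_epi16(v, vtmp);
      return _mm_extract_epi16(v, 0);       /* lane 0 now holds the total */
    }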
|
/external/boringssl/src/crypto/asn1/ |
D | tasn_fre.c |
  179 ASN1_VALUE *vtmp; in ASN1_template_free() local
  180 vtmp = sk_ASN1_VALUE_value(sk, i); in ASN1_template_free()
  181 asn1_item_combine_free(&vtmp, ASN1_ITEM_ptr(tt->item), 0); in ASN1_template_free()
|
D | tasn_dec.c |
  658 ASN1_VALUE *vtmp; in asn1_template_noexp_d2i() local
  660 vtmp = sk_ASN1_VALUE_pop(sktmp); in asn1_template_noexp_d2i()
  661 ASN1_item_ex_free(&vtmp, ASN1_ITEM_ptr(tt->item)); in asn1_template_noexp_d2i()
|
/external/webrtc/webrtc/modules/video_processing/util/ |
D | denoiser_filter_neon.cc |
   82 uint8x8_t vtmp; in CopyMem8x8() local
   85 vtmp = vld1_u8(src); in CopyMem8x8()
   86 vst1_u8(dst, vtmp); in CopyMem8x8()
|
/external/boringssl/src/crypto/x509/ |
D | asn1_gen.c |
  652 CONF_VALUE vtmp; in asn1_str2type() local
  681 vtmp.name = NULL; in asn1_str2type()
  682 vtmp.section = NULL; in asn1_str2type()
  683 vtmp.value = (char *)str; in asn1_str2type()
  684 if (!X509V3_get_value_bool(&vtmp, &atmp->value.boolean)) { in asn1_str2type()
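The asn1_str2type() hits above wrap a plain string in a stack-allocated CONF_VALUE so X509V3_get_value_bool() can parse it as an ASN.1 BOOLEAN. A hedged sketch of that wrapper; the helper name and the int* output type are assumptions.

    #include <openssl/x509v3.h>

    /* Sketch: parse a boolean-valued string via a temporary CONF_VALUE. */
    static int parse_asn1_bool_sketch(const char *str, int *out) {
      CONF_VALUE vtmp;
      vtmp.name = NULL;
      vtmp.section = NULL;
      vtmp.value = (char *)str;  /* e.g. "TRUE" or "FALSE" */
      return X509V3_get_value_bool(&vtmp, out);
    }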
|
/external/libaom/libaom/av1/encoder/x86/ |
D | temporal_filter_sse2.c |
   77 __m128i vtmp = _mm_loadu_si128((__m128i *)src); in xx_load_and_pad() local
   79 __m128i vtmp1 = _mm_unpacklo_epi16(vtmp, vzero); in xx_load_and_pad()
   80 __m128i vtmp2 = _mm_unpackhi_epi16(vtmp, vzero); in xx_load_and_pad()
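The xx_load_and_pad() hits widen 16-bit lanes to 32 bits by interleaving with a zero register. A small sketch of just that unpack step; the function and parameter names are illustrative.

    #include <emmintrin.h>
    #include <stdint.h>

    /* Sketch: widen eight 16-bit values to two registers of four 32-bit values. */
    static void widen_u16_to_u32_sketch(const uint16_t *src,
                                        __m128i *out_lo, __m128i *out_hi) {
      __m128i vtmp = _mm_loadu_si128((const __m128i *)src);
      __m128i vzero = _mm_setzero_si128();
      *out_lo = _mm_unpacklo_epi16(vtmp, vzero);  /* elements 0..3, zero-extended */
      *out_hi = _mm_unpackhi_epi16(vtmp, vzero);  /* elements 4..7, zero-extended */
    }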
|
D | temporal_filter_avx2.c |
  112 __m256i vtmp = _mm256_and_si256(vsum, *(__m256i *)sse_bytemask[i]); in xx_mask_and_hadd() local
  115 v128a = _mm256_castsi256_si128(vtmp); in xx_mask_and_hadd()
  116 v128b = _mm256_extracti128_si256(vtmp, 1); in xx_mask_and_hadd()
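The xx_mask_and_hadd() hits split the 256-bit accumulator into its two 128-bit halves before summing. Below is a sketch of a complete 8-lane horizontal add built from that cast/extract pattern; it is illustrative, not libaom's exact reduction.

    #include <immintrin.h>

    /* Sketch: reduce eight 32-bit lanes of an AVX2 register to one sum. */
    static int hsum_epi32_avx2_sketch(__m256i v) {
      __m128i lo = _mm256_castsi256_si128(v);       /* lanes 0..3 */
      __m128i hi = _mm256_extracti128_si256(v, 1);  /* lanes 4..7 */
      __m128i s = _mm_add_epi32(lo, hi);
      s = _mm_add_epi32(s, _mm_srli_si128(s, 8));
      s = _mm_add_epi32(s, _mm_srli_si128(s, 4));
      return _mm_cvtsi128_si32(s);                  /* lane 0 holds the total */
    }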
|
/external/python/cpython3/Modules/_decimal/libmpdec/ |
D | mpdecimal.c |
  4686 MPD_NEW_STATIC(vtmp,0,0,0,0); in _mpd_qln()
  4795 mpd_qshiftr(&vtmp, &v, shift, status); in _mpd_qln()
  4796 vtmp.exp += shift; in _mpd_qln()
  4797 mpd_qmul(&tmp, &vtmp, &tmp, &varcontext, status); in _mpd_qln()
  4842 mpd_del(&vtmp); in _mpd_qln()
  5494 mpd_uint_t *c1 = NULL, *c2 = NULL, *c3 = NULL, *vtmp = NULL; in _mpd_fntmul() local
  5529 if ((vtmp = mpd_calloc(n, sizeof *vtmp)) == NULL) { in _mpd_fntmul()
  5533 memcpy(vtmp, v, vlen * (sizeof *vtmp)); in _mpd_fntmul()
  5534 if (!fnt_convolute(c1, vtmp, n, P1)) { in _mpd_fntmul()
  5535 mpd_free(vtmp); in _mpd_fntmul()
  [all …]
|
/external/mesa3d/src/gallium/drivers/nouveau/codegen/ |
D | nv50_ir_peephole.cpp |
  3550 Value *vtmp; in handleMADforNV50() local
  3556 vtmp = i->getSrc(1); in handleMADforNV50()
  3574 if (post_ra_dead(vtmp->getInsn())) { in handleMADforNV50()
  3575 Value *src = vtmp->getInsn()->getSrc(0); in handleMADforNV50()
  3578 if (vtmp->getInsn()->bb) in handleMADforNV50()
  3579 delete_Instruction(prog, vtmp->getInsn()); in handleMADforNV50()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Hexagon/ |
D | HexagonRegisterInfo.td |
  187 def VTMP : Ri<0, "vtmp">, DwarfRegNum<[131]>;
|
D | HexagonDepInstrInfo.td |
  31826 "vtmp.h = vgather($Rt32,$Mu2,$Vv32.h).h",
  31842 "if ($Qs4) vtmp.h = vgather($Rt32,$Mu2,$Vv32.h).h",
  31858 "vtmp.h = vgather($Rt32,$Mu2,$Vvv32.w).h",
  31874 "if ($Qs4) vtmp.h = vgather($Rt32,$Mu2,$Vvv32.w).h",
  31890 "vtmp.w = vgather($Rt32,$Mu2,$Vv32.w).w",
  31906 "if ($Qs4) vtmp.w = vgather($Rt32,$Mu2,$Vv32.w).w",
|