/external/deqp/framework/opengl/
  gluES3PlusWrapperFuncs.inl
      6: dst->activeShaderProgram = src.activeShaderProgram;
      7: dst->activeTexture = src.activeTexture;
      8: dst->attachShader = src.attachShader;
      9: dst->beginQuery = src.beginQuery;
     10: dst->beginTransformFeedback = src.beginTransformFeedback;
     11: dst->bindAttribLocation = src.bindAttribLocation;
     12: dst->bindBuffer = src.bindBuffer;
     13: dst->bindBufferBase = src.bindBufferBase;
     14: dst->bindBufferRange = src.bindBufferRange;
     15: dst->bindFramebuffer = src.bindFramebuffer;
     [all …]

/external/mesa3d/prebuilt-intermediates/main/
  format_unpack.c
     57: unpack_float_a8b8g8r8_unorm(const void *void_src, float dst[4])   in unpack_float_a8b8g8r8_unorm()
     67: dst[0] = _mesa_unorm_to_float(r, 8);   in unpack_float_a8b8g8r8_unorm()
     70: dst[1] = _mesa_unorm_to_float(g, 8);   in unpack_float_a8b8g8r8_unorm()
     73: dst[2] = _mesa_unorm_to_float(b, 8);   in unpack_float_a8b8g8r8_unorm()
     76: dst[3] = _mesa_unorm_to_float(a, 8);   in unpack_float_a8b8g8r8_unorm()
     80: unpack_float_x8b8g8r8_unorm(const void *void_src, float dst[4])   in unpack_float_x8b8g8r8_unorm()
     89: dst[0] = _mesa_unorm_to_float(r, 8);   in unpack_float_x8b8g8r8_unorm()
     92: dst[1] = _mesa_unorm_to_float(g, 8);   in unpack_float_x8b8g8r8_unorm()
     95: dst[2] = _mesa_unorm_to_float(b, 8);   in unpack_float_x8b8g8r8_unorm()
     97: dst[3] = 1.0f;   in unpack_float_x8b8g8r8_unorm()
     [all …]

/external/angle/src/image_util/
  imageformats.cpp
     17: void L8::readColor(gl::ColorF *dst, const L8 *src)   in readColor() argument
     20: dst->red = lum;   in readColor()
     21: dst->green = lum;   in readColor()
     22: dst->blue = lum;   in readColor()
     23: dst->alpha = 1.0f;   in readColor()
     26: void L8::writeColor(L8 *dst, const gl::ColorF *src)   in writeColor() argument
     28: dst->L = gl::floatToNormalized<uint8_t>(src->red);   in writeColor()
     31: void L8::average(L8 *dst, const L8 *src1, const L8 *src2)   in average() argument
     33: dst->L = gl::average(src1->L, src2->L);   in average()
     36: void R8::readColor(gl::ColorUI *dst, const R8 *src)   in readColor() argument
     [all …]

  imageformats.h
     28: static void readColor(gl::ColorF *dst, const L8 *src);
     29: static void writeColor(L8 *dst, const gl::ColorF *src);
     30: static void average(L8 *dst, const L8 *src1, const L8 *src2);
     37: static void readColor(gl::ColorF *dst, const R8 *src);
     38: static void readColor(gl::ColorUI *dst, const R8 *src);
     39: static void writeColor(R8 *dst, const gl::ColorF *src);
     40: static void writeColor(R8 *dst, const gl::ColorUI *src);
     41: static void average(R8 *dst, const R8 *src1, const R8 *src2);
     48: static void readColor(gl::ColorF *dst, const A8 *src);
     49: static void writeColor(A8 *dst, const gl::ColorF *src);
     [all …]

/external/wpa_supplicant_8/wpa_supplicant/
  ctrl_iface_named_pipe.c
     87: static void ctrl_close_pipe(struct wpa_ctrl_dst *dst);
     93: static void global_close_pipe(struct wpa_global_dst *dst);
    116: struct wpa_ctrl_dst *dst, *next;   in ctrl_flush_broken_pipes() local
    118: dst = priv->ctrl_dst;   in ctrl_flush_broken_pipes()
    120: while (dst) {   in ctrl_flush_broken_pipes()
    121: next = dst->next;   in ctrl_flush_broken_pipes()
    122: if (ctrl_broken_pipe(dst->pipe, dst->used)) {   in ctrl_flush_broken_pipes()
    124: dst);   in ctrl_flush_broken_pipes()
    125: ctrl_close_pipe(dst);   in ctrl_flush_broken_pipes()
    127: dst = next;   in ctrl_flush_broken_pipes()
    [all …]

/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/
  X86InstrShiftRotate.td
     17: let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
     19: def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
     20:               "shl{b}\t{%cl, $dst|$dst, cl}",
     21:               [(set GR8:$dst, (shl GR8:$src1, CL))]>;
     22: def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
     23:               "shl{w}\t{%cl, $dst|$dst, cl}",
     24:               [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize16;
     25: def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
     26:               "shl{l}\t{%cl, $dst|$dst, cl}",
     27:               [(set GR32:$dst, (shl GR32:$src1, CL))]>, OpSize32;
     [all …]

/external/deqp/external/vulkancts/framework/vulkan/generated/vulkan/
  vkSupportedExtensions.inl
      7: void getCoreDeviceExtensionsImpl (uint32_t coreVersion, ::std::vector<const char*>& dst)   argument
     11: dst.push_back("VK_KHR_sampler_mirror_clamp_to_edge");
     12: dst.push_back("VK_KHR_shader_float16_int8");
     13: dst.push_back("VK_KHR_imageless_framebuffer");
     14: dst.push_back("VK_KHR_create_renderpass2");
     15: dst.push_back("VK_EXT_sampler_filter_minmax");
     16: dst.push_back("VK_KHR_image_format_list");
     17: dst.push_back("VK_EXT_descriptor_indexing");
     18: dst.push_back("VK_EXT_shader_viewport_index_layer");
     19: dst.push_back("VK_KHR_draw_indirect_count");
     [all …]

/external/llvm/lib/Target/X86/
  X86InstrShiftRotate.td
     18: let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
     20: def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
     21:               "shl{b}\t{%cl, $dst|$dst, cl}",
     22:               [(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>;
     23: def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
     24:               "shl{w}\t{%cl, $dst|$dst, cl}",
     25:               [(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize16;
     26: def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
     27:               "shl{l}\t{%cl, $dst|$dst, cl}",
     28:               [(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>, OpSize32;
     [all …]

/external/libaom/aom_dsp/x86/
  highbd_intrapred_sse2.c
     19: void aom_highbd_h_predictor_4x4_sse2(uint16_t *dst, ptrdiff_t stride,   in aom_highbd_h_predictor_4x4_sse2() argument
     29: _mm_storel_epi64((__m128i *)dst, row0);   in aom_highbd_h_predictor_4x4_sse2()
     30: dst += stride;   in aom_highbd_h_predictor_4x4_sse2()
     31: _mm_storel_epi64((__m128i *)dst, row1);   in aom_highbd_h_predictor_4x4_sse2()
     32: dst += stride;   in aom_highbd_h_predictor_4x4_sse2()
     33: _mm_storel_epi64((__m128i *)dst, row2);   in aom_highbd_h_predictor_4x4_sse2()
     34: dst += stride;   in aom_highbd_h_predictor_4x4_sse2()
     35: _mm_storel_epi64((__m128i *)dst, row3);   in aom_highbd_h_predictor_4x4_sse2()
     38: void aom_highbd_h_predictor_4x8_sse2(uint16_t *dst, ptrdiff_t stride,   in aom_highbd_h_predictor_4x8_sse2() argument
     41: aom_highbd_h_predictor_4x4_sse2(dst, stride, above, left, bd);   in aom_highbd_h_predictor_4x8_sse2()
     [all …]

  aom_convolve_copy_sse2.c
     15: static INLINE void copy_128(const uint8_t *src, uint8_t *dst) {   in copy_128() argument
     25: _mm_store_si128((__m128i *)(dst + 0 * 16), s[0]);   in copy_128()
     26: _mm_store_si128((__m128i *)(dst + 1 * 16), s[1]);   in copy_128()
     27: _mm_store_si128((__m128i *)(dst + 2 * 16), s[2]);   in copy_128()
     28: _mm_store_si128((__m128i *)(dst + 3 * 16), s[3]);   in copy_128()
     29: _mm_store_si128((__m128i *)(dst + 4 * 16), s[4]);   in copy_128()
     30: _mm_store_si128((__m128i *)(dst + 5 * 16), s[5]);   in copy_128()
     31: _mm_store_si128((__m128i *)(dst + 6 * 16), s[6]);   in copy_128()
     32: _mm_store_si128((__m128i *)(dst + 7 * 16), s[7]);   in copy_128()
     36: uint8_t *dst, ptrdiff_t dst_stride, int w, int h) {   in aom_convolve_copy_sse2() argument
     [all …]

  aom_convolve_copy_avx2.c
     15: static INLINE void copy_128(const uint8_t *src, uint8_t *dst) {   in copy_128() argument
     21: _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[0]);   in copy_128()
     22: _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[1]);   in copy_128()
     23: _mm256_storeu_si256((__m256i *)(dst + 2 * 32), s[2]);   in copy_128()
     24: _mm256_storeu_si256((__m256i *)(dst + 3 * 32), s[3]);   in copy_128()
     28: uint8_t *dst, ptrdiff_t dst_stride, int w, int h) {   in aom_convolve_copy_avx2() argument
     30: assert(!((intptr_t)dst % 16));   in aom_convolve_copy_avx2()
     36: memmove(dst, src, 2 * sizeof(*src));   in aom_convolve_copy_avx2()
     38: dst += dst_stride;   in aom_convolve_copy_avx2()
     39: memmove(dst, src, 2 * sizeof(*src));   in aom_convolve_copy_avx2()
     [all …]

/external/capstone/arch/X86/
  X86MappingInsnOp.inc
     28: { /* X86_ADC16mi, X86_INS_ADC: adc{w} $dst, $src */
     32: { /* X86_ADC16mi8, X86_INS_ADC: adc{w} $dst, $src */
     36: { /* X86_ADC16mr, X86_INS_ADC: adc{w} $dst, $src */
     56: { /* X86_ADC16rr_REV, X86_INS_ADC: adc{w} $dst, $src2 */
     64: { /* X86_ADC32mi, X86_INS_ADC: adc{l} $dst, $src */
     68: { /* X86_ADC32mi8, X86_INS_ADC: adc{l} $dst, $src */
     72: { /* X86_ADC32mr, X86_INS_ADC: adc{l} $dst, $src */
     92: { /* X86_ADC32rr_REV, X86_INS_ADC: adc{l} $dst, $src2 */
    100: { /* X86_ADC64mi32, X86_INS_ADC: adc{q} $dst, $src */
    104: { /* X86_ADC64mi8, X86_INS_ADC: adc{q} $dst, $src */
    [all …]

/external/tensorflow/tensorflow/core/profiler/convert/
  op_metrics_db_combiner.cc
     31: void CombinePrecisionStats(const PrecisionStats& src, PrecisionStats* dst) {   in CombinePrecisionStats() argument
     32: dst->set_compute_16bit_ps(src.compute_16bit_ps() + dst->compute_16bit_ps());   in CombinePrecisionStats()
     33: dst->set_compute_32bit_ps(src.compute_32bit_ps() + dst->compute_32bit_ps());   in CombinePrecisionStats()
     38: void CopyOpMetricsMetadata(const OpMetrics& src, OpMetrics* dst) {   in CopyOpMetricsMetadata() argument
     39: DCHECK(dst != nullptr);   in CopyOpMetricsMetadata()
     40: DCHECK_EQ(src.hlo_module_id(), dst->hlo_module_id());   in CopyOpMetricsMetadata()
     41: DCHECK_EQ(src.name(), dst->name());   in CopyOpMetricsMetadata()
     42: if (dst->long_name().empty()) {   in CopyOpMetricsMetadata()
     43: dst->set_long_name(src.long_name());   in CopyOpMetricsMetadata()
     45: if (dst->category().empty()) {   in CopyOpMetricsMetadata()
     [all …]

/external/deqp/external/vulkancts/framework/vulkan/generated/vulkansc/
  vkSupportedExtensions.inl
      7: void getCoreDeviceExtensionsImpl (uint32_t coreVersion, ::std::vector<const char*>& dst)   argument
     11: dst.push_back("VK_EXT_descriptor_indexing");
     12: dst.push_back("VK_EXT_host_query_reset");
     13: dst.push_back("VK_EXT_sampler_filter_minmax");
     14: dst.push_back("VK_EXT_scalar_block_layout");
     15: dst.push_back("VK_EXT_separate_stencil_usage");
     16: dst.push_back("VK_EXT_shader_viewport_index_layer");
     17: dst.push_back("VK_KHR_8bit_storage");
     18: dst.push_back("VK_KHR_buffer_device_address");
     19: dst.push_back("VK_KHR_create_renderpass2");
     [all …]

/external/libvpx/vpx_dsp/loongarch/
  vpx_convolve_copy_lsx.c
     16: uint8_t *dst, int32_t dst_stride, int32_t height) {   in copy_width8_lsx() argument
     34: __lsx_vstelm_d(src0, dst, 0, 0);   in copy_width8_lsx()
     35: dst += dst_stride;   in copy_width8_lsx()
     36: __lsx_vstelm_d(src1, dst, 0, 0);   in copy_width8_lsx()
     37: dst += dst_stride;   in copy_width8_lsx()
     38: __lsx_vstelm_d(src2, dst, 0, 0);   in copy_width8_lsx()
     39: dst += dst_stride;   in copy_width8_lsx()
     40: __lsx_vstelm_d(src3, dst, 0, 0);   in copy_width8_lsx()
     41: dst += dst_stride;   in copy_width8_lsx()
     43: __lsx_vstelm_d(src4, dst, 0, 0);   in copy_width8_lsx()
     [all …]

/external/google-benchmark/test/
  benchmark_gtest.cc
     12: std::vector<int> dst;   in TEST() local
     13: AddRange(&dst, 1, 2, 2);   in TEST()
     14: EXPECT_THAT(dst, testing::ElementsAre(1, 2));   in TEST()
     18: std::vector<int64_t> dst;   in TEST() local
     19: AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);   in TEST()
     20: EXPECT_THAT(dst, testing::ElementsAre(1, 2));   in TEST()
     24: std::vector<int> dst;   in TEST() local
     25: AddRange(&dst, 5, 15, 2);   in TEST()
     26: EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));   in TEST()
     30: std::vector<int64_t> dst;   in TEST() local
     [all …]

/external/mesa3d/src/gallium/auxiliary/rtasm/
  rtasm_x86sse.c
    288: struct x86_reg dst,   in emit_op_modrm() argument
    291: switch (dst.mod) {   in emit_op_modrm()
    294: emit_modrm(p, dst, src);   in emit_op_modrm()
    301: emit_modrm(p, src, dst);   in emit_op_modrm()
    455: void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm )   in x86_mov_reg_imm() argument
    457: DUMP_RI( dst, imm );   in x86_mov_reg_imm()
    458: assert(dst.file == file_REG32);   in x86_mov_reg_imm()
    459: assert(dst.mod == mod_REG);   in x86_mov_reg_imm()
    460: emit_1ub(p, 0xb8 + dst.idx);   in x86_mov_reg_imm()
    464: void x86_mov_imm( struct x86_function *p, struct x86_reg dst, int imm )   in x86_mov_imm() argument
    [all …]

/external/protobuf/objectivec/Tests/
  GPBMessageTests+Merge.m
     54: Message3 *dst = [[Message3 alloc] init];
     58: dst.optionalInt32 = 1;
     59: dst.optionalInt64 = 1;
     60: dst.optionalUint32 = 1;
     61: dst.optionalUint64 = 1;
     62: dst.optionalSint32 = 1;
     63: dst.optionalSint64 = 1;
     64: dst.optionalFixed32 = 1;
     65: dst.optionalFixed64 = 1;
     66: dst.optionalSfixed32 = 1;
     [all …]

/external/cronet/third_party/protobuf/objectivec/Tests/
  GPBMessageTests+Merge.m
     54: Message3 *dst = [[Message3 alloc] init];
     58: dst.optionalInt32 = 1;
     59: dst.optionalInt64 = 1;
     60: dst.optionalUint32 = 1;
     61: dst.optionalUint64 = 1;
     62: dst.optionalSint32 = 1;
     63: dst.optionalSint64 = 1;
     64: dst.optionalFixed32 = 1;
     65: dst.optionalFixed64 = 1;
     66: dst.optionalSfixed32 = 1;
     [all …]

/external/deqp/framework/referencerenderer/
  rrVertexAttrib.cpp
     60: inline void readOrder (typename tcu::Vector<DstScalarType, 4>& dst, const int size, const void* ptr)   in readOrder() argument
     65: dst[Order::T0] = DstScalarType(aligned[0]);   in readOrder()
     66: if (size >= 2) dst[Order::T1] = DstScalarType(aligned[1]);   in readOrder()
     67: if (size >= 3) dst[Order::T2] = DstScalarType(aligned[2]);   in readOrder()
     68: if (size >= 4) dst[Order::T3] = DstScalarType(aligned[3]);   in readOrder()
     72: inline void readUnormOrder (tcu::Vec4& dst, const int size, const void* ptr)   in readUnormOrder() argument
     79: dst[Order::T0] = float(aligned[0]) / float(range);   in readUnormOrder()
     80: if (size >= 2) dst[Order::T1] = float(aligned[1]) / float(range);   in readUnormOrder()
     81: if (size >= 3) dst[Order::T2] = float(aligned[2]) / float(range);   in readUnormOrder()
     82: if (size >= 4) dst[Order::T3] = float(aligned[3]) / float(range);   in readUnormOrder()
     [all …]

/external/golang-protobuf/internal/impl/
  merge_gen.go
     11: func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {   argument
     12: *dst.Bool() = *src.Bool()
     15: func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {   argument
     18: *dst.Bool() = v
     22: func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {   argument
     26: *dst.BoolPtr() = &v
     30: func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {   argument
     31: ds := dst.BoolSlice()
     36: func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {   argument
     37: *dst.Int32() = *src.Int32()
     [all …]

/external/arm-optimized-routines/string/arm/
  memcpy.S
     64: #define dst ip   macro
     86: vstr \vreg, [dst, #\base]
     88: vstr d0, [dst, #\base + 8]
     90: vstr d1, [dst, #\base + 16]
     92: vstr d2, [dst, #\base + 24]
     94: vstr \vreg, [dst, #\base + 32]
     96: vstr d0, [dst, #\base + 40]
     98: vstr d1, [dst, #\base + 48]
    100: vstr d2, [dst, #\base + 56]
    105: vstr \vreg, [dst, #\base]
    [all …]

/external/libvpx/vpx_dsp/arm/
  intrapred_neon.c
     26: static INLINE void dc_store_4x4(uint8_t *dst, ptrdiff_t stride,   in dc_store_4x4() argument
     30: for (i = 0; i < 4; ++i, dst += stride) {   in dc_store_4x4()
     31: vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(dc_dup), 0);   in dc_store_4x4()
     35: void vpx_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,   in vpx_dc_predictor_4x4_neon() argument
     45: dc_store_4x4(dst, stride, dc);   in vpx_dc_predictor_4x4_neon()
     48: void vpx_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,   in vpx_dc_left_predictor_4x4_neon() argument
     53: dc_store_4x4(dst, stride, dc);   in vpx_dc_left_predictor_4x4_neon()
     56: void vpx_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,   in vpx_dc_top_predictor_4x4_neon() argument
     61: dc_store_4x4(dst, stride, dc);   in vpx_dc_top_predictor_4x4_neon()
     64: void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,   in vpx_dc_128_predictor_4x4_neon() argument
     [all …]

/external/pdfium/core/fxcrt/
  span_util_unittest.cpp
     12: std::vector<char> dst(4, 'B');   in TEST() local
     13: fxcrt::spanset(pdfium::make_span(dst).first(2), 'A');   in TEST()
     14: EXPECT_EQ(dst[0], 'A');   in TEST()
     15: EXPECT_EQ(dst[1], 'A');   in TEST()
     16: EXPECT_EQ(dst[2], 'B');   in TEST()
     17: EXPECT_EQ(dst[3], 'B');   in TEST()
     21: std::vector<char> dst(4, 'B');   in TEST() local
     22: fxcrt::spanset(pdfium::make_span(dst).subspan(4), 'A');   in TEST()
     23: EXPECT_EQ(dst[0], 'B');   in TEST()
     24: EXPECT_EQ(dst[1], 'B');   in TEST()
     [all …]

/external/libvpx/vpx_dsp/ppc/
  intrapred_vsx.c
     14: void vpx_v_predictor_16x16_vsx(uint8_t *dst, ptrdiff_t stride,   in vpx_v_predictor_16x16_vsx() argument
     20: for (i = 0; i < 16; i++, dst += stride) {   in vpx_v_predictor_16x16_vsx()
     21: vec_vsx_st(d, 0, dst);   in vpx_v_predictor_16x16_vsx()
     25: void vpx_v_predictor_32x32_vsx(uint8_t *dst, ptrdiff_t stride,   in vpx_v_predictor_32x32_vsx() argument
     32: for (i = 0; i < 32; i++, dst += stride) {   in vpx_v_predictor_32x32_vsx()
     33: vec_vsx_st(d0, 0, dst);   in vpx_v_predictor_32x32_vsx()
     34: vec_vsx_st(d1, 16, dst);   in vpx_v_predictor_32x32_vsx()
     42: void vpx_h_predictor_4x4_vsx(uint8_t *dst, ptrdiff_t stride,
     52: vec_vsx_st(vec_sel(v0, vec_vsx_ld(0, dst), (uint8x16_t)mask4), 0, dst);
     53: dst += stride;
     [all …]