/external/swiftshader/third_party/astc-encoder/Source/ |
D | astc_color_unquantize.cpp |
     28   uint4* output0,     in rgb_delta_unpack() argument
    127   output0->x = r0e;   in rgb_delta_unpack()
    128   output0->y = g0e;   in rgb_delta_unpack()
    129   output0->z = b0e;   in rgb_delta_unpack()
    130   output0->w = 0xFF;  in rgb_delta_unpack()
    143   uint4* output0,     in rgb_unpack() argument
    161   output0->x = ri1b;  in rgb_unpack()
    162   output0->y = gi1b;  in rgb_unpack()
    163   output0->z = bi1b;  in rgb_unpack()
    164   output0->w = 255;   in rgb_unpack()
    [all …]
|
D | astc_codec_internals.h |
    376   uint4* output0,
|
/external/XNNPACK/src/f32-dwconv-spchw/ |
D | 3x3s2p1-scalar.c |
     37   float* output0 = output;  in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__scalar() local
     76   *output0 = voutput; output0 = (float *) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__scalar()
     93   *output0 = voutput;  in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__scalar()
     99   output0 = (float*) ((uintptr_t) output0 + output_width_increment);  in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__scalar()
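The scalar spchw kernels above all advance `output0` by a byte stride through a `uintptr_t` round-trip rather than by an element count. A minimal sketch of that idiom; `write_strided_row` is an illustrative helper name, not an XNNPACK function:

```cpp
#include <stdint.h>
#include <stddef.h>

// Writes n values to a row whose consecutive elements are separated by a
// byte stride. Casting through uintptr_t lets callers express strides that
// are not a multiple of sizeof(float), matching the kernels' pointer math.
static void write_strided_row(float* output0, size_t n, float voutput,
                              size_t output_tuple_stride /* in bytes */) {
  for (size_t i = 0; i < n; i++) {
    *output0 = voutput;
    output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);
  }
}
```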
|
D | 3x3p1-scalar.c |
     37   float* output0 = output;  in xnn_f32_dwconv_spchw_ukernel_3x3p1__scalar() local
     79   *output0 = voutput; output0 = (float *) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__scalar()
     93   *output0 = voutput;  in xnn_f32_dwconv_spchw_ukernel_3x3p1__scalar()
     99   output0 = (float*) ((uintptr_t) output0 + output_width_increment);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__scalar()
|
D | 5x5p2-scalar.c |
     39   float* output0 = output;  in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar() local
    139   *output0 = voutput; output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
    168   *output0 = voutput; output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
    184   *output0 = voutput;;  in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
    192   output0 = (float*) ((uintptr_t) output0 + output_width_increment_single);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
|
D | 3x3p1-neonfma.c |
     44   float* output0 = output;  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma() local
     45   float* output1 = (float *)((uintptr_t)output0 + output_width_stride);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    147   vst1q_f32(output0, vo0); output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    226   vst1q_f32(output0, vo0);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    230   float* output0_lo = output0;  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    257   output0 = (float*) ((uintptr_t) output0 + output_width_increment);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    313   vst1q_f32(output0, vo); output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    353   vst1q_f32(output0, vo);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    355   float* output0_lo = output0;  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
    370   output0 = (float*) ((uintptr_t) output0 + output_width_increment_single);  in xnn_f32_dwconv_spchw_ukernel_3x3p1__neonfma()
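The neonfma variants keep two row pointers (`output0`, `output1` one `output_width_stride` apart) and store a 4-float tile per row before bumping each pointer by the same byte stride. A reduced sketch of that store step, assuming `<arm_neon.h>`; `store_two_rows` is an illustrative helper, not an XNNPACK function:

```cpp
#include <arm_neon.h>
#include <stdint.h>
#include <stddef.h>

// Stores one 4-float tile into each of two output rows and advances both
// row pointers by a byte stride, as the neonfma micro-kernels do per column.
static void store_two_rows(float** output0, float** output1,
                           float32x4_t vo0, float32x4_t vo1,
                           size_t output_tuple_stride /* in bytes */) {
  vst1q_f32(*output0, vo0);
  vst1q_f32(*output1, vo1);
  *output0 = (float*) ((uintptr_t) *output0 + output_tuple_stride);
  *output1 = (float*) ((uintptr_t) *output1 + output_tuple_stride);
}
```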
|
D | 5x5s2p2-scalar.c |
     39   float* output0 = output;  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar() local
    128   *output0 = voutput; output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
    148   *output0 = voutput;  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
    162   *output0 = voutput;  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
    170   output0 = (float*) ((uintptr_t) output0 + output_width_increment);  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
|
D | 5x5s2p2-neonfma.c |
     43   float* output0 = output;  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma() local
    188   vst1q_f32(output0, vo0);  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
    189   output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
    191   float* output0_lo = output0;  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
    352   vst1q_f32(output0, vo0);  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
    353   output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
    355   float* output0_lo = output0;  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
    372   output0 = (float*) ((uintptr_t) output0 + output_width_increment_single);  in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
|
D | 5x5p2-neonfma.c |
     44   float* output0 = output;  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma() local
     45   float* output1 = (float *)((uintptr_t) output0 + output_width_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    246   vst1q_f32(output0, vo0); output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    435   vst1q_f32(output0, vo0); output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    604   vst1q_f32(output0, vo0);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    608   float* output0_lo = output0;  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    637   output0 = (float*) ((uintptr_t) output2 + output_width_increment_single);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    638   output1 = (float*) ((uintptr_t) output0 + output_width_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    795   vst1q_f32(output0, vo0); output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    946   vst1q_f32(output0, vo0); output0 = (float*) ((uintptr_t) output0 + output_tuple_stride);  in xnn_f32_dwconv_spchw_ukernel_5x5p2__neonfma()
    [all …]
|
/external/snakeyaml/src/test/java/org/yaml/snakeyaml/issues/issue124/ |
D | DumpTest.java |
     30   String output0 = yaml.dump(bean);  in testDumperOptionsSideEffect() local
     33   output0);  in testDumperOptionsSideEffect()
     39   assertEquals("Yaml.dumpAs() should not have any side effects.", output0, output2);  in testDumperOptionsSideEffect()
|
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/ |
D | input_output_names_attr.mlir |
      7   // CHECK: attributes {tf.entry_function = {inputs = "input0,input1", outputs = "output0,output1"}}
      8   attributes {tf.entry_function = {inputs = "input0,input1", outputs = "output0,output1"}} {
|
/external/tensorflow/tensorflow/lite/micro/kernels/ |
D | unpack.cc |
     36   const TfLiteTensor* output0 = &context->tensors[node->outputs->data[0]];  in UnpackImpl() local
     38   const TfLiteIntArray* output_dims = output0->dims;  in UnpackImpl()
|
D | split.cc |
     35   const TfLiteTensor* output0 = &context->tensors[node->outputs->data[0]];  in SplitImpl() local
     36   const TfLiteIntArray* output_dims = output0->dims;  in SplitImpl()
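Both micro kernels above look up the first output tensor through `node->outputs` and read its `dims` to drive the copy loop. A minimal sketch of that lookup, assuming the TF Lite C types declared in tensorflow/lite/c/common.h; `Output0ElementCount` is an illustrative name and the real kernels perform more validation:

```cpp
#include "tensorflow/lite/c/common.h"

// Returns the flat element count of the first output tensor, computed from
// its dims, the way UnpackImpl/SplitImpl read output0->dims before looping
// over all outputs. Sketch only; error handling is omitted.
int Output0ElementCount(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* output0 = &context->tensors[node->outputs->data[0]];
  const TfLiteIntArray* output_dims = output0->dims;
  int count = 1;
  for (int d = 0; d < output_dims->size; ++d) {
    count *= output_dims->data[d];  // extent of dimension d
  }
  return count;
}
```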
|
/external/XNNPACK/src/q8-dwconv/ |
D | up8x9-neon.c |
     52   uint8_t* output0 = output;  in xnn_q8_dwconv_ukernel_up8x9__neon() local
     53   uint8_t* output1 = output0 + channels + output_increment;  in xnn_q8_dwconv_ukernel_up8x9__neon()
    214   vst1_u8(output0, vout0); output0 += 8;  in xnn_q8_dwconv_ukernel_up8x9__neon()
    373   … vst1_lane_u32(__builtin_assume_aligned(output0, 1), vreinterpret_u32_u8(vout0), 0); output0 += 4;  in xnn_q8_dwconv_ukernel_up8x9__neon()
    381   … vst1_lane_u16(__builtin_assume_aligned(output0, 1), vreinterpret_u16_u8(vout0), 0); output0 += 2;  in xnn_q8_dwconv_ukernel_up8x9__neon()
    389   vst1_lane_u8(__builtin_assume_aligned(output0, 1), vout0, 0); output0++;  in xnn_q8_dwconv_ukernel_up8x9__neon()
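The remainder path of this q8 kernel stores a partial 8-byte vector by peeling off 4-, 2- and 1-byte pieces with lane stores, shifting the vector down in between. A sketch of that tail pattern; `store_tail_u8x8` is an illustrative helper, not the XNNPACK micro-kernel itself:

```cpp
#include <arm_neon.h>
#include <stdint.h>
#include <stddef.h>

// Stores the low n (< 8) bytes of an 8-byte vector: 4 bytes, then 2, then 1,
// rotating the vector after each partial store so lane 0 always holds the
// next unwritten bytes. Returns the advanced output pointer.
static uint8_t* store_tail_u8x8(uint8_t* output0, uint8x8_t vout0, size_t n) {
  if (n & 4) {
    vst1_lane_u32((uint32_t*) __builtin_assume_aligned(output0, 1),
                  vreinterpret_u32_u8(vout0), 0);
    output0 += 4;
    vout0 = vext_u8(vout0, vout0, 4);
  }
  if (n & 2) {
    vst1_lane_u16((uint16_t*) __builtin_assume_aligned(output0, 1),
                  vreinterpret_u16_u8(vout0), 0);
    output0 += 2;
    vout0 = vext_u8(vout0, vout0, 2);
  }
  if (n & 1) {
    vst1_lane_u8(output0, vout0, 0);
    output0++;
  }
  return output0;
}
```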
|
/external/deqp/external/vulkancts/modules/vulkan/shaderexecutor/ |
D | vktAtomicOperationTests.cpp |
    270   Expected (T inout, T output0, T output1)  in Expected()
    273   m_output[0] = output0;  in Expected()
    277   bool compare (T inout, T output0, T output1)  in compare()
    280   && deMemCmp((const void*)&m_output[0], (const void*)&output0, sizeof(output0)) == 0  in compare()
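The test fixture stores an expected in-out value plus two per-invocation outputs and compares them against actual results bytewise. A reduced sketch of that shape, using std::memcmp in place of the dEQP deMemCmp helper and omitting the rest of the fixture:

```cpp
#include <cstring>

// Holds the expected in-out value and the two outputs of an atomic-operation
// invocation, and compares them bytewise against the values read back.
template <typename T>
class Expected {
public:
  Expected(T inoutValue, T output0, T output1) : m_inout(inoutValue) {
    m_output[0] = output0;
    m_output[1] = output1;
  }

  bool compare(T inoutValue, T output0, T output1) const {
    return std::memcmp(&m_inout, &inoutValue, sizeof(T)) == 0 &&
           std::memcmp(&m_output[0], &output0, sizeof(T)) == 0 &&
           std::memcmp(&m_output[1], &output1, sizeof(T)) == 0;
  }

private:
  T m_inout;
  T m_output[2];
};
```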
|
/external/XNNPACK/src/f32-conv-hwc2spchw/ |
D | 3x3s2p1c3x4-neonfma-2x2.c |
     45   float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__neonfma_2x2() local
     46   float* output1 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__neonfma_2x2()
     68   output1 = output0;  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__neonfma_2x2()
     73   float* o0c0 = output0;  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__neonfma_2x2()
    645   output0 = (float*) ((uintptr_t) output1 + output_height_stride);  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__neonfma_2x2()
    646   output1 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__neonfma_2x2()
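These conv kernels derive two output row pointers from `output_height_stride` and, per the `output1 = output0;` hit above, alias the second row onto the first when fewer than two valid rows remain, so the 2-row tile loop needs no special casing. A sketch of that setup; `setup_output_rows` and the exact clamp condition are assumptions for illustration, not the XNNPACK code itself:

```cpp
#include <stdint.h>
#include <stddef.h>

// Computes the two output row pointers for a 2-row tile starting at
// output_y_start, clamping the second pointer onto the first when only one
// row is left so stores to "row 1" stay inside valid memory.
static void setup_output_rows(float* output, size_t output_height_stride,
                              size_t output_y_start, size_t output_y_end,
                              float** out0, float** out1) {
  float* output0 =
      (float*) ((uintptr_t) output + output_height_stride * output_y_start);
  float* output1 = (float*) ((uintptr_t) output0 + output_height_stride);
  if (output_y_start + 2 > output_y_end) {
    output1 = output0;  // fewer than two rows remain: alias row 1 onto row 0
  }
  *out0 = output0;
  *out1 = output1;
}
```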
|
D | 3x3s2p1c3x4-scalar-1x1.c |
     41   float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__scalar_1x1() local
     58   float* o0c0 = output0;  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__scalar_1x1()
    663   output0 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc2spchw_ukernel_3x3s2p1c3x4__scalar_1x1()
|
/external/XNNPACK/src/f32-conv-hwc/ |
D | 3x3s2p1c3x4-neonfma-2x2.c |
     45   float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2() local
     46   float* output1 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2()
     68   output1 = output0;  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2()
     73   float* o0 = output0;  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2()
    659   output0 = (float*) ((uintptr_t) output1 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2()
    660   output1 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neonfma_2x2()
|
D | 3x3s2p0p1c3x4-scalar-1x1.c |
     42   float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);  in xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__scalar_1x1() local
     59   float* o0 = output0;  in xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__scalar_1x1()
    678   output0 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__scalar_1x1()
|
D | 3x3s2p1c3x4-scalar-1x1.c |
     42   float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__scalar_1x1() local
     59   float* o0 = output0;  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__scalar_1x1()
    678   output0 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__scalar_1x1()
|
D | 3x3s2p1c3x8-neonfma-2x2.c |
     45   float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2() local
     46   float* output1 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2()
     68   output1 = output0;  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2()
     73   float* o0 = output0;  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2()
   1016   output0 = (float*) ((uintptr_t) output1 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2()
   1017   output1 = (float*) ((uintptr_t) output0 + output_height_stride);  in xnn_f32_conv_hwc_ukernel_3x3s2p1c3x8__neonfma_2x2()
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/ |
D | graph-function-defs.pbtxt |
    313   name: "output0"
    360   input: "output0"
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | fake_quantize_ops.cc |
    308   xla::XlaOp output0 = xla::Select(between_nudged_min_max, gradient, zeroes);  in Compile() local
    309   ctx->SetOutput(0, output0);  in Compile()
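The gradient kernel keeps the incoming gradient only where the input fell inside the nudged range, selecting zeros elsewhere. A reduced sketch of that masking step, assuming the free functions And/Ge/Le/Select/ZerosLike from the XLA client builder headers; the nudged-range computation and the surrounding OpKernel are omitted:

```cpp
#include "tensorflow/compiler/xla/client/lib/constants.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"

// Passes the gradient through only where nudged_min <= input <= nudged_max,
// zeroing it everywhere else, mirroring the Select(...) hit above.
xla::XlaOp FakeQuantGradient(xla::XlaOp input, xla::XlaOp gradient,
                             xla::XlaOp nudged_min, xla::XlaOp nudged_max) {
  xla::XlaOp between_nudged_min_max =
      xla::And(xla::Ge(input, nudged_min), xla::Le(input, nudged_max));
  xla::XlaOp zeroes = xla::ZerosLike(gradient);
  xla::XlaOp output0 = xla::Select(between_nudged_min_max, gradient, zeroes);
  return output0;
}
```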
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | buffer_assignment_test.cc |
   2079   auto output0 = builder.AddInstruction(  in TEST_F() local
   2090   HloInstruction::CreateTuple({input0, weights0, output0}));  in TEST_F()
   2399   auto output0 = builder.AddInstruction(  in TEST_F() local
   2408   HloInstruction::CreateTuple({input0, weights0, output0}));  in TEST_F()
   2538   auto output0 = builder.AddInstruction(  in TEST_F() local
   2553   HloInstruction::CreateTuple({input0, weights0, output0}));  in TEST_F()
   2588   input0, weights0, zero, output0, while0->mutable_operand(0), while0,  in TEST_F()
   2616   auto output0 = builder.AddInstruction(  in TEST_F() local
   2627   HloInstruction::CreateTuple({input0, weights0, output0}));  in TEST_F()
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | fwd_dct32x32_impl_sse2.h |
   2996   int16_t *output0 = &intermediate[column_start * 32];  in FDCT32x32_2D() local
   3099   _mm_storeu_si128((__m128i *)(output0 + 0 * 32), tr2_0);  in FDCT32x32_2D()
   3100   _mm_storeu_si128((__m128i *)(output0 + 1 * 32), tr2_1);  in FDCT32x32_2D()
   3101   _mm_storeu_si128((__m128i *)(output0 + 2 * 32), tr2_2);  in FDCT32x32_2D()
   3102   _mm_storeu_si128((__m128i *)(output0 + 3 * 32), tr2_3);  in FDCT32x32_2D()
   3103   _mm_storeu_si128((__m128i *)(output0 + 4 * 32), tr2_4);  in FDCT32x32_2D()
   3104   _mm_storeu_si128((__m128i *)(output0 + 5 * 32), tr2_5);  in FDCT32x32_2D()
   3105   _mm_storeu_si128((__m128i *)(output0 + 6 * 32), tr2_6);  in FDCT32x32_2D()
   3106   _mm_storeu_si128((__m128i *)(output0 + 7 * 32), tr2_7);  in FDCT32x32_2D()
   3108   output0 += 8;  in FDCT32x32_2D()
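The transpose stage above writes eight 8x16-bit rows into the 32-column intermediate buffer and then steps the base pointer past the eight columns just written. A sketch of that store loop; `store_8x8_block` is an illustrative helper, not the libvpx code itself:

```cpp
#include <emmintrin.h>
#include <stdint.h>

// Stores eight rows of eight int16_t values into a row-major buffer whose
// rows are 32 elements wide, then returns the base pointer advanced by the
// eight columns just written, as the FDCT32x32_2D transpose does.
static int16_t* store_8x8_block(int16_t* output0, const __m128i rows[8]) {
  for (int i = 0; i < 8; ++i) {
    _mm_storeu_si128((__m128i*) (output0 + i * 32), rows[i]);
  }
  return output0 + 8;
}
```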
|