/external/tensorflow/tensorflow/python/kernel_tests/ |
D | regex_replace_op_test.py |
    54  input_vector = constant_op.constant(values, dtypes.string)
    55  stripped = op(input_vector, "^(a:|b:)", "", replace_global=False)
    63  input_vector = constant_op.constant(values, dtypes.string)
    64  stripped = op(input_vector, "a.*a", "(\\0)")
    71  input_vector = constant_op.constant(values, dtypes.string)
    72  stripped = op(input_vector, "", "x")
    79  input_vector = constant_op.constant(values, dtypes.string)
    81  replace = op(input_vector, invalid_pattern, "x")
    89  input_vector = constant_op.constant(values, dtypes.string)
    90  stripped = op(input_vector, "ab", "abc", True)
    [all …]
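
The matches above exercise the replace_global flag and the "(\\0)" whole-match rewrite. A minimal C++ sketch of those two behaviours using RE2, which is assumed here to be the engine behind the op (the test itself only goes through the Python op):

    // First-match vs. global replacement, and the \0 whole-match rewrite.
    #include <iostream>
    #include <string>

    #include "re2/re2.h"

    int main() {
      std::string once = "a:b:c";
      std::string all = "a:b:c";
      RE2::Replace(&once, "(a:|b:)", "");        // replaces only the first match
      RE2::GlobalReplace(&all, "(a:|b:)", "");   // replaces every match
      std::cout << once << "\n";                 // b:c
      std::cout << all << "\n";                  // c

      std::string wrapped = "abcabc";
      RE2::GlobalReplace(&wrapped, "abc", "(\\0)");  // \0 = the whole match
      std::cout << wrapped << "\n";              // (abc)(abc)
      return 0;
    }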
|
D | unicode_script_op_test.py |
    41  input_vector = constant_op.constant(inputs, dtypes.int32)
    42  outputs = string_ops.unicode_script(input_vector).eval()
    56  input_vector = constant_op.constant(inputs, dtypes.int32)
    57  outputs = string_ops.unicode_script(input_vector).eval()
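
unicode_script maps Unicode code points to script codes. A short C++ sketch of the equivalent per-codepoint lookup with ICU4C, assuming ICU is the source of the script codes the op returns:

    #include <cstdio>

    #include "unicode/uscript.h"

    int main() {
      const UChar32 codepoints[] = {0x0041 /* A */, 0x0416 /* Ж */, 0x4E2D /* 中 */};
      for (UChar32 cp : codepoints) {
        UErrorCode status = U_ZERO_ERROR;
        UScriptCode script = uscript_getScript(cp, &status);
        if (U_SUCCESS(status)) {
          std::printf("U+%04X -> %s (script code %d)\n",
                      static_cast<unsigned>(cp), uscript_getShortName(script),
                      static_cast<int>(script));
        }
      }
      return 0;
    }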
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | propagator_debug_utils.cc |
    43  void DumpPendingNodeState(const NodeItem& node_item, const Entry* input_vector,  in DumpPendingNodeState() argument
    49  const Entry& input = input_vector[input_base + i];  in DumpPendingNodeState()
    62  const Entry& input = input_vector[input_base + i];  in DumpPendingNodeState()
    75  void DumpActiveNodeState(const NodeItem& node_item, const Entry* input_vector) {  in DumpActiveNodeState() argument
    79  const Entry& input = input_vector[input_base + i];  in DumpActiveNodeState()
|
D | propagator_debug_utils.h |
    29  void DumpPendingNodeState(const NodeItem& node_item, const Entry* input_vector,
    34  void DumpActiveNodeState(const NodeItem& node_item, const Entry* input_vector);
|
/external/armnn/src/backends/reference/workloads/ |
D | LstmUtils.cpp |
    40  void MeanStddevNormalization(armnn::Decoder<float>& input_vector,  in MeanStddevNormalization() argument
    50  sum += input_vector.Get();  in MeanStddevNormalization()
    51  sum_sq += input_vector.Get() * input_vector.Get();  in MeanStddevNormalization()
    52  ++input_vector;  in MeanStddevNormalization()
    54  input_vector -= v_size;  in MeanStddevNormalization()
    66  output_vector.Set((input_vector.Get() - mean) * stddev_inv);  in MeanStddevNormalization()
    68  ++input_vector;  in MeanStddevNormalization()
    73  input_vector -= v_size * n_batch;  in MeanStddevNormalization()
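
The matches sketch the whole algorithm: accumulate a sum and sum-of-squares over each length-v_size vector, derive the mean and inverse stddev, then write the normalized values, batch by batch. A plain-array C++ sketch of the same computation (the armnn version streams through Decoder/Encoder objects, and its exact zero-variance guard is not visible here, so the guard below is an assumption):

    #include <cmath>
    #include <cstddef>

    void MeanStddevNormalizationSketch(const float* input, float* output,
                                       std::size_t v_size, std::size_t n_batch) {
      for (std::size_t batch = 0; batch < n_batch; ++batch) {
        float sum = 0.0f;
        float sum_sq = 0.0f;
        for (std::size_t i = 0; i < v_size; ++i) {
          sum += input[i];
          sum_sq += input[i] * input[i];
        }
        const float mean = sum / v_size;
        const float variance = sum_sq / v_size - mean * mean;
        // Guard against an all-constant vector (assumed; the real guard may differ).
        const float stddev_inv =
            variance > 0.0f ? 1.0f / std::sqrt(variance) : 0.0f;
        for (std::size_t i = 0; i < v_size; ++i) {
          output[i] = (input[i] - mean) * stddev_inv;
        }
        input += v_size;
        output += v_size;
      }
    }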
|
D | LstmUtils.hpp | 23 void MeanStddevNormalization(armnn::Decoder<float>& input_vector,
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | reverse_test.cc |
    83  std::vector<float> input_vector(  in TEST_P() local
    85  std::iota(input_vector.begin(), input_vector.end(), 0.0);  in TEST_P()
    86  auto r1_literal = LiteralUtil::CreateR1<float>(input_vector);  in TEST_P()
|
D | reduce_window_test.cc |
    509   std::vector<float> input_vector(128 * 9, 1);  in XLA_TEST_P() local
    511   LiteralUtil::CreateR1<float>(input_vector), &builder_);  in XLA_TEST_P()
    520   std::vector<float> input_vector{  in XLA_TEST_P() local
    530   LiteralUtil::CreateR1<float>(input_vector), &builder_);  in XLA_TEST_P()
    537   std::vector<float> input_vector{  in XLA_TEST_P() local
    547   LiteralUtil::CreateR1<float>(input_vector), &builder_);  in XLA_TEST_P()
    1448  std::vector<float> input_vector(param.base_bounds[0]);  in XLA_TEST_P() local
    1449  std::iota(std::begin(input_vector), std::end(input_vector), 0);  in XLA_TEST_P()
    1451  LiteralUtil::CreateR1(absl::Span<const float>(input_vector));  in XLA_TEST_P()
    1479  /*operand=*/absl::Span<const float>(input_vector),  in XLA_TEST_P()
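
These tests build 1-D input vectors and feed them to XLA's ReduceWindow. For orientation, a small C++ sketch of a 1-D sum reduce-window with valid padding (padding mode and the reduction computation are parameters of the real op; sum and valid padding are assumptions here):

    #include <cstdio>
    #include <vector>

    std::vector<float> ReduceWindowSum1D(const std::vector<float>& input,
                                         int window, int stride) {
      std::vector<float> output;
      for (int start = 0; start + window <= static_cast<int>(input.size());
           start += stride) {
        float acc = 0.0f;
        for (int i = 0; i < window; ++i) acc += input[start + i];
        output.push_back(acc);
      }
      return output;
    }

    int main() {
      const std::vector<float> out = ReduceWindowSum1D({1, 2, 3, 4, 5}, 2, 1);
      for (float v : out) std::printf("%g ", v);  // 3 5 7 9
      return 0;
    }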
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | sse_tensor_utils.h |
    290  void ReductionSumVector(const float* input_vector, float* output_vector,  in ReductionSumVector() argument
    292  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,  in ReductionSumVector()
    296  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,  in ReductionSumVector() argument
    298  PortableReductionSumVector(input_vector, output_vector, output_size,  in ReductionSumVector()
    302  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,  in ReductionSumVector() argument
    304  SSE_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,  in ReductionSumVector()
    308  void MeanStddevNormalization(const float* __restrict__ input_vector,  in MeanStddevNormalization() argument
    311  PortableMeanStddevNormalization(input_vector, output_vector, v_size, n_batch);  in MeanStddevNormalization()
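
These headers are thin dispatch wrappers: each public function forwards to a SIMD kernel when one exists for the element type, otherwise to the Portable* reference. A self-contained sketch of that macro pattern (the expansion rule is inferred from the Neon*/Portable* names above; the real macros' selection logic may differ, and ScaleVector here is purely hypothetical):

    #include <cstdio>

    #if defined(__ARM_NEON)
    #define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
    #else
    #define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
    #endif

    // Hypothetical stand-in implementations, only to show the expansion.
    void PortableScaleVector(float* v, int n, float s) {
      for (int i = 0; i < n; ++i) v[i] *= s;
    }
    #if defined(__ARM_NEON)
    void NeonScaleVector(float* v, int n, float s) { PortableScaleVector(v, n, s); }
    #endif

    // Wrapper in the style of the ReductionSumVector overloads above.
    void ScaleVector(float* v, int n, float s) {
      NEON_OR_PORTABLE(ScaleVector, v, n, s);
    }

    int main() {
      float v[4] = {1, 2, 3, 4};
      ScaleVector(v, 4, 2.0f);
      std::printf("%g %g %g %g\n", v[0], v[1], v[2], v[3]);  // 2 4 6 8
      return 0;
    }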
|
D | neon_tensor_utils.h |
    275  void ReductionSumVector(const float* input_vector, float* output_vector,  in ReductionSumVector() argument
    277  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,  in ReductionSumVector()
    281  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,  in ReductionSumVector() argument
    283  PortableReductionSumVector(input_vector, output_vector, output_size,  in ReductionSumVector()
    287  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,  in ReductionSumVector() argument
    289  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,  in ReductionSumVector()
    293  void MeanStddevNormalization(const float* __restrict__ input_vector,  in MeanStddevNormalization() argument
    296  NEON_OR_PORTABLE(MeanStddevNormalization, input_vector, output_vector, v_size,  in MeanStddevNormalization()
|
D | neon_tensor_utils_impl.h |
    165  void NeonReductionSumVector(const float* input_vector, float* output_vector,
    168  void NeonReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
    176  void NeonMeanStddevNormalization(const float* __restrict__ input_vector,
|
D | neon_tensor_utils.cc |
    2462  void NeonReductionSumVector(const float* input_vector, float* output_vector,  in NeonReductionSumVector() argument
    2473  float32x4_t v1_f32x4 = vld1q_f32(input_vector + r);  in NeonReductionSumVector()
    2479  sum += input_vector[r];  in NeonReductionSumVector()
    2482  input_vector += reduction_size;  in NeonReductionSumVector()
    2486  void NeonReductionSumVector(const int8_t* input_vector, int32_t* output_vector,  in NeonReductionSumVector() argument
    2496  const int8x16_t s2_8x16 = vld1q_s8(input_vector + r);  in NeonReductionSumVector()
    2500  const int8x8_t s2_8x8 = vld1_s8(input_vector + r);  in NeonReductionSumVector()
    2506  sum += input_vector[r];  in NeonReductionSumVector()
    2509  input_vector += reduction_size;  in NeonReductionSumVector()
    2573  void NeonMeanStddevNormalization(const float* __restrict__ input_vector,  in NeonMeanStddevNormalization() argument
    [all …]
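
A hedged sketch of the float path shown above: load four lanes at a time with vld1q_f32, accumulate, fold the lanes, and finish any tail elements scalar-wise. The horizontal add below is an assumption, since the lane-folding helper used by the real code is not visible in the listing (compiles on ARM with NEON only):

    #include <arm_neon.h>

    void NeonReductionSumVectorSketch(const float* input_vector,
                                      float* output_vector, int output_size,
                                      int reduction_size) {
      for (int o = 0; o < output_size; ++o) {
        float32x4_t acc = vdupq_n_f32(0.0f);
        int r = 0;
        for (; r + 4 <= reduction_size; r += 4) {
          acc = vaddq_f32(acc, vld1q_f32(input_vector + r));
        }
        // Horizontal add of the four accumulator lanes.
        float32x2_t pair = vadd_f32(vget_low_f32(acc), vget_high_f32(acc));
        pair = vpadd_f32(pair, pair);
        float sum = vget_lane_f32(pair, 0);
        // Scalar tail for reduction_size not divisible by 4.
        for (; r < reduction_size; ++r) {
          sum += input_vector[r];
        }
        output_vector[o] = sum;
        input_vector += reduction_size;
      }
    }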
|
D | sse_tensor_utils_impl.h | 70 void SseReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
|
D | sse_tensor_utils.cc |
    510  void SseReductionSumVector(const int8_t* input_vector, int32_t* output_vector,  in SseReductionSumVector() argument
    514  const int8_t* __restrict__ row_ptr = input_vector + row * reduction_size;  in SseReductionSumVector()
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | portable_tensor_utils.h |
    279  void ReductionSumVector(const float* input_vector, float* output_vector,  in ReductionSumVector() argument
    281  PortableReductionSumVector(input_vector, output_vector, output_size,  in ReductionSumVector()
    285  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,  in ReductionSumVector() argument
    287  PortableReductionSumVector(input_vector, output_vector, output_size,  in ReductionSumVector()
    291  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,  in ReductionSumVector() argument
    293  PortableReductionSumVector(input_vector, output_vector, output_size,  in ReductionSumVector()
    297  void MeanStddevNormalization(const float* __restrict__ input_vector,  in MeanStddevNormalization() argument
    300  PortableMeanStddevNormalization(input_vector, output_vector, v_size, n_batch);  in MeanStddevNormalization()
|
D | portable_tensor_utils_impl.h |
    204  void PortableReductionSumVector(const INPUT* input_vector,  in PortableReductionSumVector() argument
    210  result += input_vector[r];  in PortableReductionSumVector()
    213  input_vector += reduction_size;  in PortableReductionSumVector()
    218  void PortableMeanStddevNormalization(const float* __restrict__ input_vector,
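
The visible lines give the whole portable algorithm: each output element is the sum of one contiguous block of reduction_size inputs, and the input pointer is then advanced by that block. A sketch of it as a free-standing template, with the loop bounds filled in as an assumption consistent with the lines shown:

    #include <cstdint>

    template <typename IN, typename OUT>
    void ReductionSumVectorSketch(const IN* input_vector, OUT* output_vector,
                                  int output_size, int reduction_size) {
      for (int o = 0; o < output_size; ++o) {
        OUT result = 0;
        for (int r = 0; r < reduction_size; ++r) {
          result += input_vector[r];
        }
        output_vector[o] = result;
        input_vector += reduction_size;
      }
    }

    // Example: two int32 sums over blocks of four int8 values.
    // int8_t in[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    // int32_t out[2];
    // ReductionSumVectorSketch(in, out, /*output_size=*/2, /*reduction_size=*/4);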
|
D | portable_tensor_utils.cc |
    718  void PortableMeanStddevNormalization(const float* __restrict__ input_vector,  in PortableMeanStddevNormalization() argument
    724  sum += input_vector[i];  in PortableMeanStddevNormalization()
    729  const float diff = input_vector[i] - mean;  in PortableMeanStddevNormalization()
    737  output_vector[i] = (input_vector[i] - mean) * stddev_inv;  in PortableMeanStddevNormalization()
    739  input_vector += v_size;  in PortableMeanStddevNormalization()
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | xrt_state_ops.h |
    75   std::vector<XRTTupleAllocation::ExpandedTupleInput>* input_vector,  in ParseTupleNode() argument
    85   tuple_node.tuples(i), input_tensor_list, input_vector,  in ParseTupleNode()
    91   if (input_index < 0 || input_index >= input_vector->size()) {  in ParseTupleNode()
    94   input_vector->size(), " inputs.");  in ParseTupleNode()
    98   input_vector->at(input_index);  in ParseTupleNode()
    127  std::vector<XRTTupleAllocation::ExpandedTupleInput>* input_vector,  in ParseTupleTree() argument
    135  input_vector, &tuple_tree_shape, rm));  in ParseTupleTree()
    153  *element = input_vector->at(input_index);  in ParseTupleTree()
    444  std::vector<XRTTupleAllocation::ExpandedTupleInput> input_vector(  in Compute()
    456  tuple_proto, arg_list, &input_vector,  in Compute()
    [all …]
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | tensor_utils_common.h |
    356  void ReductionSumVector(const float* input_vector, float* output_vector,
    360  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,
    364  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
    368  void MeanStddevNormalization(const float* __restrict__ input_vector,
|
/external/webrtc/modules/audio_coding/neteq/ |
D | merge.cc |
    60  AudioMultiVector input_vector(num_channels_);  in Process() local
    61  input_vector.PushBackInterleaved(  in Process()
    63  size_t input_length_per_channel = input_vector.Size();  in Process()
    73  input_vector[channel].CopyTo(input_length_per_channel, 0,  in Process()
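
PushBackInterleaved splits an interleaved buffer into per-channel vectors: sample i of channel c sits at index i * num_channels + c. A conceptual C++ sketch of that step (AudioMultiVector itself is NetEq-specific and not reproduced here):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<std::vector<int16_t>> Deinterleave(const int16_t* interleaved,
                                                   std::size_t num_samples,
                                                   std::size_t num_channels) {
      std::vector<std::vector<int16_t>> channels(num_channels);
      const std::size_t per_channel = num_samples / num_channels;
      for (auto& ch : channels) ch.reserve(per_channel);
      for (std::size_t i = 0; i < per_channel; ++i) {
        for (std::size_t c = 0; c < num_channels; ++c) {
          channels[c].push_back(interleaved[i * num_channels + c]);
        }
      }
      return channels;
    }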
|
/external/ComputeLibrary/src/runtime/CL/functions/ |
D | CLLSTMLayerQuantized.cpp |
    143  std::vector<const ICLTensor *> input_vector;  in configure() local
    144  input_vector.emplace_back(input);  in configure()
    145  input_vector.emplace_back(output_state_in);  in configure()
    149  _concat_inputs.configure(compile_context, input_vector, &_input, Window::DimX);  in configure()
    355  std::vector<const ITensorInfo *> input_vector;  in validate() local
    356  input_vector.emplace_back(input);  in validate()
    357  input_vector.emplace_back(output_state_in);  in validate()
    359  …ARM_COMPUTE_RETURN_ON_ERROR(CLConcatenateLayer::validate(input_vector, &input_concatenated, Window…  in validate()
|
/external/ComputeLibrary/src/runtime/NEON/functions/ |
D | NELSTMLayerQuantized.cpp |
    123  std::vector<const ITensor *> input_vector{ input, output_state_in };  in configure() local
    126  _concat_inputs.configure(input_vector, &_input, Window::DimX);  in configure()
    326  std::vector<const ITensorInfo *> input_vector;  in validate() local
    327  input_vector.emplace_back(input);  in validate()
    328  input_vector.emplace_back(output_state_in);  in validate()
    330  …ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate(input_vector, &input_concatenated, Window…  in validate()
|
/external/webrtc/modules/audio_processing/agc2/rnn_vad/ |
D | rnn_unittest.cc |
    33  rtc::ArrayView<const float> input_vector,  in TestFullyConnectedLayer() argument
    36  fc->ComputeOutput(input_vector);  in TestFullyConnectedLayer()
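
ComputeOutput runs a fully connected layer: each output is an activation applied to a bias plus the weighted sum of the inputs. A float sketch of that computation (the RNN VAD layer uses quantized weights and its own activation; float and tanh are assumptions here):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<float> FullyConnectedSketch(
        const std::vector<float>& input,
        const std::vector<std::vector<float>>& weights,  // [output][input]
        const std::vector<float>& bias) {
      std::vector<float> output(bias);  // start from the bias term
      for (std::size_t o = 0; o < output.size(); ++o) {
        for (std::size_t i = 0; i < input.size(); ++i) {
          output[o] += weights[o][i] * input[i];
        }
        output[o] = std::tanh(output[o]);  // assumed activation
      }
      return output;
    }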
|