/external/tensorflow/tensorflow/python/kernel_tests/ |
D | regex_replace_op_test.py |
    54  input_vector = constant_op.constant(values, dtypes.string)
    55  stripped = op(input_vector, "^(a:|b:)", "", replace_global=False)
    63  input_vector = constant_op.constant(values, dtypes.string)
    64  stripped = op(input_vector, "a.*a", "(\\0)")
    71  input_vector = constant_op.constant(values, dtypes.string)
    72  stripped = op(input_vector, "", "x")
    79  input_vector = constant_op.constant(values, dtypes.string)
    81  replace = op(input_vector, invalid_pattern, "x")
    89  input_vector = constant_op.constant(values, dtypes.string)
    90  stripped = op(input_vector, "ab", "abc", True)
    [all …]
|
D | unicode_script_op_test.py |
    41  input_vector = constant_op.constant(inputs, dtypes.int32)
    42  outputs = string_ops.unicode_script(input_vector).eval()
    56  input_vector = constant_op.constant(inputs, dtypes.int32)
    57  outputs = string_ops.unicode_script(input_vector).eval()
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | propagator_debug_utils.cc |
    43  void DumpPendingNodeState(const NodeItem& node_item, const Entry* input_vector,   in DumpPendingNodeState() argument
    49  const Entry& input = input_vector[input_base + i];   in DumpPendingNodeState()
    62  const Entry& input = input_vector[input_base + i];   in DumpPendingNodeState()
    75  void DumpActiveNodeState(const NodeItem& node_item, const Entry* input_vector) {   in DumpActiveNodeState() argument
    79  const Entry& input = input_vector[input_base + i];   in DumpActiveNodeState()
|
D | propagator_debug_utils.h |
    29  void DumpPendingNodeState(const NodeItem& node_item, const Entry* input_vector,
    34  void DumpActiveNodeState(const NodeItem& node_item, const Entry* input_vector);
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | reverse_test.cc |
    83  std::vector<float> input_vector(   in TEST_P() local
    85  std::iota(input_vector.begin(), input_vector.end(), 0.0);   in TEST_P()
    86  auto r1_literal = LiteralUtil::CreateR1<float>(input_vector);   in TEST_P()
|
D | reduce_window_test.cc |
    509   std::vector<float> input_vector(128 * 9, 1);   in XLA_TEST_P() local
    511   LiteralUtil::CreateR1<float>(input_vector), &builder_);   in XLA_TEST_P()
    520   std::vector<float> input_vector{   in XLA_TEST_P() local
    530   LiteralUtil::CreateR1<float>(input_vector), &builder_);   in XLA_TEST_P()
    537   std::vector<float> input_vector{   in XLA_TEST_P() local
    547   LiteralUtil::CreateR1<float>(input_vector), &builder_);   in XLA_TEST_P()
    1448  std::vector<float> input_vector(param.base_bounds[0]);   in XLA_TEST_P() local
    1449  std::iota(std::begin(input_vector), std::end(input_vector), 0);   in XLA_TEST_P()
    1451  LiteralUtil::CreateR1(absl::Span<const float>(input_vector));   in XLA_TEST_P()
    1479  /*operand=*/absl::Span<const float>(input_vector),   in XLA_TEST_P()
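The hits in reverse_test.cc and reduce_window_test.cc above all build rank-1 XLA literals from a std::vector<float>, often filled with an ascending ramp via std::iota. A minimal sketch of that pattern, assuming the LiteralUtil include path of this tree and omitting the surrounding XlaBuilder/test plumbing:

    #include <cstdint>
    #include <numeric>
    #include <vector>

    #include "absl/types/span.h"
    #include "tensorflow/compiler/xla/literal_util.h"  // assumed include path

    // Build the kind of rank-1 literal the tests above feed to the builder:
    // a float vector filled with 0, 1, 2, ... wrapped as an absl::Span.
    void MakeIotaR1LiteralSketch(int64_t length) {
      std::vector<float> input_vector(length);
      std::iota(input_vector.begin(), input_vector.end(), 0.0f);
      auto r1_literal = xla::LiteralUtil::CreateR1<float>(
          absl::Span<const float>(input_vector));
      (void)r1_literal;  // the tests hand this to the builder (see lines 511, 1451)
    }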
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | portable_tensor_utils.h |
    279  void ReductionSumVector(const float* input_vector, float* output_vector,   in ReductionSumVector() argument
    281  PortableReductionSumVector(input_vector, output_vector, output_size,   in ReductionSumVector()
    285  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,   in ReductionSumVector() argument
    287  PortableReductionSumVector(input_vector, output_vector, output_size,   in ReductionSumVector()
    291  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,   in ReductionSumVector() argument
    293  PortableReductionSumVector(input_vector, output_vector, output_size,   in ReductionSumVector()
    297  void MeanStddevNormalization(const float* __restrict__ input_vector,   in MeanStddevNormalization() argument
    300  PortableMeanStddevNormalization(input_vector, output_vector, v_size, n_batch);   in MeanStddevNormalization()
|
D | portable_tensor_utils_impl.h |
    204  void PortableReductionSumVector(const IN* input_vector, OUT* output_vector,   in PortableReductionSumVector() argument
    209  result += input_vector[r];   in PortableReductionSumVector()
    212  input_vector += reduction_size;   in PortableReductionSumVector()
    217  void PortableMeanStddevNormalization(const float* __restrict__ input_vector,
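The PortableReductionSumVector hits above outline the whole portable kernel: each output element accumulates reduction_size consecutive inputs, then the input pointer advances to the next block. A standalone sketch of that loop (names mirror the listing; this is not the TFLite source itself):

    // Sum output_size blocks of reduction_size elements each; the input is
    // assumed to hold output_size * reduction_size values.
    template <typename IN, typename OUT>
    void ReductionSumVectorSketch(const IN* input_vector, OUT* output_vector,
                                  int output_size, int reduction_size) {
      for (int o = 0; o < output_size; ++o) {
        OUT result = 0;
        for (int r = 0; r < reduction_size; ++r) {
          result += input_vector[r];       // line 209 above
        }
        output_vector[o] = result;
        input_vector += reduction_size;    // line 212 above
      }
    }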
|
D | portable_tensor_utils.cc |
    718  void PortableMeanStddevNormalization(const float* __restrict__ input_vector,   in PortableMeanStddevNormalization() argument
    724  sum += input_vector[i];   in PortableMeanStddevNormalization()
    729  const float diff = input_vector[i] - mean;   in PortableMeanStddevNormalization()
    737  output_vector[i] = (input_vector[i] - mean) * stddev_inv;   in PortableMeanStddevNormalization()
    739  input_vector += v_size;   in PortableMeanStddevNormalization()
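The PortableMeanStddevNormalization hits above show the per-batch algorithm: accumulate the sum to get the mean, accumulate squared differences to get the variance, scale by the inverse standard deviation, and advance the pointers by v_size for the next batch. A self-contained sketch of that flow (the zero-variance guard is an assumption; the listing does not show how the original handles it):

    #include <cmath>

    // For each of n_batch rows of v_size floats, subtract the row mean and
    // divide by the row standard deviation.
    void MeanStddevNormalizationSketch(const float* input_vector,
                                       float* output_vector, int v_size,
                                       int n_batch) {
      for (int batch = 0; batch < n_batch; ++batch) {
        float sum = 0.0f;
        for (int i = 0; i < v_size; ++i) {
          sum += input_vector[i];                      // line 724 above
        }
        const float mean = sum / v_size;
        float sum_diff_sq = 0.0f;
        for (int i = 0; i < v_size; ++i) {
          const float diff = input_vector[i] - mean;   // line 729 above
          sum_diff_sq += diff * diff;
        }
        const float variance = sum_diff_sq / v_size;
        const float stddev_inv =
            variance == 0.0f ? 0.0f : 1.0f / std::sqrt(variance);  // assumed guard
        for (int i = 0; i < v_size; ++i) {
          output_vector[i] = (input_vector[i] - mean) * stddev_inv;  // line 737
        }
        input_vector += v_size;                        // line 739 above
        output_vector += v_size;
      }
    }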
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | sse_tensor_utils.h |
    285  void ReductionSumVector(const float* input_vector, float* output_vector,   in ReductionSumVector() argument
    287  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,   in ReductionSumVector()
    291  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,   in ReductionSumVector() argument
    293  PortableReductionSumVector(input_vector, output_vector, output_size,   in ReductionSumVector()
    297  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,   in ReductionSumVector() argument
    299  SSE_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,   in ReductionSumVector()
    303  void MeanStddevNormalization(const float* __restrict__ input_vector,   in MeanStddevNormalization() argument
    306  PortableMeanStddevNormalization(input_vector, output_vector, v_size, n_batch);   in MeanStddevNormalization()
|
D | neon_tensor_utils.h |
    275  void ReductionSumVector(const float* input_vector, float* output_vector,   in ReductionSumVector() argument
    277  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,   in ReductionSumVector()
    281  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,   in ReductionSumVector() argument
    283  PortableReductionSumVector(input_vector, output_vector, output_size,   in ReductionSumVector()
    287  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,   in ReductionSumVector() argument
    289  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,   in ReductionSumVector()
    293  void MeanStddevNormalization(const float* __restrict__ input_vector,   in MeanStddevNormalization() argument
    296  NEON_OR_PORTABLE(MeanStddevNormalization, input_vector, output_vector, v_size,   in MeanStddevNormalization()
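In the two dispatch headers above, each overload forwards to either a Neon*/Sse* kernel or the portable fallback through the NEON_OR_PORTABLE / SSE_OR_PORTABLE macros. A simplified illustration of that token-pasting dispatch pattern, with placeholder kernels so it compiles on its own (the real macros live in TFLite's cpu-check headers and are not reproduced here):

    #include <cstdio>

    // Placeholders standing in for the Neon*/Portable* pair named in the
    // listing; they exist only so the dispatch sketch below is self-contained.
    void NeonReductionSumVector(const float*, float*, int, int) {
      std::puts("neon path");
    }
    void PortableReductionSumVector(const float*, float*, int, int) {
      std::puts("portable path");
    }

    // Sketch of the compile-time selection suggested by NEON_OR_PORTABLE(...):
    // paste the chosen prefix onto the kernel name.
    #ifdef USE_NEON
    #define NEON_OR_PORTABLE_SKETCH(funcname, ...) Neon##funcname(__VA_ARGS__)
    #else
    #define NEON_OR_PORTABLE_SKETCH(funcname, ...) Portable##funcname(__VA_ARGS__)
    #endif

    void ReductionSumVector(const float* input_vector, float* output_vector,
                            int output_size, int reduction_size) {
      NEON_OR_PORTABLE_SKETCH(ReductionSumVector, input_vector, output_vector,
                              output_size, reduction_size);
    }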
|
D | neon_tensor_utils_impl.h |
    165  void NeonReductionSumVector(const float* input_vector, float* output_vector,
    168  void NeonReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
    176  void NeonMeanStddevNormalization(const float* __restrict__ input_vector,
|
D | neon_tensor_utils.cc |
    2466  void NeonReductionSumVector(const float* input_vector, float* output_vector,   in NeonReductionSumVector() argument
    2477  float32x4_t v1_f32x4 = vld1q_f32(input_vector + r);   in NeonReductionSumVector()
    2483  sum += input_vector[r];   in NeonReductionSumVector()
    2486  input_vector += reduction_size;   in NeonReductionSumVector()
    2490  void NeonReductionSumVector(const int8_t* input_vector, int32_t* output_vector,   in NeonReductionSumVector() argument
    2500  const int8x16_t s2_8x16 = vld1q_s8(input_vector + r);   in NeonReductionSumVector()
    2504  const int8x8_t s2_8x8 = vld1_s8(input_vector + r);   in NeonReductionSumVector()
    2510  sum += input_vector[r];   in NeonReductionSumVector()
    2513  input_vector += reduction_size;   in NeonReductionSumVector()
    2577  void NeonMeanStddevNormalization(const float* __restrict__ input_vector,   in NeonMeanStddevNormalization() argument
    [all …]
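The float overload of NeonReductionSumVector above loads four lanes at a time with vld1q_f32, folds them into a running sum, and finishes leftover elements with the scalar loop at line 2483. A sketch of that shape with a portable fallback (the real kernel's unrolling and the int8 path are not fully shown in the listing and are omitted here):

    #if defined(__ARM_NEON) || defined(__ARM_NEON__)
    #include <arm_neon.h>
    #endif

    // Four-lane accumulation plus scalar tail, per block of reduction_size.
    void NeonReductionSumVectorSketch(const float* input_vector,
                                      float* output_vector, int output_size,
                                      int reduction_size) {
      for (int o = 0; o < output_size; ++o) {
        float sum = 0.0f;
        int r = 0;
    #if defined(__ARM_NEON) || defined(__ARM_NEON__)
        float32x4_t acc = vdupq_n_f32(0.0f);
        for (; r <= reduction_size - 4; r += 4) {
          acc = vaddq_f32(acc, vld1q_f32(input_vector + r));  // see line 2477
        }
        sum = vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) +
              vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3);
    #endif
        for (; r < reduction_size; ++r) {
          sum += input_vector[r];          // scalar tail, line 2483 above
        }
        output_vector[o] = sum;
        input_vector += reduction_size;    // next block, line 2486 above
      }
    }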
|
D | sse_tensor_utils_impl.h |
    63  void SseReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
|
D | sse_tensor_utils.cc |
    417  void SseReductionSumVector(const int8_t* input_vector, int32_t* output_vector,   in SseReductionSumVector() argument
    421  const int8_t* __restrict__ row_ptr = input_vector + row * reduction_size;   in SseReductionSumVector()
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | xrt_state_ops.h |
    75   std::vector<XRTTupleAllocation::ExpandedTupleInput>* input_vector,   in ParseTupleNode() argument
    85   tuple_node.tuples(i), input_tensor_list, input_vector,   in ParseTupleNode()
    91   if (input_index < 0 || input_index >= input_vector->size()) {   in ParseTupleNode()
    94   input_vector->size(), " inputs.");   in ParseTupleNode()
    98   input_vector->at(input_index);   in ParseTupleNode()
    127  std::vector<XRTTupleAllocation::ExpandedTupleInput>* input_vector,   in ParseTupleTree() argument
    135  input_vector, &tuple_tree_shape, rm));   in ParseTupleTree()
    153  *element = input_vector->at(input_index);   in ParseTupleTree()
    442  std::vector<XRTTupleAllocation::ExpandedTupleInput> input_vector(   in Compute()
    454  tuple_proto, arg_list, &input_vector,   in Compute()
    [all …]
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | tensor_utils_common.h |
    438  void ReductionSumVector(const float* input_vector, float* output_vector,
    442  void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,
    446  void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
    450  void MeanStddevNormalization(const float* __restrict__ input_vector,
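tensor_utils_common.h carries the backend-neutral declarations that the portable, SSE, and NEON files above implement: input_vector holds output_size blocks of reduction_size elements for ReductionSumVector, and n_batch rows of v_size values for MeanStddevNormalization. A small usage sketch, assuming these declarations are reachable through the TFLite internal tensor_utils header:

    #include <vector>

    #include "tensorflow/lite/kernels/internal/tensor_utils.h"  // assumed include path

    int main() {
      // Two output elements, each summing a block of four consecutive inputs.
      std::vector<float> input = {1, 2, 3, 4, 10, 20, 30, 40};
      std::vector<float> output(2);
      tflite::tensor_utils::ReductionSumVector(input.data(), output.data(),
                                               /*output_size=*/2,
                                               /*reduction_size=*/4);
      // output now holds {10, 100}.
      return 0;
    }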
|
/external/webrtc/modules/audio_coding/neteq/ |
D | merge.cc |
    60  AudioMultiVector input_vector(num_channels_);   in Process() local
    61  input_vector.PushBackInterleaved(   in Process()
    63  size_t input_length_per_channel = input_vector.Size();   in Process()
    73  input_vector[channel].CopyTo(input_length_per_channel, 0,   in Process()
|
/external/webrtc/modules/audio_processing/agc2/rnn_vad/ |
D | rnn_unittest.cc |
    33  rtc::ArrayView<const float> input_vector,   in TestFullyConnectedLayer() argument
    36  fc->ComputeOutput(input_vector);   in TestFullyConnectedLayer()
|
/external/llvm-project/llvm/docs/ |
D | LangRef.rst |
    15792  float sequential_fadd(start_value, input_vector)
    15794  for i = 0 to length(input_vector)
    15795  result = result + input_vector[i]
    15866  float sequential_fmul(start_value, input_vector)
    15868  for i = 0 to length(input_vector)
    15869  result = result * input_vector[i]
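These LangRef.rst hits come from the pseudocode describing the sequential (non-reassociated) semantics of the vector fadd/fmul reduction intrinsics: the start value is combined with the vector elements strictly in order. A C++ rendering of that pseudocode (the initializer and return lines are not in the hits and are filled in to match the surrounding LangRef text):

    #include <vector>

    // Fold the start value over the elements strictly left to right, which is
    // the ordered floating-point reduction the LangRef pseudocode describes.
    float sequential_fadd(float start_value,
                          const std::vector<float>& input_vector) {
      float result = start_value;
      for (size_t i = 0; i < input_vector.size(); ++i) {
        result = result + input_vector[i];
      }
      return result;
    }

    float sequential_fmul(float start_value,
                          const std::vector<float>& input_vector) {
      float result = start_value;
      for (size_t i = 0; i < input_vector.size(); ++i) {
        result = result * input_vector[i];
      }
      return result;
    }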
|