/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_input_output_alias_config.cc |
      23  const ShapeIndex& output_index) const {  in OutputHasAlias()
      24  return alias_.element(output_index).has_value();  in OutputHasAlias()
      27  Status HloInputOutputAliasConfig::SetUpAlias(const ShapeIndex& output_index,  in SetUpAlias() argument
      33  TF_RET_CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index))  in SetUpAlias()
      34  << "Trying to set up alias at " << output_index.ToString()  in SetUpAlias()
      38  TF_RET_CHECK(!OutputHasAlias(output_index))  in SetUpAlias()
      39  << "Output index " << output_index << " already has an alias setup";  in SetUpAlias()
      41  TF_RET_CHECK(!alias_.element(output_index)) << absl::StrFormat(  in SetUpAlias()
      44  param_number, param_index.ToString(), output_index.ToString(),  in SetUpAlias()
      45  alias_.element(output_index)->parameter_number,  in SetUpAlias()
      [all …]
|
D | hlo_input_output_alias_config.h |
      66  Status SetUpAlias(const ShapeIndex& output_index, int64 param_number,
      82  bool OutputHasAlias(const ShapeIndex& output_index) const;
     101  const ShapeIndex& output_index) const;
     104  std::function<void(const ShapeIndex& output_index, const Alias&)>;
     110  std::function<Status(const ShapeIndex& output_index, const Alias&)>;
|
D | hlo_input_output_alias_config_test.cc |
      37  void expect_aliased(const ShapeIndex& output_index, int64 param_number,  in expect_aliased() argument
      44  EXPECT_EQ(aliased_output.value(), output_index);  in expect_aliased()
      47  config.GetAliasedParameter(output_index);  in expect_aliased()
      54  void expect_not_aliased(const ShapeIndex& output_index, int64 param_number,  in expect_not_aliased() argument
      60  EXPECT_FALSE(aliased_output && aliased_output == output_index);  in expect_not_aliased()
      63  config.GetAliasedParameter(output_index);  in expect_not_aliased()
|
D | optimize_input_output_buffer_alias.cc |
      80  const ShapeIndex& output_index = index;  in Build() local
      82  !alias_config->OutputHasAlias(output_index)) {  in Build()
      84  output_index, entry.param_number, input_index,  in Build()
|
/external/tensorflow/tensorflow/java/src/main/native/ |
D | graph_operation_jni.cc |
      83  jint output_index) {  in Java_org_tensorflow_GraphOperation_shape() argument
      90  if (output_index < 0 || output_index >= num_outputs) {  in Java_org_tensorflow_GraphOperation_shape()
      94  output_index, num_outputs);  in Java_org_tensorflow_GraphOperation_shape()
      98  TF_Output output{op, output_index};  in Java_org_tensorflow_GraphOperation_shape()
     134  jint output_index) {  in Java_org_tensorflow_GraphOperation_dtype() argument
     141  if (output_index < 0 || output_index >= num_outputs) {  in Java_org_tensorflow_GraphOperation_dtype()
     145  output_index, num_outputs);  in Java_org_tensorflow_GraphOperation_dtype()
     149  return static_cast<jint>(TF_OperationOutputType(TF_Output{op, output_index}));  in Java_org_tensorflow_GraphOperation_dtype()
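
The JNI bindings above only wrap the public TensorFlow C API. A minimal sketch of the same range check and dtype lookup, assuming tensorflow/c/c_api.h; the helper name OutputDType is illustrative, and the real JNI code throws IndexOutOfBoundsException instead of returning false:

  #include <tensorflow/c/c_api.h>

  // Returns true and fills *dtype if output_index names a valid output of op,
  // mirroring the bounds check in Java_org_tensorflow_GraphOperation_dtype.
  bool OutputDType(TF_Operation* op, int output_index, TF_DataType* dtype) {
    const int num_outputs = TF_OperationNumOutputs(op);
    if (output_index < 0 || output_index >= num_outputs) {
      return false;
    }
    TF_Output output{op, output_index};
    *dtype = TF_OperationOutputType(output);
    return true;
  }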
|
/external/tensorflow/tensorflow/core/grappler/optimizers/data/vectorization/ |
D | wrapped_tensor.h |
      27  const int output_index;  member
      37  WrappedTensor(Node* node, int output_index, bool stacked)  in WrappedTensor()
      38  : node(node), output_index(output_index), stacked(stacked) {}  in WrappedTensor()
|
/external/libtextclassifier/native/utils/ |
D | tflite-model-executor.h |
     116  TensorView<T> OutputView(const int output_index,  in OutputView() argument
     119  interpreter->tensor(interpreter->outputs()[output_index]);  in OutputView()
     120  return TensorView<T>(interpreter->typed_output_tensor<T>(output_index),  in OutputView()
     127  std::vector<T> Output(const int output_index,  in Output() argument
     129  TensorView<T> output_view = OutputView<T>(output_index, interpreter);  in Output()
     151  const int output_index, const tflite::Interpreter* interpreter) const;
     155  const int output_index, const tflite::Interpreter* interpreter) const;
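
OutputView/Output above wrap the standard tflite::Interpreter accessors. A sketch of the same pattern, assuming the TF Lite C++ headers, an already-invoked interpreter, and a float output (deriving the element count from tensor->bytes is only valid under that dtype assumption):

  #include <vector>
  #include "tensorflow/lite/interpreter.h"

  // Copies output `output_index` of an invoked interpreter into a std::vector.
  std::vector<float> ReadFloatOutput(const tflite::Interpreter* interpreter,
                                     int output_index) {
    const TfLiteTensor* tensor =
        interpreter->tensor(interpreter->outputs()[output_index]);
    const float* data = interpreter->typed_output_tensor<float>(output_index);
    return std::vector<float>(data, data + tensor->bytes / sizeof(float));
  }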
|
/external/tensorflow/tensorflow/core/kernels/data/experimental/ |
D | parse_example_dataset_op.cc |
     349  int output_index =  in MapFunc() local
     352  output_index);  in MapFunc()
     353  (*output)[output_index] = example_result.dense_values[d];  in MapFunc()
     356  int output_index =  in MapFunc() local
     358  (*output)[output_index] =  in MapFunc()
     360  Tensor& serialized_sparse = (*output)[output_index];  in MapFunc()
     365  CheckOutputTensor(serialized_sparse, d, output_index);  in MapFunc()
     368  int output_index =  in MapFunc() local
     370  (*output)[output_index] =  in MapFunc()
     377  (*output)[output_index] =  in MapFunc()
     [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | reduce_join_op.cc |
      50  int64 output_index, const gtl::InlinedVector<int32, 8>& dim_list,  in LinearSubIndexToFullIndex() argument
      54  int64 quotient = output_index;  in LinearSubIndexToFullIndex()
     164  for (int64 output_index = 0; output_index < output_shape.num_elements();  in Compute() local
     165  ++output_index) {  in Compute()
     167  output_index, unreduced_indices, input_shape, strides);  in Compute()
     175  output_flat(output_index) = absl::StrJoin(curr_strings, separator_);  in Compute()
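
LinearSubIndexToFullIndex maps a linear index over the kept (unreduced) dimensions back to a flat index into the full input, peeling one coordinate per kept dimension off the quotient and accumulating that dimension's stride. A self-contained sketch of the idea, not the exact TensorFlow routine:

  #include <cstdint>
  #include <vector>

  // kept_dims lists the dimensions that are not reduced; strides are the
  // row-major strides of the full input shape.
  int64_t LinearSubIndexToFullIndex(int64_t output_index,
                                    const std::vector<int>& kept_dims,
                                    const std::vector<int64_t>& input_shape,
                                    const std::vector<int64_t>& strides) {
    int64_t full_index = 0;
    int64_t quotient = output_index;
    // Walk the kept dimensions innermost-first so each remainder is that
    // dimension's coordinate; reduced dimensions keep coordinate 0.
    for (auto it = kept_dims.rbegin(); it != kept_dims.rend(); ++it) {
      const int dim = *it;
      full_index += (quotient % input_shape[dim]) * strides[dim];
      quotient /= input_shape[dim];
    }
    return full_index;
  }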
|
D | dynamic_stitch_op_gpu.cu.cc |
      39  GPU_1D_KERNEL_LOOP(output_index, output_size) {  in DynamicStitchKernel()
      40  const int32 slice_id = output_index / slice_size;  in DynamicStitchKernel()
      41  const int32 slice_offset = output_index % slice_size;  in DynamicStitchKernel()
      44  output[output_index] = ldg(data_ptrs[input_index] + slice_offset);  in DynamicStitchKernel()
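
Each GPU thread above handles one flat output slot: the slot splits into a slice id and an offset within the slice, and the element is gathered from whichever input owns that slice. A CPU sketch of the same decomposition; the meaning of input_indices and data_ptrs is inferred from the visible lines, so treat the lookup as an assumption:

  #include <cstdint>
  #include <vector>

  void DynamicStitchCpu(int32_t output_size, int32_t slice_size,
                        const std::vector<int32_t>& input_indices,   // slice_id -> source slice
                        const std::vector<const float*>& data_ptrs,  // one pointer per source slice
                        float* output) {
    for (int32_t output_index = 0; output_index < output_size; ++output_index) {
      const int32_t slice_id = output_index / slice_size;
      const int32_t slice_offset = output_index % slice_size;
      const int32_t input_index = input_indices[slice_id];
      if (input_index < 0) continue;  // slice never written by any input
      output[output_index] = data_ptrs[input_index][slice_offset];
    }
  }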
|
D | segment_reduction_ops_gpu.cu.cc |
      80  const Index output_index =  in SortedSegmentSumCustomKernel() local
      85  GpuAtomicAdd(output + output_index, sum);  in SortedSegmentSumCustomKernel()
      87  *(output + output_index) = sum;  in SortedSegmentSumCustomKernel()
      98  const Index output_index =  in SortedSegmentSumCustomKernel() local
     100  GpuAtomicAdd(output + output_index, sum);  in SortedSegmentSumCustomKernel()
     121  const int64 output_index =  in UnsortedSegmentCustomKernel() local
     123  KernelReductionFunctor()(output + output_index, ldg(input + input_index));  in UnsortedSegmentCustomKernel()
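
In the segment-sum kernels the output_index is essentially the segment id of the row being reduced, so rows sharing a segment accumulate into the same output slot (GpuAtomicAdd guards the slots that more than one thread block can touch). A 1-D CPU sketch of that reduction, ignoring the inner-dimension tiling of the real kernel:

  #include <cstddef>
  #include <vector>

  std::vector<float> SortedSegmentSum(const std::vector<float>& input,
                                      const std::vector<int>& segment_ids,
                                      int num_segments) {
    std::vector<float> output(num_segments, 0.0f);
    for (std::size_t i = 0; i < input.size(); ++i) {
      const int output_index = segment_ids[i];  // segment id picks the slot
      output[output_index] += input[i];
    }
    return output;
  }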
|
D | ops_testutil.cc |
     185  Tensor* OpsTestBase::GetOutput(int output_index) {  in GetOutput() argument
     186  CHECK_LT(output_index, context_->num_outputs());  in GetOutput()
     187  Tensor* output = context_->mutable_output(output_index);  in GetOutput()
     192  if (!managed_outputs_[output_index]) {  in GetOutput()
     200  managed_outputs_[output_index] = managed_output;  in GetOutput()
     202  output = managed_outputs_[output_index];  in GetOutput()
|
D | ragged_tensor_to_tensor_op.cc |
     369  vector<INDEX_TYPE> output_index, new_output_index;  in Compute() local
     371  output_index.reserve(nvals);  in Compute()
     375  output_size[0], &output_index);  in Compute()
     378  context, i - 1, output_index, multiplier[i],  in Compute()
     380  output_index.swap(new_output_index);  in Compute()
     384  SetOutput(context, ragged_rank_, output_index, output_tensor);  in Compute()
     388  const vector<INDEX_TYPE>& output_index,
     440  const vector<INDEX_TYPE>& output_index,  in SetOutput() argument
     456  size_t output_index_size = output_index.size();  in SetOutput()
     491  INDEX_TYPE dst_i = src_i < output_index_size ? output_index[src_i] : -1;  in SetOutput()
|
D | dynamic_partition_op.cc |
     100  gtl::InlinedVector<int, 32> output_index(num_partitions_);  in Compute() local
     117  auto oi = output_index[p];  in Compute()
     123  output_index[p]++;  in Compute()
     147  auto oi = output_index[p];  in Compute()
     155  output_index[p]++;  in Compute()
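
output_index here is a per-partition write cursor: for each element, the cursor of its partition names the next free row and is then advanced. A self-contained sketch with pre-sized outputs (the real op computes the per-partition counts first):

  #include <cstddef>
  #include <vector>

  void DynamicPartition(const std::vector<int>& partitions,
                        const std::vector<float>& data,
                        std::vector<std::vector<float>>* outputs) {
    std::vector<int> output_index(outputs->size(), 0);  // one cursor per partition
    for (std::size_t i = 0; i < data.size(); ++i) {
      const int p = partitions[i];
      const int oi = output_index[p];  // next free slot in partition p
      (*outputs)[p][oi] = data[i];     // outputs assumed sized to the partition counts
      output_index[p]++;
    }
  }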
|
D | unsorted_segment_join_op.cc |
     139  auto output_index = start_offset + flat_segment_id(i) * big_stride;  in Compute() local
     141  if (output_flat(output_index).length() != 0)  in Compute()
     142  output_flat(output_index).append(separator_.c_str());  in Compute()
     143  output_flat(output_index).append(flat_input(offset));  in Compute()
|
D | conv_2d_gpu.h |
     198  GPU_1D_KERNEL_LOOP(output_index, nthreads) {
     199  Index<3> output_tensor_index = FlatToTensorIndex(output_index, output_dims);
     208  output[output_index] =
     348  int output_index = output_origin_flat_index + ti * output_dims[2] + tj;
     354  output[output_index] = shared_memory_tile[tj][i_loc];
     355  output_index += output_increment;
     360  output[output_index] = shared_memory_tile[tj][i_loc];
     361  output_index += output_increment;
     378  int output_index = index;
     380  FlatToTensorIndex(output_index, output_dims);
     [all …]
|
/external/python/cffi/c/ |
D | parse_c_type.c |
      54  size_t output_index;  member
     211  size_t index = tok->output_index;  in write_ds()
     217  tok->output_index = index + 1;  in write_ds()
     287  x = tok->output_index;  in parse_sequel()
     313  *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index);  in parse_sequel()
     314  p_current = tok->output + tok->output_index;  in parse_sequel()
     369  *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index);  in parse_sequel()
     370  p_current = tok->output + tok->output_index;  in parse_sequel()
     813  int parse_c_type_from(struct _cffi_parse_info_s *info, size_t *output_index,  in parse_c_type_from() argument
     825  token.output_index = *output_index;  in parse_c_type_from()
     [all …]
|
/external/tensorflow/tensorflow/tools/graph_transforms/ |
D | insert_logging.cc |
      82  int32 output_index;  in InsertLogging() local
      83  if (!strings::safe_strto32(output_index_string, &output_index)) {  in InsertLogging()
      87  node_outputs[name].insert(output_index);  in InsertLogging()
     131  for (int output_index : node_outputs[node.name()]) {  in InsertLogging() local
     132  print_node->add_input(strings::StrCat(node.name(), ":", output_index));  in InsertLogging()
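
InsertLogging relies on the usual "node_name:output_index" tensor-name convention: requested names are split into node and port, and the Print node's inputs are re-joined the same way. A self-contained sketch of that convention using the standard library instead of TensorFlow's strings:: helpers:

  #include <cstdlib>
  #include <string>
  #include <utility>

  // "foo:2" -> {"foo", 2}; a bare node name means output 0.
  std::pair<std::string, int> SplitTensorName(const std::string& tensor_name) {
    const std::size_t colon = tensor_name.rfind(':');
    if (colon == std::string::npos) return {tensor_name, 0};
    const int output_index = std::atoi(tensor_name.substr(colon + 1).c_str());
    return {tensor_name.substr(0, colon), output_index};
  }

  std::string JoinTensorName(const std::string& node_name, int output_index) {
    return node_name + ":" + std::to_string(output_index);
  }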
|
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/ |
D | node_info_delegate_test.cc |
     146  for (size_t output_index = 0; output_index < info.outputs.size();  in TEST() local
     147  output_index++) {  in TEST()
     149  interpreter->tensor(tflite_node->outputs->data[output_index]);  in TEST()
     152  subgraph_tensors->Get(info.outputs[output_index])->name()->str());  in TEST()
|
/external/tensorflow/tensorflow/compiler/tf2tensorrt/ |
D | tensorrt_test.cc |
     107  const int output_index = engine.getBindingIndex(kOutputTensor);  in Execute() local
     112  ASSERT_EQ(0, cudaMalloc(&buffers[output_index], sizeof(float)));  in Execute()
     124  ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float),  in Execute()
     131  ASSERT_EQ(0, cudaFree(buffers[output_index]));  in Execute()
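
The test resolves the named output to a binding index, gives that binding a device buffer, and copies the single float result back after execution. A sketch of the readback half, assuming the TensorRT era in which getBindingIndex was current plus the CUDA runtime; error handling is dropped and the scalar output is illustrative:

  #include <cuda_runtime_api.h>
  #include <NvInfer.h>

  float ReadScalarOutput(nvinfer1::ICudaEngine& engine, void** buffers,
                         const char* output_tensor_name, cudaStream_t stream) {
    const int output_index = engine.getBindingIndex(output_tensor_name);
    float host_output = 0.0f;
    // buffers[output_index] is assumed to already hold the result of an
    // execution enqueued on `stream`.
    cudaMemcpyAsync(&host_output, buffers[output_index], sizeof(float),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);
    return host_output;
  }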
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | prelu.h |
      47  int output_index = Offset(extended_output_shape, b, y, x, c);  in BroadcastPrelu4DSlow() local
      52  output_data[output_index] = input_data[input_index];  in BroadcastPrelu4DSlow()
      66  output_data[output_index] = static_cast<uint8>(clamped_output);  in BroadcastPrelu4DSlow()
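
Offset(extended_output_shape, b, y, x, c) is the usual row-major NHWC flattening. A small sketch of what that flat output_index works out to; the helper name is illustrative:

  // Row-major 4-D (NHWC) indexing: ((b * H + y) * W + x) * C + c.
  inline int NhwcOffset(int height, int width, int channels,
                        int b, int y, int x, int c) {
    return ((b * height + y) * width + x) * channels + c;
  }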
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | scoped_allocator_optimizer.cc |
      74  const std::vector<OpInfo::TensorProperties>& output_props, int output_index,  in GetOutputDataType() argument
      76  if (output_index >= output_props.size()) {  in GetOutputDataType()
      77  return errors::Internal("Invalid output index ", output_index,  in GetOutputDataType()
      80  *dtype = output_props[output_index].dtype();  in GetOutputDataType()
     214  int output_index, NodeDef* op, NodeDef** new_input,  in MaybeRewriteInput() argument
     220  *new_output_index = output_index;  in MaybeRewriteInput()
     234  NodeDefBuilder::NodeOut(input->name(), output_index, dtype));  in MaybeRewriteInput()
     244  << " old output index " << output_index << " with identity "  in MaybeRewriteInput()
     259  int output_index = 0;  in GetInputs() local
     268  ParseNodeName(input_name, &output_index);  in GetInputs()
     [all …]
|
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | graph_compiler_util.cc |
      59  const int output_index = 0;  in AddArgNodes() local
      83  .Attr("T", BaseType(feed_node->output_type(output_index)))  in AddArgNodes()
      97  if (edge->src_output() == output_index) {  in AddArgNodes()
     121  if (id.output_index() >= fetch_node->num_outputs()) {  in AddRetvalNodes()
     131  .Input(fetch_node, id.output_index())  in AddRetvalNodes()
     132  .Attr("T", BaseType(fetch_node->output_type(id.output_index())))  in AddRetvalNodes()
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | optional_ops.cc |
     138  Status WriteOptionalWithValueToOutput(OpKernelContext* ctx, int output_index,  in WriteOptionalWithValueToOutput() argument
     144  TF_RETURN_IF_ERROR(ctx->allocate_output(output_index, TensorShape({}),  in WriteOptionalWithValueToOutput()
     150  Status WriteOptionalNoneToOutput(OpKernelContext* ctx, int output_index) {  in WriteOptionalNoneToOutput() argument
     155  TF_RETURN_IF_ERROR(ctx->allocate_output(output_index, TensorShape({}),  in WriteOptionalNoneToOutput()
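
Both helpers above follow the standard kernel pattern of allocating output slot `output_index` as a scalar and then filling it. A sketch of that pattern, assuming the TensorFlow OpKernel headers; WriteScalarToOutput is a hypothetical helper, not part of optional_ops.cc:

  #include "tensorflow/core/framework/op_kernel.h"
  #include "tensorflow/core/framework/tensor_shape.h"

  tensorflow::Status WriteScalarToOutput(tensorflow::OpKernelContext* ctx,
                                         int output_index, float value) {
    tensorflow::Tensor* out = nullptr;
    // TensorShape({}) allocates a scalar at the requested output slot.
    tensorflow::Status status =
        ctx->allocate_output(output_index, tensorflow::TensorShape({}), &out);
    if (!status.ok()) return status;
    out->scalar<float>()() = value;
    return tensorflow::Status::OK();
  }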
|
/external/tensorflow/tensorflow/lite/python/ |
D | op_hint.py |
     493  def aggregate_and_return_name_for_output(self, fused_op_name, output_index,  argument
     508  del fused_op_name, output_index, out_graphdef
     613  def aggregate_and_return_name_for_output(self, fused_op_name, output_index,  argument
     637  fused_op_name, output_index, out_graphdef)
     646  fused_op_name, output_index))
     894  def _tensorflow_output_name(tensor_name, output_index):  argument
     895  return tensor_name if output_index == 0 else "%s:%d" % (tensor_name,
     896  output_index)
    1013  for output_index in sorted_output_indices:
    1014  output = call.outputs[output_index]
     [all …]
|