/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_input_output_alias_config.cc |
    22  const ShapeIndex& output_index) const {  in OutputHasAlias()
    23  return alias_.element(output_index).has_value();  in OutputHasAlias()
    26  Status HloInputOutputAliasConfig::SetUpAlias(const ShapeIndex& output_index,  in SetUpAlias() argument
    32  TF_RET_CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index))  in SetUpAlias()
    33  << absl::StrCat("Tring to set up alias at ", output_index.ToString(),  in SetUpAlias()
    37  TF_RET_CHECK(!OutputHasAlias(output_index))  in SetUpAlias()
    38  << "Output index " << output_index << " already has an alias setup";  in SetUpAlias()
    40  TF_RET_CHECK(!alias_.element(output_index)) << absl::StrFormat(  in SetUpAlias()
    43  param_number, param_index.ToString(), output_index.ToString(),  in SetUpAlias()
    44  alias_.element(output_index)->parameter_number,  in SetUpAlias()
    [all …]
|
D | hlo_input_output_alias_config.h |
    66  Status SetUpAlias(const ShapeIndex& output_index, int64 param_number,
    82  bool OutputHasAlias(const ShapeIndex& output_index) const;
   101  const ShapeIndex& output_index) const;
   104  std::function<void(const ShapeIndex& output_index, const Alias&)>;
   110  std::function<Status(const ShapeIndex& output_index, const Alias&)>;
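The two files above define XLA's input/output alias table: SetUpAlias() records that an output element may reuse a parameter element's buffer, OutputHasAlias() tests for an existing entry, and GetAliasedParameter() (exercised in the test file below) looks an entry up. A minimal standalone sketch of that bookkeeping, using invented stand-in types rather than the real XLA classes:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <optional>
    #include <vector>

    // Stand-ins for XLA's ShapeIndex and Alias (not the real types).
    using ShapeIndex = std::vector<int64_t>;
    struct Alias {
      int64_t parameter_number;
      ShapeIndex parameter_index;
    };

    // Toy alias table: maps an output ShapeIndex to the parameter element
    // whose buffer that output may share.
    class AliasConfig {
     public:
      bool OutputHasAlias(const ShapeIndex& output_index) const {
        return aliases_.count(output_index) > 0;
      }
      bool SetUpAlias(const ShapeIndex& output_index, int64_t param_number,
                      const ShapeIndex& param_index) {
        if (OutputHasAlias(output_index)) return false;  // already has an alias
        aliases_[output_index] = Alias{param_number, param_index};
        return true;
      }
      std::optional<Alias> GetAliasedParameter(const ShapeIndex& output_index) const {
        auto it = aliases_.find(output_index);
        if (it == aliases_.end()) return std::nullopt;
        return it->second;
      }
     private:
      std::map<ShapeIndex, Alias> aliases_;
    };

    int main() {
      AliasConfig config;
      config.SetUpAlias(/*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{});
      std::cout << config.OutputHasAlias({0}) << "\n";  // 1
      std::cout << config.OutputHasAlias({1}) << "\n";  // 0
    }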
|
D | hlo_input_output_alias_config_test.cc |
    38  void expect_aliased(const ShapeIndex& output_index, int64 param_number,  in expect_aliased() argument
    45  EXPECT_EQ(aliased_output.value(), output_index);  in expect_aliased()
    48  config.GetAliasedParameter(output_index);  in expect_aliased()
    55  void expect_not_aliased(const ShapeIndex& output_index, int64 param_number,  in expect_not_aliased() argument
    61  EXPECT_FALSE(aliased_output && aliased_output == output_index);  in expect_not_aliased()
    64  config.GetAliasedParameter(output_index);  in expect_not_aliased()
|
D | optimize_input_output_buffer_alias.cc |
    73  const ShapeIndex& output_index = index;  in Build() local
    75  !alias_config->OutputHasAlias(output_index)) {  in Build()
    77  output_index, 0, input_index,  in Build()
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | mirror_pad.cc |
    89  int output_index) {  in Eval() argument
    92  if (output_index >= eval_data->output_size) {  in Eval()
    93  return output_index;  in Eval()
    95  eval_data->output_data[output_index] = eval_data->input_data[flat_index];  in Eval()
    96  return output_index + 1;  in Eval()
   105  memcpy(eval_data->output_data + output_index,  in Eval()
   107  return output_index + count;  in Eval()
   109  cache_entry.first = output_index;  in Eval()
   120  output_index = Eval(eval_data, current_dim + 1, flat_index + i * multiplier,  in Eval()
   121  output_index);  in Eval()
    [all …]
|
D | gather.cc |
    83  int output_index = 0;  in Prepare() local
    85  output_shape->data[output_index++] = input->dims->data[i];  in Prepare()
    88  output_shape->data[output_index++] = positions->dims->data[i];  in Prepare()
    91  output_shape->data[output_index++] = input->dims->data[i];  in Prepare()
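The Prepare() fragment above assembles the gather output shape by copying the input dimensions before the gather axis, then the dimensions of the positions tensor, then the remaining input dimensions. A standalone sketch of that shape computation (GatherOutputShape is a hypothetical helper, not the TFLite kernel itself):

    #include <vector>

    // Output shape of gather(input, positions, axis):
    //   input.dims[0 .. axis) ++ positions.dims ++ input.dims(axis .. rank)
    std::vector<int> GatherOutputShape(const std::vector<int>& input_dims,
                                       const std::vector<int>& positions_dims,
                                       int axis) {
      std::vector<int> output_shape;
      for (int i = 0; i < axis; ++i)
        output_shape.push_back(input_dims[i]);    // leading input dims
      for (int d : positions_dims)
        output_shape.push_back(d);                // index dims
      for (int i = axis + 1; i < static_cast<int>(input_dims.size()); ++i)
        output_shape.push_back(input_dims[i]);    // trailing input dims
      return output_shape;
    }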
|
/external/tensorflow/tensorflow/java/src/main/native/ |
D | operation_jni.cc |
    89  jint output_index) {  in Java_org_tensorflow_Operation_shape() argument
    96  if (output_index < 0 || output_index >= num_outputs) {  in Java_org_tensorflow_Operation_shape()
   100  output_index, num_outputs);  in Java_org_tensorflow_Operation_shape()
   104  TF_Output output{op, output_index};  in Java_org_tensorflow_Operation_shape()
   142  jint output_index) {  in Java_org_tensorflow_Operation_dtype() argument
   149  if (output_index < 0 || output_index >= num_outputs) {  in Java_org_tensorflow_Operation_dtype()
   153  output_index, num_outputs);  in Java_org_tensorflow_Operation_dtype()
   157  return static_cast<jint>(TF_OperationOutputType(TF_Output{op, output_index}));  in Java_org_tensorflow_Operation_dtype()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | ops_testutil.cc |
    44  Tensor* OpsTestBase::GetOutput(int output_index) {  in GetOutput() argument
    45  CHECK_LT(output_index, context_->num_outputs());  in GetOutput()
    46  Tensor* output = context_->mutable_output(output_index);  in GetOutput()
    51  if (!managed_outputs_[output_index]) {  in GetOutput()
    59  managed_outputs_[output_index] = managed_output;  in GetOutput()
    61  output = managed_outputs_[output_index];  in GetOutput()
|
D | reduce_join_op.cc |
    50  int64 output_index, const gtl::InlinedVector<int32, 8>& dim_list,  in LinearSubIndexToFullIndex() argument
    54  int64 quotient = output_index;  in LinearSubIndexToFullIndex()
   164  for (int64 output_index = 0; output_index < output_shape.num_elements();  in Compute() local
   165  ++output_index) {  in Compute()
   167  output_index, unreduced_indices, input_shape, strides);  in Compute()
   175  output_flat(output_index) =  in Compute()
|
D | dynamic_stitch_op_gpu.cu.cc |
    39  CUDA_1D_KERNEL_LOOP(output_index, output_size) {  in DynamicStitchKernel()
    40  const int32 slice_id = output_index / slice_size;  in DynamicStitchKernel()
    41  const int32 slice_offset = output_index % slice_size;  in DynamicStitchKernel()
    44  output[output_index] = ldg(data_ptrs[input_index] + slice_offset);  in DynamicStitchKernel()
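Each iteration of the kernel above splits the flat output_index into a slice id (which stitched row it belongs to) and an offset within that row, then copies one element from the selected input buffer. A CPU-side sketch of the same index arithmetic (DynamicStitchCpu is an invented name; how the pointer and index tables are assembled on the device is omitted):

    #include <vector>

    // slice_id     = output_index / slice_size   -> which output row
    // slice_offset = output_index % slice_size   -> position within the row
    void DynamicStitchCpu(const std::vector<const float*>& data_ptrs,
                          const std::vector<int>& input_indices,  // row -> source buffer
                          int slice_size, int output_size, float* output) {
      for (int output_index = 0; output_index < output_size; ++output_index) {
        const int slice_id = output_index / slice_size;
        const int slice_offset = output_index % slice_size;
        const int input_index = input_indices[slice_id];
        if (input_index < 0) continue;  // row has no source; leave untouched
        output[output_index] = data_ptrs[input_index][slice_offset];
      }
    }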
|
D | segment_reduction_ops_gpu.cu.cc |
    80  const Index output_index =  in SortedSegmentSumCustomKernel() local
    85  CudaAtomicAdd(output + output_index, sum);  in SortedSegmentSumCustomKernel()
    87  *(output + output_index) = sum;  in SortedSegmentSumCustomKernel()
    98  const Index output_index =  in SortedSegmentSumCustomKernel() local
   100  CudaAtomicAdd(output + output_index, sum);  in SortedSegmentSumCustomKernel()
   122  const Index output_index =  in UnsortedSegmentCustomKernel() local
   124  KernelReductionFunctor()(output + output_index, ldg(input + input_index));  in UnsortedSegmentCustomKernel()
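Both kernels above compute the output_index of the segment an input element belongs to and accumulate into it, using CudaAtomicAdd when several threads may target the same segment. A sequential sketch of the sorted-segment-sum case with a scalar inner dimension (invented helper name):

    #include <vector>

    // output[segment_ids[i]] += input[i]. segment_ids is assumed sorted and
    // non-negative; output must be sized to (max segment id + 1) and zeroed.
    void SortedSegmentSum(const std::vector<float>& input,
                          const std::vector<int>& segment_ids,
                          std::vector<float>* output) {
      for (size_t i = 0; i < input.size(); ++i) {
        const int output_index = segment_ids[i];  // segment this element feeds
        (*output)[output_index] += input[i];      // GPU kernel uses CudaAtomicAdd here
      }
    }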
|
D | dynamic_partition_op.cc |
   100  gtl::InlinedVector<int, 32> output_index(num_partitions_);  in Compute() local
   117  auto oi = output_index[p];  in Compute()
   123  output_index[p]++;  in Compute()
   147  auto oi = output_index[p];  in Compute()
   155  output_index[p]++;  in Compute()
|
D | conv_2d_gpu.h |
   194  CUDA_1D_KERNEL_LOOP(output_index, nthreads) {
   195  Index<3> output_tensor_index = FlatToTensorIndex(output_index, output_dims);
   204  output[output_index] =
   340  int output_index = output_origin_flat_index + ti * output_dims[2] + tj;
   346  output[output_index] = shared_memory_tile[tj][i_loc];
   347  output_index += output_increment;
   352  output[output_index] = shared_memory_tile[tj][i_loc];
   353  output_index += output_increment;
   368  int output_index = index;
   370  FlatToTensorIndex(output_index, output_dims);
   [all …]
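The transpose/conversion kernels above repeatedly turn a flat output_index into per-dimension coordinates with FlatToTensorIndex and back again. A host-side sketch of that arithmetic for three dimensions, assuming row-major layout (helper names are invented):

    #include <array>

    // Flat offset -> (d0, d1, d2) for a row-major tensor of shape dims.
    std::array<int, 3> FlatToTensorIndex3(int flat, const std::array<int, 3>& dims) {
      std::array<int, 3> coords;
      for (int i = 2; i >= 0; --i) {  // peel off the innermost dimension first
        coords[i] = flat % dims[i];
        flat /= dims[i];
      }
      return coords;
    }

    // Inverse: (d0, d1, d2) -> flat offset.
    int TensorIndexToFlat3(const std::array<int, 3>& coords,
                           const std::array<int, 3>& dims) {
      return (coords[0] * dims[1] + coords[1]) * dims[2] + coords[2];
    }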
|
D | decode_proto_op.cc |
   200  : output_index(user_index), default_value(def_value) {  in FieldInfo()
   224  int output_index = -1;  member
   693  const int output_index = output_indices[i];  in DecodeProtoOp() local
   694  const DataType dtype = output_types[output_index];  in DecodeProtoOp()
   695  const FieldDescriptor* field_descriptor = field_descs[output_index];  in DecodeProtoOp()
   700  MakeUnique<FieldInfo>(field_descriptor, output_index, default_value));  in DecodeProtoOp()
   799  OP_REQUIRES_OK(ctx, ctx->allocate_output(fields_[fi]->output_index + 1,  in Compute()
   869  sizes(message_index, fields_[fi]->output_index) = size;  in CountFields()
   922  for (int output_index = 0; output_index < field_count; ++output_index) {  in AccumulateFields() local
   923  const TensorInfo& info = tensors[output_index];  in AccumulateFields()
   [all …]
|
/external/tensorflow/tensorflow/core/grappler/optimizers/data/vectorization/ |
D | wrapped_tensor.h |
    27  const int output_index;  member
    37  WrappedTensor(Node* node, int output_index, bool stacked)  in WrappedTensor()
    38  : node(node), output_index(output_index), stacked(stacked) {}  in WrappedTensor()
|
/external/libtextclassifier/utils/ |
D | tflite-model-executor.h |
   114  TensorView<T> OutputView(const int output_index,  in OutputView() argument
   117  interpreter->tensor(interpreter->outputs()[output_index]);  in OutputView()
   118  return TensorView<T>(interpreter->typed_output_tensor<T>(output_index),  in OutputView()
   125  std::vector<T> Output(const int output_index,  in Output() argument
   127  TensorView<T> output_view = OutputView<T>(output_index, interpreter);  in Output()
   147  const int output_index, const tflite::Interpreter* interpreter) const;
   151  const int output_index, const tflite::Interpreter* interpreter) const;
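OutputView()/Output() above read a model output by position: outputs()[output_index] gives the tensor index, tensor() gives its metadata, and typed_output_tensor<T>() gives the typed data pointer. A rough usage sketch against the TFLite interpreter API (ReadFloatOutput is a hypothetical helper; model loading, invocation, and error handling are omitted, and a float output is assumed):

    #include <vector>
    #include "tensorflow/lite/interpreter.h"

    // Copies the output tensor at position output_index in the model's output
    // list into a std::vector<float>.
    std::vector<float> ReadFloatOutput(const tflite::Interpreter& interpreter,
                                       int output_index) {
      const TfLiteTensor* tensor =
          interpreter.tensor(interpreter.outputs()[output_index]);
      const float* data = interpreter.typed_output_tensor<float>(output_index);
      return std::vector<float>(data, data + tensor->bytes / sizeof(float));
    }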
|
/external/tensorflow/tensorflow/core/kernels/data/experimental/ |
D | parse_example_dataset_op.cc |
   291  int output_index =  in MapFunc() local
   294  dataset_->output_dtypes()[output_index])  in MapFunc()
   297  << DataTypeString(dataset_->output_dtypes()[output_index])  in MapFunc()
   301  DCHECK(dataset_->output_shapes()[output_index].IsCompatibleWith(  in MapFunc()
   305  << dataset_->output_shapes()[output_index].DebugString()  in MapFunc()
   309  (*output)[output_index] = example_result.dense_values[d];  in MapFunc()
   312  int output_index =  in MapFunc() local
   314  (*output)[output_index] =  in MapFunc()
   316  Tensor& serialized_sparse = (*output)[output_index];  in MapFunc()
   322  dataset_->output_dtypes()[output_index])  in MapFunc()
   [all …]
|
/external/tensorflow/tensorflow/tools/graph_transforms/ |
D | insert_logging.cc |
    82  int32 output_index;  in InsertLogging() local
    83  if (!strings::safe_strto32(output_index_string, &output_index)) {  in InsertLogging()
    87  node_outputs[name].insert(output_index);  in InsertLogging()
   131  for (int output_index : node_outputs[node.name()]) {  in InsertLogging() local
   132  print_node->add_input(strings::StrCat(node.name(), ":", output_index));  in InsertLogging()
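InsertLogging() above converts the output_index_string portion of a "node_name:output_index" spec with safe_strto32 and later rebuilds such strings with StrCat when wiring the Print node's inputs. A standalone sketch of that split/join convention without the TensorFlow string helpers (ParseTensorName and MakeTensorName are invented names; a bare name means output 0):

    #include <cstdlib>
    #include <string>

    // Splits "node_name:3" into a node name and an output index.
    bool ParseTensorName(const std::string& tensor_name, std::string* node_name,
                         int* output_index) {
      const size_t colon = tensor_name.rfind(':');
      if (colon == std::string::npos) {
        *node_name = tensor_name;  // no suffix -> output 0 by convention
        *output_index = 0;
        return true;
      }
      if (colon + 1 >= tensor_name.size()) return false;  // trailing ':' only
      *node_name = tensor_name.substr(0, colon);
      char* end = nullptr;
      const long value = std::strtol(tensor_name.c_str() + colon + 1, &end, 10);
      if (end == nullptr || *end != '\0' || value < 0) return false;  // not a valid index
      *output_index = static_cast<int>(value);
      return true;
    }

    // Joins back into "name:index"; by TensorFlow convention ":0" may be omitted.
    std::string MakeTensorName(const std::string& node_name, int output_index) {
      return output_index == 0 ? node_name
                               : node_name + ":" + std::to_string(output_index);
    }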
|
/external/tensorflow/tensorflow/contrib/periodic_resample/kernels/ |
D | periodic_resample_op.h |
    66  void MoveToOutputIndex(tensorflow::int64 output_index);
   100  void InputIndexer::MoveToOutputIndex(tensorflow::int64 output_index) {  in MoveToOutputIndex() argument
   101  linear_output_index_ = output_index;  in MoveToOutputIndex()
   105  auto last_reduced_i = output_index;  in MoveToOutputIndex()
   323  for (tensorflow::int64 output_index = start; output_index < limit;  in do_periodic_resample_op() local
   324  ++output_index) {  in do_periodic_resample_op()
   326  output(output_index) = input(local_indexer.linear_input_index());  in do_periodic_resample_op()
   328  output(local_indexer.linear_input_index()) = input(output_index);  in do_periodic_resample_op()
|
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/ |
D | node_info_delegate_test.cc |
   146  for (size_t output_index = 0; output_index < info.outputs.size();  in TEST() local
   147  output_index++) {  in TEST()
   149  interpreter->tensor(tflite_node->outputs->data[output_index]);  in TEST()
   152  subgraph_tensors->Get(info.outputs[output_index])->name()->str());  in TEST()
|
/external/tensorflow/tensorflow/compiler/tf2tensorrt/ |
D | tensorrt_test.cc |
   107  const int output_index = engine.getBindingIndex(kOutputTensor);  in Execute() local
   112  ASSERT_EQ(0, cudaMalloc(&buffers[output_index], sizeof(float)));  in Execute()
   124  ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float),  in Execute()
   131  ASSERT_EQ(0, cudaFree(buffers[output_index]));  in Execute()
|
/external/tensorflow/tensorflow/lite/python/ |
D | op_hint.py |
   492  def aggregate_and_return_name_for_output(self, fused_op_name, output_index,  argument
   507  del fused_op_name, output_index, out_graphdef
   609  def aggregate_and_return_name_for_output(self, fused_op_name, output_index,  argument
   630  fused_op_name, output_index, out_graphdef)
   639  fused_op_name, output_index))
   887  def _tensorflow_output_name(tensor_name, output_index):  argument
   888  return tensor_name if output_index == 0 else "%s:%d" % (tensor_name,
   889  output_index)
  1006  for output_index in sorted_output_indices:
  1007  output = call.outputs[output_index]
   [all …]
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | optional_ops.cc |
   135  Status WriteOptionalWithValueToOutput(OpKernelContext* ctx, int output_index,  in WriteOptionalWithValueToOutput() argument
   141  TF_RETURN_IF_ERROR(ctx->allocate_output(output_index, TensorShape({}),  in WriteOptionalWithValueToOutput()
   147  Status WriteOptionalNoneToOutput(OpKernelContext* ctx, int output_index) {  in WriteOptionalNoneToOutput() argument
   152  TF_RETURN_IF_ERROR(ctx->allocate_output(output_index, TensorShape({}),  in WriteOptionalNoneToOutput()
|
/external/tensorflow/tensorflow/core/framework/ |
D | op_kernel.cc |
   441  int output_index) {  in forward_ref_input_to_ref_output() argument
   445  set_output_ref(output_index, (*params_->inputs)[input_index].mutex_if_ref,  in forward_ref_input_to_ref_output()
   450  int input_index, int output_index, const TensorShape& output_shape,  in forward_input_to_output_with_shape() argument
   454  : output_alloc_attr(output_index);  in forward_input_to_output_with_shape()
   456  input_index, output_index, expected_output_dtype(output_index),  in forward_input_to_output_with_shape()
   457  output_shape, output_memory_type(output_index), output_attr);  in forward_input_to_output_with_shape()
   460  outputs_[output_index] = TensorValue(new_tensor.release());  in forward_input_to_output_with_shape()
   461  *output = outputs_[output_index].tensor;  in forward_input_to_output_with_shape()
   471  int input_index, output_index, stop;  in forward_input_to_output_with_shape() local
   481  params_->op_kernel->OutputRange(output_name, &output_index, &stop));  in forward_input_to_output_with_shape()
   [all …]
|
/external/tensorflow/tensorflow/lite/toco/graph_transformations/ |
D | quantize.cc |
   341  std::size_t output_index, ArrayDataType* quantized_data_type,  in ChooseQuantizationForOperatorOutput() argument
   343  const auto& output = op.outputs[output_index];  in ChooseQuantizationForOperatorOutput()
   389  if (output_index == LstmCellOperator::STATE_OUTPUT ||  in ChooseQuantizationForOperatorOutput()
   390  output_index == LstmCellOperator::ACTIV_TEMP) {  in ChooseQuantizationForOperatorOutput()
   622  for (std::size_t output_index = 0; output_index < op.outputs.size();  in Run() local
   623  output_index++) {  in Run()
   626  if (ChooseQuantizationForOperatorOutput(this, model, op, output_index,  in Run()
   630  const auto& output = op.outputs[output_index];  in Run()
|