/external/tensorflow/tensorflow/lite/delegates/gpu/common/ |
D | object_reader.cc |
    38   uint32_t tensor_idx, Value** value) {   in ReadNonConstantTensor() argument
    39   if (tensor_idx >= context->tensors_size) {   in ReadNonConstantTensor()
    41   absl::StrCat("ReadNonConstTensor: input tensor index: ", tensor_idx));   in ReadNonConstantTensor()
    44   if (tensor_to_value->find(tensor_idx) == tensor_to_value->end()) {   in ReadNonConstantTensor()
    45   TfLiteTensor* tflite_tensor = &context->tensors[tensor_idx];   in ReadNonConstantTensor()
    48   "ReadNonConstantTensor: value is a constant tensor: ", tensor_idx));   in ReadNonConstantTensor()
    55   if (quant_conversion_map->find(tensor_idx) ==   in ReadNonConstantTensor()
    62   context, tensor_idx, kTfLiteFloat32, &fp_tflite_tensor,   in ReadNonConstantTensor()
    68   tflite_tensor = &context->tensors[tensor_idx];   in ReadNonConstantTensor()
    71   (*quant_conversion_map)[fp_tensor_index] = tensor_idx;   in ReadNonConstantTensor()
    [all …]
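The hits above are from the GPU delegate's ObjectReader, which maps TfLite tensor indices to delegate Values, rejecting out-of-range indices and constant tensors. Below is a minimal sketch of that bounds-check-then-lookup pattern only; the `Value` type, function name, and error codes are illustrative stand-ins, constant tensors are assumed to be detectable via `allocation_type == kTfLiteMmapRo`, and the quantization-conversion handling seen at lines 55-71 is omitted.

    #include <unordered_map>

    #include "absl/status/status.h"
    #include "absl/strings/str_cat.h"
    #include "tensorflow/lite/c/common.h"

    // Simplified stand-in for the delegate's Value type.
    struct Value {};

    // Sketch: validate the index, refuse constant tensors, and create the
    // Value lazily the first time an index is seen.
    absl::Status ReadNonConstantTensorSketch(
        TfLiteContext* context, std::unordered_map<int, Value>* tensor_to_value,
        uint32_t tensor_idx, Value** value) {
      if (tensor_idx >= context->tensors_size) {
        return absl::OutOfRangeError(
            absl::StrCat("Invalid tensor index: ", tensor_idx));
      }
      TfLiteTensor* tflite_tensor = &context->tensors[tensor_idx];
      // Constant (weight) tensors are typically backed by read-only mmap data.
      if (tflite_tensor->allocation_type == kTfLiteMmapRo) {
        return absl::InvalidArgumentError(
            absl::StrCat("Value is a constant tensor: ", tensor_idx));
      }
      auto it = tensor_to_value->find(tensor_idx);
      if (it == tensor_to_value->end()) {
        it = tensor_to_value->emplace(tensor_idx, Value{}).first;
      }
      if (value != nullptr) *value = &it->second;
      return absl::OkStatus();
    }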
|
D | object_reader.h |
    39   uint32_t tensor_idx, Value** value = nullptr);
    53   absl::Status ReadValueByTensorIdx(uint32_t tensor_idx, Value** value);
    66   const int32_t tensor_idx = node_->inputs->data[idx];   in ReadTensor() local
    67   if (tensor_idx < 0) {   in ReadTensor()
    73   const TfLiteTensor* tflite_tensor = context_->tensors + tensor_idx;   in ReadTensor()
    82   t->id = tensor_idx;   in ReadTensor()
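The ReadTensor hits show the usual way a delegate resolves the idx-th input of a node: read the global tensor index from the node's input array and treat a negative index as an optional input that was not provided. A small illustrative sketch of just that lookup (the function name is an assumption, not the header's API):

    #include "tensorflow/lite/c/common.h"

    // Sketch: fetch the idx-th input tensor of a node; a negative tensor
    // index means the optional input is absent.
    const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                               const TfLiteNode* node, int idx) {
      const int32_t tensor_idx = node->inputs->data[idx];
      if (tensor_idx < 0) {
        return nullptr;  // Optional input not provided.
      }
      return &context->tensors[tensor_idx];
    }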
|
D | model_builder.cc |
    2458   int tensor_idx = tensor_indices->data[i];   in IsAllAllowedTensors() local
    2459   if (tensor_idx == kTfLiteOptionalTensor) continue;   in IsAllAllowedTensors()
    2460   const TfLiteTensor* t = &context->tensors[tensor_idx];   in IsAllAllowedTensors()
    2565   int tensor_idx = tflite_node->inputs->data[i];   in CopyVariableTensorOutputs() local
    2567   if (!reader.ReadValueByTensorIdx(tensor_idx, &value).ok()) continue;   in CopyVariableTensorOutputs()
    2575   tensor_idx));   in CopyVariableTensorOutputs()
|
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/ |
D | calibrator_test.cc |
    105   for (int tensor_idx = 0; tensor_idx < 4; tensor_idx++) {   in TEST() local
    106   EXPECT_NEAR(stats.at(tensor_idx).min, tensor_idx + 1, eps);   in TEST()
    107   EXPECT_NEAR(stats.at(tensor_idx).max, tensor_idx + 1, eps);   in TEST()
    164   for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {   in TEST() local
    165   EXPECT_NEAR(stats.at(tensor_idx).min, expected_values[tensor_idx], eps);   in TEST()
    166   EXPECT_NEAR(stats.at(tensor_idx).max, expected_values[tensor_idx], eps);   in TEST()
    181   for (int tensor_idx = 1; tensor_idx < 5; tensor_idx++) {   in TEST() local
    182   EXPECT_NEAR(stats.at(tensor_idx).min, expected_values[tensor_idx], eps);   in TEST()
    183   EXPECT_NEAR(stats.at(tensor_idx).max, expected_values[tensor_idx], eps);   in TEST()
    251   for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {   in TEST() local
    [all …]
|
/external/tensorflow/tensorflow/lite/tools/optimize/ |
D | quantize_weights.cc |
    60    int32_t tensor_idx) {   in GetTensorConsumers() argument
    70    if (op->inputs[i] == tensor_idx) {   in GetTensorConsumers()
    177   int32_t tensor_idx = op->inputs[op_input_idx];   in CheckAllOpInputsQuantized() local
    179   if (tensor_idx == -1) {   in CheckAllOpInputsQuantized()
    184   TensorT* tensor = subgraph->tensors[tensor_idx].get();   in CheckAllOpInputsQuantized()
    207   int32_t tensor_idx = op->inputs[op_input_idx];   in InsertQuantizableInputTensorsFromOperator() local
    208   if (tensor_idx == -1) {   in InsertQuantizableInputTensorsFromOperator()
    214   TensorT* tensor = subgraph->tensors[tensor_idx].get();   in InsertQuantizableInputTensorsFromOperator()
    239   tensor_map->insert({tensor_idx,   in InsertQuantizableInputTensorsFromOperator()
    243   tensor_map->insert({tensor_idx,   in InsertQuantizableInputTensorsFromOperator()
    [all …]
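GetTensorConsumers (hits at 60/70) scans every operator's input list for a given tensor index, which is how the weight quantizer finds all consumers of a weight tensor. A hedged sketch of that consumer scan over the flatbuffer object API; the real helper also records the operator index and input position, which are dropped here for brevity.

    #include <cstdint>
    #include <vector>

    #include "tensorflow/lite/schema/schema_generated.h"

    // Sketch: collect every operator in a subgraph that consumes tensor_idx.
    std::vector<const tflite::OperatorT*> GetTensorConsumersSketch(
        const tflite::SubGraphT& subgraph, int32_t tensor_idx) {
      std::vector<const tflite::OperatorT*> consumers;
      for (const auto& op : subgraph.operators) {
        for (const int32_t input : op->inputs) {
          if (input == tensor_idx) {
            consumers.push_back(op.get());
            break;  // One match per operator is enough.
          }
        }
      }
      return consumers;
    }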
|
D | quantize_model.cc |
    42    bool IsFloatTensor(const SubGraphT* subgraph, int32_t tensor_idx) {   in IsFloatTensor() argument
    43    TensorT* tensor = subgraph->tensors[tensor_idx].get();   in IsFloatTensor()
    107   const int32_t tensor_idx = op->inputs[input_idx];   in PopulateRealValueOpSet() local
    108   if (IsFloatTensor(subgraph, tensor_idx)) {   in PopulateRealValueOpSet()
    116   const int32_t tensor_idx = op->outputs[output_idx];   in PopulateRealValueOpSet() local
    117   if (IsFloatTensor(subgraph, tensor_idx)) {   in PopulateRealValueOpSet()
    124   const int32_t tensor_idx = op->inputs[0];   in PopulateRealValueOpSet() local
    125   if (IsFloatTensor(subgraph, tensor_idx)) {   in PopulateRealValueOpSet()
    131   const int32_t tensor_idx = op->outputs[0];   in PopulateRealValueOpSet() local
    132   if (IsFloatTensor(subgraph, tensor_idx)) {   in PopulateRealValueOpSet()
    [all …]
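IsFloatTensor and PopulateRealValueOpSet decide which operators actually touch float32 tensors. The sketch below illustrates that test over an operator's inputs; the helper name is illustrative, and skipping -1 (an absent optional input, as seen in quantize_weights.cc above) is an assumption carried over from that file.

    #include <cstdint>

    #include "tensorflow/lite/schema/schema_generated.h"

    // Sketch: does any present input of this op have float32 type?
    bool OpHasFloatInput(const tflite::SubGraphT& subgraph,
                         const tflite::OperatorT& op) {
      for (const int32_t tensor_idx : op.inputs) {
        if (tensor_idx == -1) continue;  // Optional input not present.
        const tflite::TensorT* tensor = subgraph.tensors[tensor_idx].get();
        if (tensor->type == tflite::TensorType_FLOAT32) return true;
      }
      return false;
    }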
|
D | modify_model_interface.cc |
    402   const int32_t tensor_idx = subgraph->inputs[input_idx];   in AddUint8Dequant() local
    403   TensorT* tensor = subgraph->tensors[tensor_idx].get();   in AddUint8Dequant()
    423   tensor_idx);   in AddUint8Dequant()
    442   const int32_t tensor_idx = subgraph->outputs[output_idx];   in AddUint8Quant() local
    443   TensorT* tensor = subgraph->tensors[tensor_idx].get();   in AddUint8Quant()
    462   utils::MakeQuantizeOperator(model, &tailing_op, tensor_idx,   in AddUint8Quant()
|
D | quantize_weights_test.cc |
    97    bool IsModelInputOrOutput(const Model* model, uint32_t tensor_idx) {   in IsModelInputOrOutput() argument
    102   if (subgraph->inputs()->Get(i) == tensor_idx) {   in IsModelInputOrOutput()
    107   if (subgraph->outputs()->Get(i) == tensor_idx) {   in IsModelInputOrOutput()
    117   uint32_t tensor_idx,   in GetProducerOpCode() argument
    123   if (op->outputs()->Get(i) == tensor_idx) {   in GetProducerOpCode()
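IsModelInputOrOutput checks a tensor index against the subgraph's declared inputs and outputs through the generated flatbuffer accessors. A hedged sketch of that check (the test above inspects a single subgraph; this variant scans all of them, and the null guards are an added precaution rather than something shown in the hits):

    #include <cstdint>

    #include "tensorflow/lite/schema/schema_generated.h"

    // Sketch: true if tensor_idx appears in the inputs or outputs of any
    // subgraph of a packed flatbuffer model.
    bool IsGraphInputOrOutput(const tflite::Model* model, uint32_t tensor_idx) {
      for (const tflite::SubGraph* subgraph : *model->subgraphs()) {
        if (subgraph->inputs() != nullptr) {
          for (const int32_t input : *subgraph->inputs()) {
            if (static_cast<uint32_t>(input) == tensor_idx) return true;
          }
        }
        if (subgraph->outputs() != nullptr) {
          for (const int32_t output : *subgraph->outputs()) {
            if (static_cast<uint32_t>(output) == tensor_idx) return true;
          }
        }
      }
      return false;
    }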
|
D | quantize_model_test.cc |
    303   for (int tensor_idx = 0; tensor_idx < subgraph->tensors.size();   in TEST_P() local
    304   ++tensor_idx) {   in TEST_P()
    305   const auto& tensor = subgraph->tensors[tensor_idx];   in TEST_P()
    306   if (input_idx != tensor_idx && output_idx != tensor_idx) {   in TEST_P()
    381   for (int tensor_idx = 0; tensor_idx < subgraph->tensors.size();   in TEST_P() local
    382   ++tensor_idx) {   in TEST_P()
    383   const auto& tensor = subgraph->tensors[tensor_idx];   in TEST_P()
    384   if (input_idx != tensor_idx && output_idx != tensor_idx) {   in TEST_P()
|
/external/tflite-support/tensorflow_lite_support/custom_ops/ |
D | tflite_inference_main.cc |
    62   for (int tensor_idx : interpreter->inputs()) {   in RunWithRandomInputs() local
    63   auto tensor = interpreter->tensor(tensor_idx);   in RunWithRandomInputs()
    87   for (int tensor_idx : interpreter->outputs()) {   in RunWithRandomInputs() local
    88   auto tensor = interpreter->tensor(tensor_idx);   in RunWithRandomInputs()
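RunWithRandomInputs iterates the interpreter's input tensor indices, resolves each index to a TfLiteTensor, and fills it before invoking the model. The following is a self-contained sketch of that driver pattern under stated assumptions: it fills raw bytes regardless of tensor type, uses a fixed RNG seed, and trims error reporting; it is not the tool's actual implementation.

    #include <memory>
    #include <random>

    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"

    // Sketch: fill every input tensor's buffer with random bytes, run once.
    bool RunOnceWithRandomInputs(const char* model_path) {
      auto model = tflite::FlatBufferModel::BuildFromFile(model_path);
      if (!model) return false;
      tflite::ops::builtin::BuiltinOpResolver resolver;
      std::unique_ptr<tflite::Interpreter> interpreter;
      if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk ||
          interpreter->AllocateTensors() != kTfLiteOk) {
        return false;
      }
      std::mt19937 rng(42);
      std::uniform_int_distribution<int> byte(0, 255);
      for (int tensor_idx : interpreter->inputs()) {
        TfLiteTensor* tensor = interpreter->tensor(tensor_idx);
        for (size_t i = 0; i < tensor->bytes; ++i) {
          tensor->data.raw[i] = static_cast<char>(byte(rng));
        }
      }
      return interpreter->Invoke() == kTfLiteOk;
    }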
|
/external/tensorflow/tensorflow/lite/testing/ |
D | selective_build_test.cc |
    48   for (int tensor_idx : interpreter->inputs()) {   in RunWithRandomInputs() local
    49   auto tensor = interpreter->tensor(tensor_idx);   in RunWithRandomInputs()
|
/external/tensorflow/tensorflow/lite/experimental/quantization_debugger/ |
D | debugger.py |
    130   {tensor_idx: op_info['op_name'] for tensor_idx in op_info['outputs']})
    298   tensor_idx = numeric_verify_name.rsplit(':', 1)[-1]
    299   return int(tensor_idx)
|
/external/tensorflow/tensorflow/lite/micro/ |
D | micro_interpreter.cc |
    84   int tensor_idx) {   in GetTensor() argument
    87   helper->model_, helper->eval_tensors_, tensor_idx);   in GetTensor()
    91   const struct TfLiteContext* context, int tensor_idx) {   in GetEvalTensor() argument
    93   return &helper->eval_tensors_[tensor_idx];   in GetEvalTensor()
|
D | micro_interpreter.h |
    57   int tensor_idx);
    59   int tensor_idx);
|
D | micro_allocator_test.cc |
    85   TfLiteEvalTensor* eval_tensors, int tensor_idx,   in VerifyMockTensor() argument
    88   model, eval_tensors, tensor_idx),   in VerifyMockTensor()
    90   VerifyMockTfLiteEvalTensor(&eval_tensors[tensor_idx]);   in VerifyMockTensor()
    94   TfLiteEvalTensor* eval_tensors, int tensor_idx) {   in VerifyMockWeightTensor() argument
    96   model, eval_tensors, tensor_idx));   in VerifyMockWeightTensor()
    97   VerifyMockWeightTfLiteEvalTensor(&eval_tensors[tensor_idx]);   in VerifyMockWeightTensor()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | sparse_conditional_accumulator.h |
    87    const Tensor* tensor_idx = std::get<0>(*tensor);   in ValidateShape() local
    112   tensor_idx->dims() > 0) {   in ValidateShape()
    113   for (int64 i = 0; i < tensor_idx->dim_size(0); i++) {   in ValidateShape()
    114   if (tensor_idx->vec<int64>()(i) >= shape_.dim_size(0)) {   in ValidateShape()
    117   "; index is ", tensor_idx->vec<int64>()(i), " exceeded ",   in ValidateShape()
|
D | depthwise_conv_op_gpu.h |
    250    const int tensor_idx = thread_pix * in_depth + thread_depth;
    274    const int filter_offset = tensor_idx + start_channel;
    540    const int tensor_idx = thread_depth * in_pixels + thread_pix;
    566    const int inout_offset = channel * in_pixels + tensor_idx;
    1225   const int tensor_idx = thread_pix * in_depth + thread_depth;
    1248   const int filter_offset = tensor_idx + start_channel;
    1497   const int tensor_idx = thread_depth * in_pixels + thread_pix;
    1517   const int inout_offset = channel * in_pixels + tensor_idx;
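The two formulas above are the flattened-index arithmetic for the two data layouts this kernel supports: pixel-major (NHWC, `pix * in_depth + depth`) and channel-major (NCHW, `depth * in_pixels + pix`). A tiny illustrative sketch of just that arithmetic, with names chosen here rather than taken from the kernel:

    // in_depth = number of channels, in_pixels = rows * cols of one image.
    inline int FlatIndexNHWC(int pix, int depth, int in_depth) {
      return pix * in_depth + depth;   // pixel-major layout
    }
    inline int FlatIndexNCHW(int pix, int depth, int in_pixels) {
      return depth * in_pixels + pix;  // channel-major layout
    }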
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tensor_tracer_report.py |
    168   tensor_idx = self.graph_order.tensor_to_idx[tensor_name]
    171   self.cache_idx_to_tensor_idx.append(tensor_idx)
    415   tensor_idx = tensor_trace_order.cache_idx_to_tensor_idx[cache_idx]
    416   line = '%d %d\n'%(cache_idx, tensor_idx)
|
/external/tensorflow/tensorflow/lite/c/ |
D | common.h |
    761   int tensor_idx);
    767   int tensor_idx);
|
/external/tensorflow/tensorflow/lite/java/src/main/native/ |
D | nativeinterpreterwrapper_jni.cc |
    642   const int tensor_idx = interpreter->inputs()[input_idx];   in Java_org_tensorflow_lite_NativeInterpreterWrapper_resizeInput() local
    644   TfLiteTensor* target = interpreter->tensor(tensor_idx);   in Java_org_tensorflow_lite_NativeInterpreterWrapper_resizeInput()
    650   tensor_idx, convertJIntArrayToVector(env, dims));   in Java_org_tensorflow_lite_NativeInterpreterWrapper_resizeInput()
    653   tensor_idx, convertJIntArrayToVector(env, dims));   in Java_org_tensorflow_lite_NativeInterpreterWrapper_resizeInput()
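The JNI resize path translates the caller's input position into the interpreter's global tensor index via interpreter->inputs()[input_idx] before resizing that tensor. A sketch of the same flow without the JNI plumbing; the function name is illustrative, and the trailing AllocateTensors() call is an assumption about typical usage rather than something shown in the hits.

    #include <vector>

    #include "tensorflow/lite/interpreter.h"

    // Sketch: resize the input at position input_idx to the given dims.
    bool ResizeInputByPosition(tflite::Interpreter* interpreter, int input_idx,
                               const std::vector<int>& dims) {
      const int tensor_idx = interpreter->inputs()[input_idx];
      if (interpreter->ResizeInputTensor(tensor_idx, dims) != kTfLiteOk) {
        return false;
      }
      // Buffers must be re-allocated after a resize before the next Invoke().
      return interpreter->AllocateTensors() == kTfLiteOk;
    }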
|
/external/tensorflow/tensorflow/lite/tools/ |
D | verifier.cc |
    521   for (const int tensor_idx : *subgraph.inputs()) {   in VerifySubGraphConsistency()
    522   subgraph_input_tensors.insert(tensor_idx);   in VerifySubGraphConsistency()
|
/external/tensorflow/tensorflow/lite/ |
D | interpreter_test.cc |
    1534   void AssignCustomAllocForTensor(int tensor_idx, int required_alignment) {   in AssignCustomAllocForTensor() argument
    1535   const TfLiteTensor* tensor = interpreter_->tensor(tensor_idx);   in AssignCustomAllocForTensor()
    1538   interpreter_->SetCustomAllocationForTensor(tensor_idx, tensor_alloc),   in AssignCustomAllocForTensor()
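AssignCustomAllocForTensor sizes a buffer from the tensor, aligns it as required, and registers it with SetCustomAllocationForTensor. A minimal sketch of that pattern, assuming C++17's std::aligned_alloc is available and that alignment is a power of two; ownership and freeing of the buffer are left to the caller and error handling is trimmed.

    #include <cstdlib>

    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/interpreter.h"

    // Sketch: hand the interpreter a caller-owned, aligned buffer for one
    // tensor. The buffer must outlive the interpreter's use of it.
    bool GiveTensorCustomBuffer(tflite::Interpreter* interpreter, int tensor_idx,
                                size_t alignment) {
      const TfLiteTensor* tensor = interpreter->tensor(tensor_idx);
      // aligned_alloc requires the size to be a multiple of the alignment.
      const size_t size = ((tensor->bytes + alignment - 1) / alignment) * alignment;
      void* buffer = std::aligned_alloc(alignment, size);  // Caller owns this.
      if (buffer == nullptr) return false;
      TfLiteCustomAllocation alloc{buffer, tensor->bytes};
      return interpreter->SetCustomAllocationForTensor(tensor_idx, alloc) ==
             kTfLiteOk;
    }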
|
/external/tensorflow/tensorflow/lite/delegates/nnapi/ |
D | nnapi_delegate.cc |
    2367   auto is_const_tensor = [&node, &context](int tensor_idx) {   in Validate() argument
    2368   return context->tensors[node->inputs->data[tensor_idx]]   in Validate()
|