Searched refs:quant_tensor (Results 1 – 7 of 7) sorted by relevance
/external/tensorflow/tensorflow/lite/tools/optimize/

  quantize_weights_test.cc
    167  const auto quant_tensor = quantized_graph->tensors()->Get(i);  in TEST_F() local
    170  EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer());  in TEST_F()
    171  EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable());  in TEST_F()
    172  EXPECT_EQ(GetAsVector(quant_tensor->shape()),  in TEST_F()
    174  EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str());  in TEST_F()
    175  EXPECT_EQ(quant_tensor->type(), float_tensor->type());  in TEST_F()
    206  const auto quant_tensor = quantized_graph->tensors()->Get(i);  in TEST_F() local
    208  EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer());  in TEST_F()
    209  EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable());  in TEST_F()
    210  EXPECT_EQ(GetAsVector(quant_tensor->shape()),  in TEST_F()
    [all …]
  modify_model_interface.cc
    204  TensorT* quant_tensor = subgraph->tensors[tot.output_index].get();  in SetInputTypeToUINT8() local
    205  const float quant_tensor_scale = quant_tensor->quantization->scale[0];  in SetInputTypeToUINT8()
    206  const int quant_tensor_zp = quant_tensor->quantization->zero_point[0];  in SetInputTypeToUINT8()
    231  TensorT* quant_tensor = subgraph->tensors[tot.input_index].get();  in SetOutputTypeToUINT8() local
    232  const float quant_tensor_scale = quant_tensor->quantization->scale[0];  in SetOutputTypeToUINT8()
    233  const int quant_tensor_zp = quant_tensor->quantization->zero_point[0];  in SetOutputTypeToUINT8()
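  Note: the modify_model_interface.cc hits read a per-tensor scale and zero point from the quantized tensor's quantization parameters. The sketch below (not code from the file above; names are illustrative) shows the affine mapping those two values define:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // real = scale * (q - zero_point);  q = round(real / scale) + zero_point
    int8_t QuantizeToInt8(float real, float scale, int zero_point) {
      const int q = static_cast<int>(std::round(real / scale)) + zero_point;
      return static_cast<int8_t>(std::clamp(q, -128, 127));  // keep within int8 range
    }

    float DequantizeFromInt8(int8_t q, float scale, int zero_point) {
      return scale * (static_cast<int>(q) - zero_point);
    }

  Re-expressing the same INT8 values as UINT8, which is what the SetInputTypeToUINT8 / SetOutputTypeToUINT8 call sites are concerned with, keeps the scale unchanged and shifts the zero point by 128 (q_uint8 = q_int8 + 128).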
  quantize_model_test.cc
    173  const auto quant_tensor = quantized_graph->tensors[i].get();  in TEST_P() local
    175  EXPECT_EQ(quant_tensor->buffer, float_tensor->buffer());  in TEST_P()
    176  EXPECT_EQ(quant_tensor->is_variable, float_tensor->is_variable());  in TEST_P()
    177  EXPECT_EQ(quant_tensor->shape, GetAsVector(float_tensor->shape()));  in TEST_P()
    178  EXPECT_EQ(quant_tensor->name, float_tensor->name()->str());  in TEST_P()
    179  EXPECT_EQ(quant_tensor->type, float_tensor->type());  in TEST_P()
    196  const auto quant_tensor = quantized_graph->tensors[i].get();  in TEST_P() local
    198  EXPECT_EQ(quant_tensor->buffer, float_tensor->buffer());  in TEST_P()
    199  EXPECT_EQ(quant_tensor->is_variable, float_tensor->is_variable());  in TEST_P()
    200  EXPECT_EQ(quant_tensor->shape, GetAsVector(float_tensor->shape()));  in TEST_P()
    [all …]
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/

  quantize_weights_test.cc
    238  const auto quant_tensor = quantized_graph->tensors()->Get(i);  in TEST_F() local
    241  /*quantized_tensor=*/quant_tensor,  in TEST_F()
    276  const auto quant_tensor = quantized_graph->tensors()->Get(i);  in TEST_F() local
    279  /*quantized_tensor=*/quant_tensor,  in TEST_F()
    286  if (quant_tensor->name()->str() == "conv_bias") {  in TEST_F()
    287  EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);  in TEST_F()
    289  EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);  in TEST_F()
    290  } else if (quant_tensor->buffer() != 0) {  in TEST_F()
    291  EXPECT_EQ(quant_tensor->type(), TensorType_INT8)  in TEST_F()
    292  << quant_tensor->name()->str();  in TEST_F()
    [all …]
/external/executorch/examples/qualcomm/qaihub_scripts/stable_diffusion/runner/

  runner.cpp
    305  void Runner::quant_tensor(  in quant_tensor() function in example::Runner
    429  quant_tensor(  in generate()
    439  quant_tensor(  in generate()
    464  quant_tensor(  in generate()
    529  quant_tensor(latent, vae_input, vae_input_scale_, vae_input_offset_);  in generate()
  runner.h
    101  void quant_tensor(
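  The full Runner::quant_tensor signature is not visible in the hits above, so the following is a hypothetical sketch of the pattern the generate() call sites suggest: quantizing a float tensor into a model's pre-quantized input buffer using a scale and offset, roughly q = round(x / scale) + offset. The function name, container types, and the 16-bit output element type are assumptions for illustration only.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for a quant_tensor-style helper: maps each float
    // element to a fixed-point value with the given scale and offset, clamped
    // to the assumed 16-bit unsigned range.
    void QuantTensorSketch(const std::vector<float>& src,
                           std::vector<uint16_t>& dst,
                           float scale, int offset) {
      dst.resize(src.size());
      for (std::size_t i = 0; i < src.size(); ++i) {
        const int q = static_cast<int>(std::round(src[i] / scale)) + offset;
        dst[i] = static_cast<uint16_t>(std::clamp(q, 0, 65535));
      }
    }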
/external/tensorflow/tensorflow/lite/python/

  util.py
    681  float_tensor, quant_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
    694  quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)
    699  quant_tensor.name,
    790  quant_tensor, float_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
    802  quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)
    807  quant_tensor.name,