Searched refs:input_max (Results 1 – 25 of 74) sorted by relevance

/external/tensorflow/tensorflow/lite/micro/kernels/arc_mli/
pooling_slicing_test.cc
39 const float input_max, const int filter_height, const int filter_width, in TestAveragePoolingQuantized() argument
55 input_max), in TestAveragePoolingQuantized()
109 float input_min, float input_max, int filter_width, in TestMaxPoolQuantized() argument
126 input_max), in TestMaxPoolQuantized()
191 const float input_max = 127; in TF_LITE_MICRO_TEST() local
203 kInput1Data, input_min, input_max, // input quantization range in TF_LITE_MICRO_TEST()
216 const float input_max = 127; in TF_LITE_MICRO_TEST() local
230 kInput1Data, input_min, input_max, // input quantization range in TF_LITE_MICRO_TEST()
244 const float input_max = 127; in TF_LITE_MICRO_TEST() local
261 kInput2Data, input_min, input_max, // input quantization range in TF_LITE_MICRO_TEST()
[all …]
fully_connected_slicing_test.cc
40 const float input_max, const int* weights_dims_data, const T* weights_data, in TestFullyConnectedQuantized() argument
57 input_max), in TestFullyConnectedQuantized()
127 const float input_max = 127.0f; in TF_LITE_MICRO_TEST() local
148 input_dims_data, input_data, input_min, input_max, weights_dims_data, in TF_LITE_MICRO_TEST()
156 const float input_max = 127.0f; in TF_LITE_MICRO_TEST() local
183 input_dims_data_local, input_data_local, input_min, input_max, in TF_LITE_MICRO_TEST()
193 const float input_max = 127.0f; in TF_LITE_MICRO_TEST() local
219 input_dims_data_2, input_data_2, input_min, input_max, in TF_LITE_MICRO_TEST()
228 const float input_max = 127.0f; in TF_LITE_MICRO_TEST() local
255 input_dims_data_local_2, input_data_local_2, input_min, input_max, in TF_LITE_MICRO_TEST()
[all …]
/external/tensorflow/tensorflow/core/kernels/
quantization_utils_test.cc
35 float input_max, float output_min, float output_max, in TestRequantizeMany() argument
43 QuantizedToFloat(values_quantized[value_index], input_min, input_max), in TestRequantizeMany()
55 input_max, output_min, output_max, in TestRequantizeMany()
59 *eigen_device, i_tensor, input_min, input_max, output_min, output_max, in TestRequantizeMany()
71 << ", input_max=" << input_max << ", output_min=" << output_min in TestRequantizeMany()
76 void TestRequantizeMany8To32Bit(float input_min, float input_max, in TestRequantizeMany8To32Bit() argument
85 QuantizedToFloat(values_quantized[value_index], input_min, input_max), in TestRequantizeMany8To32Bit()
96 input_max, output_min, output_max, in TestRequantizeMany8To32Bit()
107 << ", input_max=" << input_max << ", output_min=" << output_min in TestRequantizeMany8To32Bit()
231 const float input_max = ranges[range_index][1]; in TestRequantizeManyInNewRangeEigenVsNonEigen() local
[all …]
quantized_activation_ops_test.cc
46 const float input_max = 127.0f; in TEST_F() local
52 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
59 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
77 const float input_max = 127.0f; in TEST_F() local
83 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
90 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
quantized_pooling_ops_test.cc
52 const float input_max = 255.0f; in TEST_F() local
62 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
73 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
97 const float input_max = 255.0f; in TEST_F() local
107 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
118 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
quantized_bias_add_op_test.cc
52 const float input_max = 60.0f; in TEST_F() local
59 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
78 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
102 const float input_max = 2006.27f; in TEST_F() local
119 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
160 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
quantize_and_dequantize_op.h
68 typename TTypes<T>::ConstScalar input_max,
190 auto input_max = input_max_tensor->scalar<T>(); in Compute() local
193 input_max.device(d) = input.maximum(); in Compute()
195 d.memcpyDeviceToHost(&max_range, input_max.data(), sizeof(T)); in Compute()
231 auto input_max = input_max_tensor->vec<T>(); in Compute() local
242 input_max.device(d) = input.maximum(reduce_dims); in Compute()
245 d.memcpyDeviceToHost(max_range.data(), input_max.data(), in Compute()
279 typename TTypes<T>::ConstScalar input_max, in Compute()
284 const T max_val = input_max(); in Compute()
306 auto input_max = input_max_tensor->vec<T>(); in Compute() local
[all …]
quantized_bias_add_op.cc
42 const float input_max = context->input(3).flat<float>()(0); in Compute() local
71 GetOutputMinAndMaxForQuantizedAdd(input_min, input_max, bias_min, in Compute()
75 bias_ui8_array.size(), input_min, input_max, in Compute()
81 input_max, bias, bias_min, bias_max, output, &total_min, &total_max); in Compute()
quantized_concat_op.cc
42 const float input_max = (*input_min_and_max)[input_index].second; in Copy() local
43 if (input_min == output_min && input_max == output_max) { in Copy()
52 QuantizedToFloatStruct<T> q2f(input_min, input_max); in Copy()
88 const float input_max = input_maxes[i].flat<float>()(0); in CalculateInputAndOutputRange() local
89 input_mins_and_maxes->emplace_back(input_min, input_max); in CalculateInputAndOutputRange()
91 overall_max = std::max(overall_max, input_max); in CalculateInputAndOutputRange()
quantized_batch_norm_op.cc
32 const float input_max, const Tensor& mean, in ReferenceBatchNorm() argument
57 QuantizedToFloat(input_flat(input_index), input_min, input_max); in ReferenceBatchNorm()
95 const float input_max, const Tensor& mean, in FixedPointBatchNorm() argument
151 input_max, *output_min, *output_max); in FixedPointBatchNorm()
177 const float input_max = context->input(2).flat<float>()(0); in Compute() local
212 FixedPointBatchNorm<T1, T2>(input, input_min, input_max, mean, mean_min, in Compute()
quantized_batch_norm_op_test.cc
61 const float input_max = 127.0f; in TEST_F() local
71 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
100 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
158 const float input_max = 127.0f; in TEST_F() local
168 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
197 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
/external/tensorflow/tensorflow/lite/micro/kernels/
hard_swish_test.cc
60 float input_min, float input_max, float output_min, in TestHardSwishQuantized() argument
66 const float input_scale = ScaleFromMinMax<T>(input_min, input_max); in TestHardSwishQuantized()
67 const int input_zero_point = ZeroPointFromMinMax<T>(input_min, input_max); in TestHardSwishQuantized()
76 std::max(input_max - input_min, output_max - output_min) * (1.5f / 256.f); in TestHardSwishQuantized()
84 GenerateUniformRandomVector(size, input_min, input_max, random_engine, in TestHardSwishQuantized()
129 float input_max, float output_min, in TestHardSwishQuantizedBias() argument
133 const float input_scale = ScaleFromMinMax<T>(input_min, input_max); in TestHardSwishQuantizedBias()
136 const int input_zero_point = ZeroPointFromMinMax<T>(input_min, input_max); in TestHardSwishQuantizedBias()
144 TF_LITE_MICRO_EXPECT_GE(input_max, 3.0f); in TestHardSwishQuantizedBias()
285 float input_max = minmax_pairs[x][1]; in TF_LITE_MICRO_TEST() local
[all …]
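
The hard_swish_test.cc hits above derive quantization parameters from a float range via ScaleFromMinMax<T> and ZeroPointFromMinMax<T>, and size the comparison tolerance from the wider of the input and output ranges. The lines below are a minimal, self-contained sketch of the conventional affine mapping such helpers typically implement; the *Sketch names are placeholders, and the rounding details of the real TFLite Micro test utilities may differ.

// Sketch of the conventional affine mapping from a float range [min, max]
// to a quantization scale and zero point, in the spirit of the
// ScaleFromMinMax<T> / ZeroPointFromMinMax<T> calls in the hits above.
// Approximation only; not the actual TFLite Micro helpers.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

template <typename T>
float ScaleFromMinMaxSketch(float min, float max) {
  // One quantized step covers this much of the float range.
  return (max - min) /
         (static_cast<float>(std::numeric_limits<T>::max()) -
          static_cast<float>(std::numeric_limits<T>::min()));
}

template <typename T>
int ZeroPointFromMinMaxSketch(float min, float max) {
  const float scale = ScaleFromMinMaxSketch<T>(min, max);
  // The zero point is the quantized value that represents real 0.0.
  return static_cast<int>(std::numeric_limits<T>::min()) +
         static_cast<int>(std::round(-min / scale));
}

int main() {
  // A range like the [-127, 127] used in several tests above gives a scale
  // close to 1.0 and a zero point of 0 for int8.
  std::printf("scale=%f zero_point=%d\n",
              ScaleFromMinMaxSketch<int8_t>(-127.0f, 127.0f),
              ZeroPointFromMinMaxSketch<int8_t>(-127.0f, 127.0f));
  return 0;
}
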
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_QuantizeAndDequantizeV2.pbtxt
18 name: "input_max"
92 If range_given == False, the initial input_min, input_max will be determined
94 the specified values of input_min, input_max are used.
96 Note: If the input_min, input_max are specified, they do not need to equal the
102 [input_min, input_max] range to a range that lies within the representable
105 It determines the scale from one of input_min and input_max, then updates the
110 * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
112 would update input_max to be 127 / 12.8 = 9.921875
113 * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
117 specified input_max is used.
[all …]
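
The api_def_QuantizeAndDequantizeV2.pbtxt excerpt above describes how the op nudges a caller-supplied range: it derives the scale from whichever of input_min / input_max is the tighter constraint and then recomputes the other bound (e.g. updating input_max to 127 / 12.8 = 9.921875). The lines below sketch only that arithmetic, assuming signed num_bits = 8 output and a range that straddles zero; the [-10.0, 5.0] range is an assumed illustration because the excerpt is truncated, and the real op also covers unsigned output and the range_given == False case.

// Minimal sketch of the range "nudging" arithmetic described in the
// QuantizeAndDequantizeV2 api_def excerpt above. Illustrative values only.
#include <algorithm>
#include <cstdio>

int main() {
  // Signed output with num_bits = 8 quantizes to [-128, 127].
  const float quant_min = -128.0f;
  const float quant_max = 127.0f;

  // Hypothetical requested range (assumed, since the excerpt is truncated).
  float input_min = -10.0f;
  float input_max = 5.0f;

  // Take the smaller scale factor so both bounds stay representable.
  const float scale = std::min(quant_min / input_min, quant_max / input_max);

  // Nudge the bounds to what the chosen scale can actually represent.
  input_min = quant_min / scale;  // stays -10.0 when scale = 12.8
  input_max = quant_max / scale;  // 127 / 12.8 = 9.921875

  std::printf("scale=%g nudged range=[%g, %g]\n", scale, input_min, input_max);
  return 0;
}
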
/external/tensorflow/tensorflow/core/common_runtime/
quantize_training.cc
53 float input_max; member
62 input_max(max) {} in EdgeToConvert()
78 bool* range_given, float* input_min, float* input_max) { in FindType() argument
94 *input_max = 6; in FindType()
99 *input_max = 1; in FindType()
104 *input_max = 1; in FindType()
112 input_max); in FindType()
122 input_max); in FindType()
503 Node** input_max) { in MakeInputMinMax() argument
515 input_max_tensor.flat<float>()(0) = edge.input_max; in MakeInputMinMax()
[all …]
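
The quantize_training.cc hits above show FindType writing fixed values such as 6 and 1 into *input_max when the range is implied by the op type. The lines below illustrate that kind of static activation-to-range table, assuming the usual Relu6 / Sigmoid / Tanh output ranges; RangeForActivation is a hypothetical helper, not the actual FindType implementation, which inspects graph nodes and handles more cases.

// Sketch of a static op-name-to-range table consistent with the
// quantize_training.cc hits above. Hypothetical helper; not TensorFlow code.
#include <cstdio>
#include <string>

struct Range {
  float min;
  float max;
};

bool RangeForActivation(const std::string& op, Range* range) {
  if (op == "Relu6") {
    *range = {0.0f, 6.0f};   // Relu6 clamps its output to [0, 6]
  } else if (op == "Sigmoid") {
    *range = {0.0f, 1.0f};   // Sigmoid outputs values in (0, 1)
  } else if (op == "Tanh") {
    *range = {-1.0f, 1.0f};  // Tanh outputs values in (-1, 1)
  } else {
    return false;  // range not known statically
  }
  return true;
}

int main() {
  Range r;
  for (const char* op : {"Relu6", "Sigmoid", "Tanh", "Relu"}) {
    if (RangeForActivation(op, &r)) {
      std::printf("%s: [%g, %g]\n", op, r.min, r.max);
    } else {
      std::printf("%s: range must be observed at runtime\n", op);
    }
  }
  return 0;
}
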
/external/tensorflow/tensorflow/lite/delegates/hexagon/builders/tests/
activations_test.cc
194 void TestQuantizedHardSwish(int size, float input_min, float input_max, in TestQuantizedHardSwish() argument
198 GenerateUniformRandomVector(size, input_min, input_max, random_engine, in TestQuantizedHardSwish()
208 /*input=*/{Tensor_Type, {1, 1, 1, size}, input_min, input_max}, in TestQuantizedHardSwish()
224 std::max(input_max - input_min, output_max - output_min) * (1.5f / 256.f); in TestQuantizedHardSwish()
237 float input_max = input_minmax.second; in HardSwishTestImpl() local
242 size, input_min, input_max, output_min, output_max, &random_engine); in HardSwishTestImpl()
259 float input_max = 25.036512f; in HardSwishBiasTestImpl() local
267 const float input_scale = (input_max - input_min) / quantized_type_range; in HardSwishBiasTestImpl()
274 ASSERT_GE(input_max, 3.0f); in HardSwishBiasTestImpl()
297 /*input=*/{Tensor_Type, {1, 1, 1, size}, input_min, input_max}, in HardSwishBiasTestImpl()
/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_requantization_range_per_channel_op.cc
49 const Tensor& input_max = ctx->input(kInputMaxIndex); in Compute() local
51 const size_t depth = input_max.NumElements(); in Compute()
57 ctx, input_max.dim_size(0) == depth, in Compute()
59 depth, " was ", input_max.dim_size(0))); in Compute()
62 const float* input_max_data = input_max.flat<float>().data(); in Compute()
mkl_quantized_pooling_ops_test.cc
82 const float input_max = 255.0f; in TEST_F() local
92 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
111 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
151 const float input_max = 255.0f; in TEST_F() local
161 FloatTensorToQuantized<quint8>(input_float, input_min, input_max); in TEST_F()
179 AddInputFromArray<float>(TensorShape({1}), {input_max}); in TEST_F()
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
QuantizeAndDequantizeV2.pbtxt
12 name: "input_max"
62 name: "input_max"
113 name: "input_max"
165 name: "input_max"
230 name: "input_max"
302 name: "input_max"
QuantizeAndDequantizeV3.pbtxt
12 name: "input_max"
59 name: "input_max"
107 name: "input_max"
156 name: "input_max"
212 name: "input_max"
QuantizeAndDequantize.pbtxt
40 name: "input_max"
96 name: "input_max"
155 name: "input_max"
214 name: "input_max"
274 name: "input_max"
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
QuantizeAndDequantize.pbtxt
40 name: "input_max"
96 name: "input_max"
155 name: "input_max"
214 name: "input_max"
274 name: "input_max"
QuantizeAndDequantizeV2.pbtxt
12 name: "input_max"
62 name: "input_max"
113 name: "input_max"
165 name: "input_max"
230 name: "input_max"
QuantizeAndDequantizeV3.pbtxt
12 name: "input_max"
59 name: "input_max"
107 name: "input_max"
156 name: "input_max"
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
fake_quantize_ops.cc
90 xla::XlaOp input_max, int num_bits, bool narrow_range) { in BuildFakeQuantCustomCall() argument
97 std::vector<xla::XlaOp> args = {input, input_min, input_max, num_bits_arg, in BuildFakeQuantCustomCall()
190 float input_min, input_max, scale; in FakeQuantWithMinMaxArgsGradOp() local
192 OP_REQUIRES_OK(ctx, ctx->GetAttr("max", &input_max)); in FakeQuantWithMinMaxArgsGradOp()
193 CpuNudge(input_min, input_max, quant_min, quant_max, &nudged_input_min_, in FakeQuantWithMinMaxArgsGradOp()
244 xla::XlaOp input_max = ctx->Input(2); in Compile() local
250 b, input, input_min, input_max, num_bits_, narrow_range_)); in Compile()
256 XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_, in Compile()
297 xla::XlaOp input_max = ctx->Input(3); in Compile() local
301 XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_, in Compile()
/external/tensorflow/tensorflow/compiler/tests/
fake_quant_ops_test.py
82 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
117 max=input_max,
180 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
211 max=input_max,
281 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
325 max_placeholder: input_max
386 def _TestOp(self, input_min, input_max, num_bits, narrow_range, argument
429 max_placeholder: input_max
