Searched refs: output_max (Results 1 – 25 of 56) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
quantized_add_op.cc
49 float scalar_input_max, float output_min, float output_max, in ScalarAddition() argument
52 scalar_input, scalar_input_min, scalar_input_max, output_min, output_max); in ScalarAddition()
55 full_input[i], full_input_min, full_input_max, output_min, output_max); in ScalarAddition()
67 float output_min, float output_max, qint32* output) { in ScalarAddition() argument
69 scalar_input, scalar_input_min, scalar_input_max, output_min, output_max); in ScalarAddition()
76 FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max); in ScalarAddition()
78 FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max); in ScalarAddition()
124 float output_min, float output_max, qint32* output) { in ScalarAddition() argument
126 scalar_input, scalar_input_min, scalar_input_max, output_min, output_max); in ScalarAddition()
133 FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max); in ScalarAddition()
[all …]
quantized_batch_norm_op.cc
38 Tensor* output, float* output_min, float* output_max) { in ReferenceBatchNorm() argument
50 *output_max = std::numeric_limits<float>::lowest(); in ReferenceBatchNorm()
79 *output_max = std::max(output_value, *output_max); in ReferenceBatchNorm()
82 FloatToQuantized<T2>(output_value, *output_min, *output_max); in ReferenceBatchNorm()
101 Tensor* output, float* output_min, float* output_max) { in FixedPointBatchNorm() argument
116 *output_max = (1 << 20); in FixedPointBatchNorm()
139 FloatToQuantized<T2>(scale_value, *output_min, *output_max); in FixedPointBatchNorm()
141 FloatToQuantized<T2>(offset_value, *output_min, *output_max); in FixedPointBatchNorm()
145 FloatToQuantized<T2>(1.0f, *output_min, *output_max); in FixedPointBatchNorm()
151 input_max, *output_min, *output_max); in FixedPointBatchNorm()
[all …]
quantized_concat_op.cc
35 float output_min, float output_max) in RequantizeCopier()
37 output_max(output_max), in RequantizeCopier()
43 if (input_min == output_min && input_max == output_max) { in Copy()
54 FloatToQuantizedStruct<T> f2q(output_min, output_max); in Copy()
65 float output_max; member
82 float* output_min, float* output_max) { in CalculateInputAndOutputRange() argument
101 *output_max = largest_value; in CalculateInputAndOutputRange()
104 *output_max = overall_max; in CalculateInputAndOutputRange()
195 float output_max = std::numeric_limits<float>::lowest(); in Compute() local
199 &output_max); in Compute()
[all …]
quantized_activation_ops.cc
54 Tensor* output_max = nullptr; in Compute() local
55 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); in Compute()
56 output_max->flat<float>()(0) = max_input; in Compute()
92 Tensor* output_max = nullptr; in Compute() local
93 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); in Compute()
94 output_max->flat<float>()(0) = max_input; in Compute()
quantized_pooling_ops.cc
97 Tensor* output_max = nullptr; in Compute() local
98 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); in Compute()
99 output_max->flat<float>()(0) = max_input; in Compute()
121 Tensor* output_max = nullptr; in Compute() local
122 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); in Compute()
123 output_max->flat<float>()(0) = max_input; in Compute()
quantization_utils_test.cc
35 float input_max, float output_min, float output_max, in TestRequantizeMany() argument
44 output_min, output_max)); in TestRequantizeMany()
55 input_max, output_min, output_max, in TestRequantizeMany()
59 *eigen_device, i_tensor, input_min, input_max, output_min, output_max, in TestRequantizeMany()
72 << ", output_max=" << output_max << ", value_index=" << value_index; in TestRequantizeMany()
77 float output_min, float output_max, in TestRequantizeMany8To32Bit() argument
86 output_min, output_max)); in TestRequantizeMany8To32Bit()
96 input_max, output_min, output_max, in TestRequantizeMany8To32Bit()
108 << ", output_max=" << output_max << ", value_index=" << value_index; in TestRequantizeMany8To32Bit()
233 const float output_max = ranges[range_index][3]; in TestRequantizeManyInNewRangeEigenVsNonEigen() local
[all …]
quantization_utils.cc
23 float* output_min, float* output_max) { in GetOutputMinAndMaxForQuantizedAdd() argument
35 *output_max = in GetOutputMinAndMaxForQuantizedAdd()
39 *output_min = -(*output_max); in GetOutputMinAndMaxForQuantizedAdd()
quantized_activation_ops_test.cc
63 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
65 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); in TEST_F()
94 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
96 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); in TEST_F()
quantized_reshape_op.cc
40 Tensor* output_max = nullptr; in Compute() local
41 OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max)); in Compute()
42 output_max->flat<float>()(0) = input_max_float; in Compute()
quantized_pooling_ops_test.cc
77 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
79 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); in TEST_F()
122 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
124 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); in TEST_F()
requantization_range_op.cc
53 Tensor* output_max = nullptr; in Compute() local
54 OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_max)); in Compute()
69 output_max->flat<float>().setConstant(used_max_float); in Compute()
quantized_bias_add_op_test.cc
84 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
86 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); in TEST_F()
166 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
168 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); in TEST_F()
quantize_op_test.cc
293 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
295 EXPECT_NEAR(255.0f, output_max, 1e-5f); in TEST_F()
315 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
317 EXPECT_LT(0.0f, output_max); in TEST_F()
337 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
339 EXPECT_NEAR(0.3f, output_max, 1e-5f); in TEST_F()
359 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
361 EXPECT_NEAR(0.0f, output_max, 1e-5f); in TEST_F()
requantize.cc
51 Tensor* output_max = nullptr; in Compute() local
52 OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max)); in Compute()
93 output_max->flat<float>().setConstant(requested_output_max_float); in Compute()
quantized_concat_op_test.cc
112 const float output_max = GetOutput(2)->flat<float>()(0); in TestSmall8Bit() local
114 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); in TestSmall8Bit()
178 const float output_max = GetOutput(2)->flat<float>()(0); in TestSmall32Bit() local
180 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); in TestSmall32Bit()
240 const float output_max = GetOutput(2)->flat<float>()(0); in TestSecondDim8Bit() local
242 QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); in TestSecondDim8Bit()
quantize_down_and_shrink_range.cc
49 Tensor* output_max = nullptr; in Compute() local
50 OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max)); in Compute()
97 output_max->flat<float>().setConstant(actual_max_float); in Compute()
quantized_bias_add_op.cc
88 Tensor* output_max = nullptr; in Compute() local
89 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); in Compute()
90 output_max->flat<float>()(0) = total_max; in Compute()
meta_support.h
81 float output_max, quint8* output);
101 float bias_max, float output_min, float output_max,
quantized_batch_norm_op_test.cc
128 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
130 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); in TEST_F()
236 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
238 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); in TEST_F()
quantized_conv_ops_test.cc
128 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
130 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); in TEST_F()
319 const float output_max = GetOutput(2)->flat<float>()(0); in TEST_F() local
321 QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); in TEST_F()
quantization_utils.h
799 float* output_min, float* output_max);
809 float* output_max) { in QuantizedAddUsingEigen() argument
815 smaller_input_max, output_min, output_max); in QuantizedAddUsingEigen()
821 FloatToQuantized<T3>(0.0f, *output_min, *output_max); in QuantizedAddUsingEigen()
829 FloatToQuantizedStruct<T3> f2q(*output_min, *output_max); in QuantizedAddUsingEigen()
855 Tensor* output, float* output_min, float* output_max) { in QuantizedAdd() argument
861 smaller_input_max, output_min, output_max); in QuantizedAdd()
867 FloatToQuantized<T3>(0.0f, *output_min, *output_max); in QuantizedAdd()
873 float total_max = *output_max; in QuantizedAdd()
meta_support.cc
258 float output_max, quint8* output) { in Requantize() argument
274 CalculateOneOverRangeScale<uint8_t>(output_min, output_max); in Requantize()
349 float bias_max, float output_min, float output_max, in QuantizedBiasAdd() argument
373 CalculateOneOverRangeScale<int32_t>(output_min, output_max); in QuantizedBiasAdd()
quantized_instance_norm.cc
298 Tensor* output_max = nullptr; in Compute() local
299 OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); in Compute()
345 output_max->scalar<float>()() = maximum; in Compute()
392 output_max->flat<float>()(0) = normed_max(); in Compute()
/external/tensorflow/tensorflow/python/ops/
quantized_conv_ops_test.py
79 output_max = value[2]
81 output_max)
/external/tensorflow/tensorflow/tools/graph_transforms/
quantize_nodes_test.cc
540 quantize_op.output_min, quantize_op.output_max); in TestRemoveRedundantQuantization()
603 requantization_range_op.output_min, requantization_range_op.output_max, in TestRemoveRedundantQuantizationWithBiasAdd()
608 requantize_op.output_min, requantize_op.output_max); in TestRemoveRedundantQuantizationWithBiasAdd()
639 quantize_op.output_min, quantize_op.output_max); in TestRemoveRedundantQuantizationWithBiasAdd()
729 requantization_range_op.output_min, requantization_range_op.output_max, in TestRemoveRedundantQuantizationWithMultipleOutputs()
734 requantize_op.output_min, requantize_op.output_max); in TestRemoveRedundantQuantizationWithMultipleOutputs()
765 quantize_op.output_min, quantize_op.output_max); in TestRemoveRedundantQuantizationWithMultipleOutputs()
966 requantization_range_op.output_min, requantization_range_op.output_max, in TestMergeAdjacentRequantizes()
971 requantize_op.output_min, requantize_op.output_max, in TestMergeAdjacentRequantizes()
1002 quantize_op.output_min, quantize_op.output_max, fake_requantize_min_op, in TestMergeAdjacentRequantizes()
[all …]
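
Note on the recurring pattern: most of the kernel hits above (quantized_activation_ops.cc, quantized_pooling_ops.cc, quantized_reshape_op.cc, quantized_bias_add_op.cc, and the related tests) match the same idiom, where the op allocates output index 2 as a scalar tensor named output_max and writes the float range maximum into it, mirroring output index 1 for output_min. The sketch below is a minimal illustration of that idiom only; the PassThroughRangeOp class name and its input layout are assumptions made for the example, not taken from any of the files listed, and the op/kernel registration is omitted.

// Minimal sketch (assumed class name and input indices) of the
// output_min/output_max pattern matched in the search hits above.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"

namespace tensorflow {

class PassThroughRangeOp : public OpKernel {
 public:
  explicit PassThroughRangeOp(OpKernelConstruction* context)
      : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    // Assumed input layout: 0 = quantized values, 1 = float range min,
    // 2 = float range max.
    const float min_input = context->input(1).flat<float>()(0);
    const float max_input = context->input(2).flat<float>()(0);

    // Output 0: pass the quantized tensor through unchanged.
    context->set_output(0, context->input(0));

    // Outputs 1 and 2: scalar tensors carrying the output range, as in the
    // allocate_output(2, {}, &output_max) lines matched above.
    Tensor* output_min = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(1, TensorShape({}), &output_min));
    output_min->flat<float>()(0) = min_input;

    Tensor* output_max = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(2, TensorShape({}), &output_max));
    output_max->flat<float>()(0) = max_input;
  }
};

}  // namespace tensorflow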
