/external/tensorflow/tensorflow/lite/kernels/
  table_test.cc
    68: inline float GetLUTTolerance(float input_min, float input_max, float output_min,   [in GetLUTTolerance(), argument]
    74: const float range_sum = (input_max - input_min) + (output_max - output_min);   [in GetLUTTolerance()]
    87: float input_max = 0.8f;   [in TableWithExpLUTToInt8Test(), local]
    91: input_max = 0.8f * std::numeric_limits<InputT>::max() /   [in TableWithExpLUTToInt8Test()]
    105: GetLUTTolerance<TableT>(input_min, input_max, output_min, output_max);   [in TableWithExpLUTToInt8Test()]
    108: TableOpModel m({GetTensorType<InputT>(), {1, 2, 3, 1}, input_min, input_max},   [in TableWithExpLUTToInt8Test()]
    115: [](float v) { return std::exp(v) - 1.204706f; }, input_min, input_max,   [in TableWithExpLUTToInt8Test()]
    134: float input_max = 0.8f;   [in TableWithExpLUTToInt16Test(), local]
    138: input_max = 0.8f * std::numeric_limits<InputT>::max() /   [in TableWithExpLUTToInt16Test()]
    152: GetLUTTolerance<TableT>(input_min, input_max, output_min, output_max);   [in TableWithExpLUTToInt16Test()]
    [all …]
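These TFLite tests build a TableOpModel, fill the table with f(v) = exp(v) - 1.204706 over roughly [-0.8, 0.8], and compare against the float reference within GetLUTTolerance. As a rough illustration of how such an integer lookup table works (a plain NumPy sketch, not the TFLite kernel; the output range and the affine helpers below are assumptions made for this example):

    import numpy as np

    def affine_params(vmin, vmax, qmin=-128, qmax=127):
        """Scale and zero point for mapping [vmin, vmax] onto the int8 grid."""
        scale = (vmax - vmin) / (qmax - qmin)
        zero_point = int(round(qmin - vmin / scale))
        return scale, zero_point

    def build_lut(fn, in_min, in_max, out_min, out_max):
        """Precompute the quantized output for every possible int8 input value."""
        in_scale, in_zp = affine_params(in_min, in_max)
        out_scale, out_zp = affine_params(out_min, out_max)
        q_in = np.arange(-128, 128, dtype=np.int32)
        real = (q_in - in_zp) * in_scale                  # dequantize the input grid
        q_out = np.round(fn(real) / out_scale) + out_zp   # requantize f(x)
        return np.clip(q_out, -128, 127).astype(np.int8)

    # Placeholder output range; the real test derives it from the exp curve.
    lut = build_lut(lambda x: np.exp(x) - 1.204706, -0.8, 0.8, -2.4, 1.6)
    # Applying the op is then one gather per element: output = lut[q_input + 128]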
/external/tensorflow/tensorflow/core/kernels/
  quantization_utils_test.cc
    35: float input_max, float output_min, float output_max,   [in TestRequantizeMany(), argument]
    43: QuantizedToFloat(values_quantized[value_index], input_min, input_max),   [in TestRequantizeMany()]
    55: input_max, output_min, output_max,   [in TestRequantizeMany()]
    59: *eigen_device, i_tensor, input_min, input_max, output_min, output_max,   [in TestRequantizeMany()]
    71: << ", input_max=" << input_max << ", output_min=" << output_min   [in TestRequantizeMany()]
    76: void TestRequantizeMany8To32Bit(float input_min, float input_max,   [in TestRequantizeMany8To32Bit(), argument]
    85: QuantizedToFloat(values_quantized[value_index], input_min, input_max),   [in TestRequantizeMany8To32Bit()]
    96: input_max, output_min, output_max,   [in TestRequantizeMany8To32Bit()]
    107: << ", input_max=" << input_max << ", output_min=" << output_min   [in TestRequantizeMany8To32Bit()]
    231: const float input_max = ranges[range_index][1];   [in TestRequantizeManyInNewRangeEigenVsNonEigen(), local]
    [all …]
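The tests above exercise RequantizeManyInNewRange against a float reference built from QuantizedToFloat and FloatToQuantized. A simplified sketch of that reference path in plain NumPy (the affine mapping below uses the usual min/max convention and may not match quantization_utils.h bit for bit):

    import numpy as np

    def quantized_to_float(q, vmin, vmax, bits=8, signed=False):
        """Map the integer grid of a `bits`-wide type back onto [vmin, vmax]."""
        lowest = -(1 << (bits - 1)) if signed else 0
        scale = (vmax - vmin) / ((1 << bits) - 1)
        return vmin + (np.asarray(q, dtype=np.float64) - lowest) * scale

    def float_to_quantized(x, vmin, vmax, bits=8, signed=False):
        """Inverse mapping, rounding to the nearest representable step."""
        lowest = -(1 << (bits - 1)) if signed else 0
        steps = (1 << bits) - 1
        scale = (vmax - vmin) / steps
        q = np.round((np.asarray(x, dtype=np.float64) - vmin) / scale) + lowest
        return np.clip(q, lowest, lowest + steps).astype(np.int64)

    def requantize(q, in_min, in_max, out_min, out_max, out_bits=32):
        # Reference path used by the tests: dequantize with the input range,
        # then quantize again with the requested output range.
        return float_to_quantized(quantized_to_float(q, in_min, in_max),
                                  out_min, out_max, bits=out_bits, signed=True)

    print(requantize([0, 128, 255], 0.0, 6.0, -1.0, 1.0, out_bits=8))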
  quantized_activation_ops_test.cc
    46: const float input_max = 127.0f;   [in TEST_F(), local]
    52: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    59: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
    77: const float input_max = 127.0f;   [in TEST_F(), local]
    83: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    90: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
  quantized_pooling_ops_test.cc
    52: const float input_max = 255.0f;   [in TEST_F(), local]
    62: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    73: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
    97: const float input_max = 255.0f;   [in TEST_F(), local]
    107: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    118: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
  quantized_bias_add_op_test.cc
    52: const float input_max = 60.0f;   [in TEST_F(), local]
    59: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    78: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
    102: const float input_max = 2006.27f;   [in TEST_F(), local]
    119: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    160: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
  quantize_and_dequantize_op.h
    68: typename TTypes<T>::ConstScalar input_max,
    190: auto input_max = input_max_tensor->scalar<T>();   [in Compute(), local]
    193: input_max.device(d) = input.maximum();   [in Compute()]
    195: d.memcpyDeviceToHost(&max_range, input_max.data(), sizeof(T));   [in Compute()]
    231: auto input_max = input_max_tensor->vec<T>();   [in Compute(), local]
    238: input_max.device(d) = input.maximum(reduce_dims);   [in Compute()]
    241: d.memcpyDeviceToHost(max_range.data(), input_max.data(),   [in Compute()]
    275: typename TTypes<T>::ConstScalar input_max,   [in Compute()]
    280: const T max_val = input_max();   [in Compute()]
    302: auto input_max = input_max_tensor->vec<T>();   [in Compute(), local]
    [all …]
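In the functor above, when no range is given the kernel derives input_min and input_max from the tensor itself (input.minimum() / input.maximum(), copied back to the host) and then quantizes and dequantizes with that range. A hedged NumPy sketch of the per-tensor round trip; it ignores the axis, round_mode, and narrow_range options, so it is an illustration of the idea rather than the op's exact semantics:

    import numpy as np

    def quantize_and_dequantize(x, num_bits=8, signed=True,
                                input_min=None, input_max=None, range_given=False):
        """Round-trip x through a num_bits integer grid (per-tensor sketch)."""
        x = np.asarray(x, dtype=np.float32)
        if not range_given:
            # Mirrors input.minimum() / input.maximum() in the functor.
            input_min, input_max = float(x.min()), float(x.max())
        qmin = -(1 << (num_bits - 1)) if signed else 0
        qmax = (1 << (num_bits - 1)) - 1 if signed else (1 << num_bits) - 1
        # One scale for the whole tensor, taken from the more constraining bound.
        scale = max(input_max / qmax if qmax else 0.0,
                    input_min / qmin if qmin else 0.0)
        if scale == 0.0:
            return np.zeros_like(x)
        q = np.clip(np.round(x / scale), qmin, qmax)
        return q * scale

    print(quantize_and_dequantize([-10.0, 0.1, 5.0]))   # -> [-10.0, 0.078125, 5.0]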
  requantize.cc
    45: const Tensor& input_max = ctx->input(2);   [in Compute(), local]
    53: ctx, TensorShapeUtils::IsScalar(input_max.shape()),   [in Compute()]
    55: input_max.dims()));   [in Compute()]
    66: const float input_max_float = input_max.flat<float>()(0);   [in Compute()]
  quantize_down_and_shrink_range.cc
    44: const Tensor& input_max = ctx->input(2);   [in Compute(), local]
    51: ctx, TensorShapeUtils::IsScalar(input_max.shape()),   [in Compute()]
    53: input_max.dims()));   [in Compute()]
    56: const float input_max_float = input_max.scalar<float>()();   [in Compute()]
  quantized_batch_norm_op_test.cc
    61: const float input_max = 127.0f;   [in TEST_F(), local]
    71: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    100: AddInputFromArray<float>(TensorShape({1}), {input_max});   [in TEST_F()]
    158: const float input_max = 127.0f;   [in TEST_F(), local]
    168: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    197: AddInputFromArray<float>(TensorShape({1}), {input_max});   [in TEST_F()]
/external/tensorflow/tensorflow/core/api_def/base_api/
  api_def_QuantizeAndDequantizeV2.pbtxt
    18: name: "input_max"
    92: If range_given == False, the initial input_min, input_max will be determined
    94: the specified values of input_min, input_max are used.
    96: Note: If the input_min, input_max are specified, they do not need to equal the
    102: [input_min, input_max] range to a range that lies within the representable
    105: It determines the scale from one of input_min and input_max, then updates the
    110: * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    112: would update input_max to be 127 / 12.8 = 9.921875
    113: * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    117: specified input_max is used.
    [all …]
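The api_def text above says that, with a given range, the op first squishes [input_min, input_max] onto the representable integer range: it takes the scale from the more constraining bound and then widens the other bound so the full grid is usable. The quoted numbers (signed output, num_bits = 8, input_min = -10.0, adjusted input_max = 127 / 12.8 = 9.921875) work out as below; the 5.0 upper bound is only an illustrative value, since the snippet is truncated:

    # Reproducing the adjustment arithmetic quoted in the api_def snippet above
    # (signed output, num_bits = 8, input_min = -10.0; input_max = 5.0 is assumed).
    quant_min, quant_max = -128, 127
    input_min, input_max = -10.0, 5.0

    # The scale comes from whichever bound needs the most room on the integer grid:
    scale = max(input_min / quant_min, input_max / quant_max)   # -10.0 / -128 = 0.078125
    inv_scale = 1.0 / scale                                     # 12.8

    # The other bound is then pushed out to cover the whole grid:
    adjusted_max = quant_max * scale   # 127 / 12.8 = 9.921875
    adjusted_min = quant_min * scale   # stays at -10.0
    print(adjusted_min, adjusted_max)  # -10.0 9.921875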
/external/tensorflow/tensorflow/core/common_runtime/
  quantize_training.cc
    53: float input_max;   [member]
    62: input_max(max) {}   [in EdgeToConvert()]
    78: bool* range_given, float* input_min, float* input_max) {   [in FindType(), argument]
    94: *input_max = 6;   [in FindType()]
    99: *input_max = 1;   [in FindType()]
    104: *input_max = 1;   [in FindType()]
    112: input_max);   [in FindType()]
    122: input_max);   [in FindType()]
    503: Node** input_max) {   [in MakeInputMinMax(), argument]
    515: input_max_tensor.flat<float>()(0) = edge.input_max;   [in MakeInputMinMax()]
    [all …]
/external/tensorflow/tensorflow/core/kernels/mkl/
  mkl_quantized_pooling_ops_test.cc
    51: const float input_max = 255.0f;   [in TEST_F(), local]
    61: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    80: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
    108: const float input_max = 255.0f;   [in TEST_F(), local]
    118: FloatTensorToQuantized<quint8>(input_float, input_min, input_max);   [in TEST_F()]
    136: AddInputFromArray<float>(TensorShape({}), {input_max});   [in TEST_F()]
  mkl_requantization_range_per_channel_op.cc
    49: const Tensor& input_max = ctx->input(kInputMaxIndex);   [in Compute(), local]
    51: const size_t depth = input_max.NumElements();   [in Compute()]
    57: ctx, input_max.dim_size(0) == depth,   [in Compute()]
    59: depth, " was ", input_max.dim_size(0)));   [in Compute()]
    76: const float* input_max_data = input_max.flat<float>().data();   [in Compute()]
/external/tensorflow/tensorflow/lite/delegates/hexagon/builders/tests/
  activations_test.cc
    195: void TestQuantizedHardSwish(int size, float input_min, float input_max,   [in TestQuantizedHardSwish(), argument]
    199: GenerateUniformRandomVector(size, input_min, input_max, random_engine,   [in TestQuantizedHardSwish()]
    209: /*input=*/{Tensor_Type, {1, 1, 1, size}, input_min, input_max},   [in TestQuantizedHardSwish()]
    225: std::max(input_max - input_min, output_max - output_min) * (1.5f / 256.f);   [in TestQuantizedHardSwish()]
    238: float input_max = input_minmax.second;   [in HardSwishTestImpl(), local]
    243: size, input_min, input_max, output_min, output_max, &random_engine);   [in HardSwishTestImpl()]
    260: float input_max = 25.036512f;   [in HardSwishBiasTestImpl(), local]
    268: const float input_scale = (input_max - input_min) / quantized_type_range;   [in HardSwishBiasTestImpl()]
    275: ASSERT_GE(input_max, 3.0f);   [in HardSwishBiasTestImpl()]
    298: /*input=*/{Tensor_Type, {1, 1, 1, size}, input_min, input_max},   [in HardSwishBiasTestImpl()]
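The Hexagon delegate test feeds uniform random data in [input_min, input_max] through quantized HardSwish and compares it with a float reference, allowing an error of max(input range, output range) * 1.5 / 256 (line 225 above). A small sketch of that reference and tolerance, assuming the standard hard-swish definition x * relu6(x + 3) / 6; the helpers and the [-8, 8] range are illustrative, not the test's code:

    import numpy as np

    def hard_swish_reference(x):
        """Standard hard-swish: x * relu6(x + 3) / 6."""
        x = np.asarray(x, dtype=np.float32)
        return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0

    def hard_swish_tolerance(input_min, input_max, output_min, output_max):
        # Same expression as line 225 above: 1.5 steps of the coarser 8-bit grid.
        return max(input_max - input_min, output_max - output_min) * (1.5 / 256.0)

    x = np.random.uniform(-8.0, 8.0, size=32).astype(np.float32)
    ref = hard_swish_reference(x)
    tol = hard_swish_tolerance(-8.0, 8.0, float(ref.min()), float(ref.max()))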
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
  fake_quantize_ops.cc
    103: float input_min, input_max, scale;   [in FakeQuantWithMinMaxArgsGradOp(), local]
    105: OP_REQUIRES_OK(ctx, ctx->GetAttr("max", &input_max));   [in FakeQuantWithMinMaxArgsGradOp()]
    106: CpuNudge(input_min, input_max, quant_min, quant_max, &nudged_input_min_,   [in FakeQuantWithMinMaxArgsGradOp()]
    157: xla::XlaOp input_max = ctx->Input(2);   [in Compile(), local]
    160: XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_,   [in Compile()]
    201: xla::XlaOp input_max = ctx->Input(3);   [in Compile(), local]
    205: XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_,   [in Compile()]
    262: xla::XlaOp input_max = ctx->Input(2);   [in Compile(), local]
    271: input_max = convert_to_input_shape(input_max);   [in Compile()]
    274: XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_,   [in Compile()]
    [all …]
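Both CpuNudge and XlaNudge adjust [input_min, input_max] so that 0.0 lands exactly on an integer step before fake-quantizing. A sketch of the usual nudging computation used by TensorFlow's FakeQuant kernels, written from the standard algorithm rather than copied from fake_quantize_ops.cc:

    def nudge(input_min, input_max, quant_min, quant_max):
        """Shift [input_min, input_max] so that 0.0 maps exactly onto an integer step."""
        scale = (input_max - input_min) / (quant_max - quant_min)
        zero_point_from_min = quant_min - input_min / scale
        # Clamp the zero point into the quantized range, then round it onto the grid.
        if zero_point_from_min < quant_min:
            nudged_zero_point = quant_min
        elif zero_point_from_min > quant_max:
            nudged_zero_point = quant_max
        else:
            nudged_zero_point = round(zero_point_from_min)
        nudged_min = (quant_min - nudged_zero_point) * scale
        nudged_max = (quant_max - nudged_zero_point) * scale
        return nudged_min, nudged_max, scale

    # An asymmetric range whose zero does not fall on a step gets nudged slightly:
    print(nudge(-0.1, 1.0, 0, 255))   # ≈ (-0.0992, 1.0008, 0.004314)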
/external/tensorflow/tensorflow/security/advisory/
  tfsa-2022-108.md
    7: If `QuantizeDownAndShrinkRange` is given nonscalar inputs for `input_min` or `input_max`, it result…
    14: input_max = tf.constant(-256, shape=[1], dtype=tf.float32)
    15: tf.raw_ops.QuantizeDownAndShrinkRange(input=input, input_min=input_min, input_max=input_max, out_ty…
  tfsa-2022-104.md
    7: If `Requantize` is given `input_min`, `input_max`, `requested_output_min`, `requested_output_max` t…
    14: input_max = tf.constant(-256, shape=[1], dtype=tf.float32)
    17: tf.raw_ops.Requantize(input=input, input_min=input_min, input_max=input_max, requested_output_min=r…
  tfsa-2021-058.md
    14: input_max = tf.constant([], dtype=tf.float32)
    17: tf.raw_ops.RequantizationRange(input=input, input_min=input_min, input_max=input_max)
    22: assumes that the `input_min` and `input_max` tensors have at least one element,
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
  QuantizeAndDequantizeV2.pbtxt
    12, 62, 113, 165, 230, 302: name: "input_max"
  QuantizeAndDequantize.pbtxt
    40, 96, 155, 214, 274: name: "input_max"
  QuantizeAndDequantizeV3.pbtxt
    12, 59, 107, 156, 212: name: "input_max"
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
  QuantizeAndDequantize.pbtxt
    40, 96, 155, 214, 274: name: "input_max"
  QuantizeAndDequantizeV2.pbtxt
    12, 62, 113, 165, 230: name: "input_max"
  QuantizeAndDequantizeV3.pbtxt
    12, 59, 107, 156: name: "input_max"
/external/tensorflow/tensorflow/python/kernel_tests/quantization_ops/
  quantization_ops_test.py
    303: input_max=1.0,
    314: input_max=[],
    325: input_max=1.0,
    336: input_max=1.0,
    408: input=inputs, input_min=[], input_max=4.0,
    421: input_max = constant_op.constant(127, shape=(), dtype=dtypes.float32)
    427: input_max,
    440: input_max = constant_op.constant(127, shape=(), dtype=dtypes.float32)
    449: input_value, input_min, input_max, num_bits, signed_input=True)