Searched full:clamp_min (Results 1 – 25 of 151) sorted by relevance

/external/tensorflow/tensorflow/lite/toco/graph_transformations/
remove_trivial_quantized_activation_func.cc:36 double clamp_min; in IsTrivialUnfusedActivationFunc() local
40 clamp_min = 0.0; in IsTrivialUnfusedActivationFunc()
44 clamp_min = -1.0; in IsTrivialUnfusedActivationFunc()
48 clamp_min = 0.0; in IsTrivialUnfusedActivationFunc()
56 return IsArrayQuantizedRangeSubset(transformation, input_array, clamp_min, in IsTrivialUnfusedActivationFunc()
64 double clamp_min; in IsTrivialFusedActivationFunc() local
70 clamp_min = 0.0; in IsTrivialFusedActivationFunc()
74 clamp_min = -1.0; in IsTrivialFusedActivationFunc()
78 clamp_min = 0.0; in IsTrivialFusedActivationFunc()
88 return IsArrayQuantizedRangeSubset(transformation, output_array, clamp_min, in IsTrivialFusedActivationFunc()
remove_trivial_quantized_min_max.cc:47 double clamp_min; in IsTrivialMinMax() local
51 clamp_min = -std::numeric_limits<double>::infinity(); in IsTrivialMinMax()
55 clamp_min = clamp_value; in IsTrivialMinMax()
64 return IsArrayQuantizedRangeSubset(transformation, input_array, clamp_min, in IsTrivialMinMax()
quantization_util.h:57 // Either clamp_min or clamp_max may be +/-infinity to indicate that the value
60 const Array& array, double clamp_min,
quantization_util.cc:213 const Array& array, double clamp_min, in IsArrayQuantizedRangeSubset() argument
255 if (lowest_representable_output < clamp_min) { in IsArrayQuantizedRangeSubset()
261 lowest_representable_output, clamp_min); in IsArrayQuantizedRangeSubset()
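The quantization_util hits above are the core of these "remove trivial clamp" transformations: a clamp is a no-op when every value the quantized array can represent already lies inside [clamp_min, clamp_max] (either bound may be +/-infinity). A minimal standalone sketch of that check, using a hypothetical helper name and an assumed uint8 quantization, not TOCO's actual IsArrayQuantizedRangeSubset signature:

    #include <cstdint>
    #include <limits>

    // Hypothetical sketch: true when the clamp [clamp_min, clamp_max] covers
    // everything a uint8-quantized array can represent
    // (real = scale * (q - zero_point)), i.e. the clamp is trivial and removable.
    bool ClampIsTrivialForQuantizedArray(double scale, int zero_point,
                                         double clamp_min, double clamp_max) {
      const double lowest_representable =
          scale * (std::numeric_limits<std::uint8_t>::min() - zero_point);
      const double highest_representable =
          scale * (std::numeric_limits<std::uint8_t>::max() - zero_point);
      return lowest_representable >= clamp_min &&
             highest_representable <= clamp_max;
    }
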
/external/XNNPACK/tools/
dump-jit-output.py:9 # dump-jit-output.py <path to JIT cc file> --max_mr=6 [--clamp_min]
15 # The parameters prefetch, clamp_min, clamp_max defaults to True if not
30 parser.add_argument("--clamp_min", action="store_true")
64 'clamp_min': options.clamp_min,
/external/ruy/ruy/
kernel_common.h:121 std::int32_t clamp_min;
197 params->clamp_min = mul_params.clamp_min();
231 float clamp_min;
274 params->clamp_min = mul_params.clamp_min();
mul_params.h:152 DstScalar clamp_min() const { return storage_.clamp_min; } in clamp_min() function
153 void set_clamp_min(const DstScalar value) { storage_.clamp_min = value; } in set_clamp_min()
226 DstScalar clamp_min = -std::numeric_limits<DstScalar>::infinity(); member
266 DstScalar clamp_min = std::numeric_limits<DstScalar>::lowest();
293 static constexpr DstScalar clamp_min =
mul_params_test.cc:38 EXPECT_EQ(mul_params.clamp_min(), -128); in TEST()
69 EXPECT_EQ(mul_params.clamp_min(), -10); in TEST()
kernel_arm32.cc:72 static_assert(offsetof(Params, clamp_min) == RUY_OFFSET_CLAMP_MIN, ""); in CheckOffsetsInKernelParamsFloat32()
350 // Load the clamp_min, clamp_max bounds in KernelFloat32Neon()
353 "vdup.32 q12, r2\n" // clamp_min in KernelFloat32Neon()
356 // Apply the clamp_min bound in KernelFloat32Neon()
612 static_assert(offsetof(Params, clamp_min) == RUY_OFFSET_CLAMP_MIN, ""); in CheckOffsetsInKernelParams8bit()
1121 // Load the clamp_min, clamp_max bounds in Kernel8bitNeon()
1124 "vdup.8 d28, r2\n" // clamp_min in Kernel8bitNeon()
1127 // Apply the clamp_min bound in Kernel8bitNeon()
1245 // Load the clamp_min, clamp_max bounds in Kernel8bitNeon()
1248 "vdup.8 d28, r2\n" // clamp_min in Kernel8bitNeon()
[all …]
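The ruy hits above show where the clamp bounds live: MulParams stores clamp_min/clamp_max (defaulting to -infinity for floats and numeric_limits::lowest() for integral types), the kernel params copy them, and the ARM kernels broadcast and apply them with vdup/vmax. From the caller's side the usage looks roughly like the sketch below, assuming ruy is available; the shapes and values are made up:

    #include "ruy/ruy.h"

    void MultiplyWithFusedClamp() {
      const float lhs_data[] = {1, 2, 3, 4};
      const float rhs_data[] = {1, 2, 3, 4};
      float dst_data[4];

      ruy::Context context;
      ruy::Matrix<float> lhs, rhs, dst;
      ruy::MakeSimpleLayout(2, 2, ruy::Order::kRowMajor, lhs.mutable_layout());
      lhs.set_data(lhs_data);
      ruy::MakeSimpleLayout(2, 2, ruy::Order::kColMajor, rhs.mutable_layout());
      rhs.set_data(rhs_data);
      ruy::MakeSimpleLayout(2, 2, ruy::Order::kColMajor, dst.mutable_layout());
      dst.set_data(dst_data);

      // Fused ReLU6: the kernel clamps each destination value to [0, 6] as it
      // is stored, instead of needing a separate pass over dst.
      ruy::MulParams<float, float> mul_params;
      mul_params.set_clamp_min(0.0f);
      mul_params.set_clamp_max(6.0f);
      ruy::Mul(lhs, rhs, mul_params, &context, &dst);
    }
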
/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_gemm_test.cc:130 void Clamp(const std::vector<Scalar>& src, Scalar clamp_min, Scalar clamp_max, in Clamp() argument
134 (*dst)[i] = std::max(std::min(src[i], clamp_max), clamp_min); in Clamp()
141 DstScalar clamp_min, DstScalar clamp_max, in Clamp() argument
144 dst->clamp_min = clamp_min; in Clamp()
269 DstScalar clamp_min, clamp_max; in PerformGemmThenCompareResultsThenAgainWithClamping() local
271 clamp_min = std::numeric_limits<DstScalar>::lowest(); in PerformGemmThenCompareResultsThenAgainWithClamping()
273 Clamp(expected, clamp_min, clamp_max, &expected_with_clamp); in PerformGemmThenCompareResultsThenAgainWithClamping()
274 Clamp(params, clamp_min, clamp_max, &params_with_clamp); in PerformGemmThenCompareResultsThenAgainWithClamping()
280 clamp_min = expected_median; in PerformGemmThenCompareResultsThenAgainWithClamping()
282 Clamp(expected, clamp_min, clamp_max, &expected_with_clamp); in PerformGemmThenCompareResultsThenAgainWithClamping()
[all …]
cpu_backend_gemm_eigen.cc:70 BiasAndClamp(params.clamp_min, params.clamp_max, dst_params.rows, in Run()
73 eigen_dst = eigen_dst.cwiseMin(params.clamp_max).cwiseMax(params.clamp_min); in Run()
cpu_backend_gemm_custom_gemv.h:236 inline void ClampAndStore(int32x4_t src, std::uint8_t clamp_min, in ClampAndStore() argument
243 res8 = vmax_u8(res8, vdup_n_u8(clamp_min)); in ClampAndStore()
252 inline void ClampAndStore(int32x4_t src, std::int8_t clamp_min, in ClampAndStore() argument
259 res8 = vmax_s8(res8, vdup_n_s8(clamp_min)); in ClampAndStore()
268 inline void ClampAndStore(int32x4_t src, std::int16_t clamp_min, in ClampAndStore() argument
273 res16 = vmax_s16(res16, vdup_n_s16(clamp_min)); in ClampAndStore()
585 ClampAndStore(reduced, params.clamp_min, params.clamp_max,
775 reduced = vmaxq_f32(reduced, vdupq_n_f32(params.clamp_min));
cpu_backend_gemm_ruy.h:71 ruy_mul_params->set_clamp_min(params.clamp_min); in Run()
97 ruy_mul_params->set_clamp_min(params.clamp_min);
cpu_backend_gemm_gemmlowp.h:104 clamp_stage.min = params.clamp_min;
171 clamp_stage.min = params.clamp_min;
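Across the cpu_backend_gemm hits above, the same clamp_min/clamp_max pair from the GEMM params is forwarded to whichever backend runs the multiplication: ruy via set_clamp_min, gemmlowp via a clamp output stage, the NEON GEMV kernels via vmax/vmin, and Eigen via a cwiseMax/cwiseMin epilogue. A standalone sketch of the Eigen-style epilogue, assuming Eigen is available (the ClampDst name is made up):

    #include <Eigen/Core>

    // Clamp the destination the way the Eigen backend does: cwiseMax applies
    // the lower bound, cwiseMin the upper bound, element-wise.
    void ClampDst(Eigen::MatrixXf& dst, float clamp_min, float clamp_max) {
      dst = dst.cwiseMax(clamp_min).cwiseMin(clamp_max);
    }
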
/external/rust/android-crates-io/crates/num-traits/src/
lib.rs:417 /// `clamp_min(std::f32::NAN, 1.0)` preserves `NAN` different from `f32::min(std::f32::NAN, 1.0)`.
422 pub fn clamp_min<T: PartialOrd>(input: T, min: T) -> T { in clamp_min() function
455 assert_eq!(1, clamp_min(1, -1)); in clamp_test()
456 assert_eq!(-1, clamp_min(-2, -1)); in clamp_test()
464 assert_eq!(1.0, clamp_min(1.0, -1.0)); in clamp_test()
465 assert_eq!(-1.0, clamp_min(-2.0, -1.0)); in clamp_test()
469 assert!(clamp_min(::core::f32::NAN, 1.0).is_nan()); in clamp_test()
498 clamp_min(0., ::core::f32::NAN); in clamp_min_nan_min()
/external/pytorch/aten/src/ATen/native/
Loss.cpp:21 #include <ATen/ops/clamp_min.h>
187 // using inplace clamp_min doesn't work because we end up writing in hinge_embedding_loss()
191 ? margin_diff.clamp_min(0) in hinge_embedding_loss()
221 auto output = at::clamp_min(margin + dist_pos - dist_neg, 0); in triplet_margin_loss()
229 // using inplace clamp_min doesn't work because we end up writing in margin_ranking_loss()
233 ? unclamped_output.clamp_min(0) in margin_ranking_loss()
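The ATen hits above use an out-of-place clamp_min(0) for the max(0, .) term of the margin losses; the surrounding comments note that the in-place variant cannot be used there. A minimal libtorch sketch of the same pattern, with a made-up function name and inputs, not PyTorch's actual triplet_margin_loss implementation:

    #include <torch/torch.h>

    // max(0, d(a, p) - d(a, n) + margin), written with clamp_min as in the
    // ATen loss kernels, then reduced to a scalar with mean().
    torch::Tensor HingeStyleLoss(const torch::Tensor& dist_pos,
                                 const torch::Tensor& dist_neg, double margin) {
      return at::clamp_min(dist_pos - dist_neg + margin, 0).mean();
    }
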
/external/ComputeLibrary/src/cpu/kernels/
CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp:150 …const int clamp_min = (_is_bounded_relu) ? _output_stage->gemmlowp_min_bound : std::numeric_limits… in run_internal() local
153 VectorType min = wrapper::vdup_n(static_cast<T>(clamp_min), wrapper::traits::vector_128_tag{}); in run_internal()
217 … *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max)); in run_internal()
255 … *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max)); in run_internal()
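The ComputeLibrary kernel above picks its clamp bounds once per run: the gemmlowp min/max bounds when a bounded ReLU is fused, otherwise the full range of the output type, and then clamps every requantized value. A scalar sketch of that choice for an int8 output (the function name is hypothetical):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Hypothetical scalar version of the quantize-down epilogue: pick the clamp
    // bounds, then clamp the already-requantized int32 value into int8.
    std::int8_t QuantizeDownAndClamp(std::int32_t in_value, bool is_bounded_relu,
                                     int relu_min_bound, int relu_max_bound) {
      const int clamp_min = is_bounded_relu
                                ? relu_min_bound
                                : std::numeric_limits<std::int8_t>::lowest();
      const int clamp_max = is_bounded_relu
                                ? relu_max_bound
                                : std::numeric_limits<std::int8_t>::max();
      return static_cast<std::int8_t>(
          std::clamp<int>(in_value, clamp_min, clamp_max));
    }
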
/external/tensorflow/tensorflow/core/kernels/
meta_support.h:104 // Take an array of uint8 values and clamp them to the range [clamp_min,
107 quint8 clamp_min, quint8 clamp_max, quint8* output);
/external/pytorch/torch/_decomp/
__init__.py:157 @register_decomposition(torch.ops.aten.clamp_min)
158 def clamp_min(x):
283 aten.clamp_min,
/external/pytorch/test/cpp/api/
integration.cpp:168 auto x = linear->forward(inp).clamp_min(0); in TEST_F()
271 x = linear1->forward(x).clamp_min(0); in TEST_F()
307 x = linear1->forward(x).clamp_min(0); in TEST_F()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
fully_connected.h:91 gemm_params.clamp_min = output_activation_min; in FullyConnectedPerChannel()
160 gemm_params.clamp_min = output_activation_min; in FullyConnected()
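In the quantized fully-connected kernels above, the fused activation's range is already expressed in the quantized output domain (output_activation_min/max) and is simply copied into the GEMM clamp bounds. A sketch of how one float activation bound is typically mapped into that domain; the helper below is a hypothetical, simplified stand-in for TFLite's own activation-range utilities:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Quantize a float activation bound (e.g. ReLU6's 0.0f or 6.0f) into the
    // int8 output domain so it can be used as gemm_params.clamp_min/clamp_max.
    std::int8_t QuantizeActivationBound(float bound, float output_scale,
                                        int output_zero_point) {
      const int q = output_zero_point +
                    static_cast<int>(std::round(bound / output_scale));
      return static_cast<std::int8_t>(
          std::clamp<int>(q, std::numeric_limits<std::int8_t>::min(),
                          std::numeric_limits<std::int8_t>::max()));
    }
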
/external/pytorch/functorch/dim/
op_properties.py:111 torch.Tensor.clamp_min,
112 torch.clamp_min,
/external/XNNPACK/src/f32-gemm/
4x8-aarch64-neonfma-cortex-a75.cc:37 // x13 used to store pointer to params->max if (!clamp_min && clamp_max).
73 const bool clamp_min = min != -std::numeric_limits<float>::infinity(); in generate() local
80 if (clamp_min && clamp_max) { in generate()
82 } else if (clamp_min) { in generate()
321 if (clamp_min && clamp_max) { in generate()
323 } else if (clamp_min) { in generate()
428 if (clamp_min) { in generate()
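The XNNPACK generator above decides at code-generation time whether to emit the min and/or max clamp: clamp_min is only true when the requested minimum is not -infinity, so a kernel generated for an unbounded activation carries no clamp instructions at all. A scalar C++ analog of that specialization (made-up function, not XNNPACK's generator API):

    #include <algorithm>
    #include <limits>

    // Mirror of the generator's clamp_min / clamp_max flags: only apply the
    // bounds that are actually finite.
    float ApplyFusedClamp(float acc, float min, float max) {
      const bool clamp_min = min != -std::numeric_limits<float>::infinity();
      const bool clamp_max = max != std::numeric_limits<float>::infinity();
      if (clamp_min) acc = std::max(acc, min);
      if (clamp_max) acc = std::min(acc, max);
      return acc;
    }
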
/external/XNNPACK/src/f32-igemm/
4x8-aarch64-neonfma-cortex-a75.cc:39 // x21 used to store params->max if (!clamp_min && clamp_max).
75 const bool clamp_min = min != -std::numeric_limits<float>::infinity(); in generate() local
85 if (clamp_min && clamp_max) { in generate()
87 } else if (clamp_min) { in generate()
342 if (clamp_min && clamp_max) { in generate()
344 } else if (clamp_min) { in generate()
452 if (clamp_min) { in generate()
/external/tensorflow/tensorflow/security/advisory/
tfsa-2022-021.md:10 inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
