// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_

// IWYU pragma: private

#include <stdint.h>

#include <cassert>

#include "base/numerics/safe_conversions.h"

namespace base::internal {

template <typename T, typename U>
struct CheckedMulFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsFastIntegerArithmeticPromotionContained<T, U>;

  // The following is not an assembler routine and is thus constexpr safe; it
  // just emits much more efficient code than the Clang and GCC builtins for
  // performing overflow-checked multiplication when a twice-wider type is
  // available. The below compiles down to 2-3 instructions, depending on the
  // width of the types in use.
  // As an example, an int32_t multiply compiles to:
  //    smull   r0, r1, r0, r1
  //    cmp     r1, r1, asr #31
  // And an int16_t multiply compiles to:
  //    smulbb  r1, r1, r0
  //    asr     r2, r1, #16
  //    cmp     r2, r1, asr #15
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    using Promotion = FastIntegerArithmeticPromotion<T, U>;
    Promotion presult;

    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
    if (!IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<V>(presult);
    return true;
  }
};
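
// Illustrative usage sketch: because Do() above is constexpr, the overflow
// check can be evaluated at compile time when the operands are constants.
// Assuming int16_t operands, for which the fast promotion is a 32-bit type:
//
//   constexpr bool kFits = [] {
//     int16_t product = 0;
//     return CheckedMulFastAsmOp<int16_t, int16_t>::Do(int16_t{100},
//                                                      int16_t{200},
//                                                      &product);
//   }();
//   static_assert(kFits);  // 100 * 200 == 20000 fits in int16_t.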

template <typename T, typename U>
struct ClampedAddFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsBigEnoughPromotionContained<T, U> &&
      kIsTypeInRangeForNumericType<int32_t, BigEnoughPromotion<T, U>>;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if constexpr (kIsIntegerArithmeticSafe<int, T, U>) {
      return saturated_cast<V>(static_cast<int>(x) + static_cast<int>(y));
    } else {
      int32_t result;
      int32_t x_i32 = checked_cast<int32_t>(x);
      int32_t y_i32 = checked_cast<int32_t>(y);

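      // QADD is the Arm DSP saturating add: on overflow the result clamps to
      // INT32_MIN or INT32_MAX instead of wrapping.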
      asm("qadd %[result], %[first], %[second]"
          : [result] "=r"(result)
          : [first] "r"(x_i32), [second] "r"(y_i32));
      return saturated_cast<V>(result);
    }
  }
};
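
// Illustrative sketch, assuming full-width int32_t operands (too wide for the
// promote-to-int branch, so the QADD path is taken):
//
//   int32_t sum =
//       ClampedAddFastAsmOp<int32_t, int32_t>::Do<int32_t>(INT32_MAX, 1);
//   // sum == INT32_MAX: the add saturates instead of wrapping negative.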

template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsBigEnoughPromotionContained<T, U> &&
      kIsTypeInRangeForNumericType<int32_t, BigEnoughPromotion<T, U>>;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if constexpr (kIsIntegerArithmeticSafe<int, T, U>) {
      return saturated_cast<V>(static_cast<int>(x) - static_cast<int>(y));
    } else {
      int32_t result;
      int32_t x_i32 = checked_cast<int32_t>(x);
      int32_t y_i32 = checked_cast<int32_t>(y);

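      // QSUB is the Arm DSP saturating subtract; like QADD it clamps to the
      // int32_t range on overflow rather than wrapping.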
      asm("qsub %[result], %[first], %[second]"
          : [result] "=r"(result)
          : [first] "r"(x_i32), [second] "r"(y_i32));
      return saturated_cast<V>(result);
    }
  }
};
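
// Illustrative sketch, mirroring the addition case above but saturating at
// the negative bound:
//
//   int32_t diff =
//       ClampedSubFastAsmOp<int32_t, int32_t>::Do<int32_t>(INT32_MIN, 1);
//   // diff == INT32_MIN: the subtraction clamps at the minimum value.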

template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && CheckedMulFastAsmOp<T, U>::is_supported;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
    // it's fewer instructions than promoting and then saturating.
    if constexpr (!kIsIntegerArithmeticSafe<int32_t, T, U> &&
                  !kIsIntegerArithmeticSafe<uint32_t, T, U>) {
      V result;
      return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
                 ? result
                 : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
    } else {
      static_assert(kIsFastIntegerArithmeticPromotionContained<T, U>);
      using Promotion = FastIntegerArithmeticPromotion<T, U>;
      return saturated_cast<V>(static_cast<Promotion>(x) *
                               static_cast<Promotion>(y));
    }
  }
};
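
// Illustrative sketch, assuming int16_t operands (narrow enough that the
// promote-and-saturate branch applies):
//
//   int16_t product = ClampedMulFastAsmOp<int16_t, int16_t>::Do<int16_t>(
//       int16_t{1000}, int16_t{1000});
//   // 1000 * 1000 == 1000000 does not fit in int16_t, so product == 32767.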

}  // namespace base::internal

#endif  // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_