// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_

// IWYU pragma: private

#include <cassert>
#include <type_traits>

#include "base/numerics/safe_conversions.h"

namespace base::internal {

template <typename T, typename U>
struct CheckedMulFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsFastIntegerArithmeticPromotionContained<T, U>;

  // The following is not an assembler routine and is thus constexpr safe; it
  // just emits much more efficient code than the Clang and GCC builtins for
  // performing overflow-checked multiplication when a twice-wider type is
  // available. The below compiles down to 2-3 instructions, depending on the
  // width of the types in use.
  //
  // As an example, an int32_t multiply compiles to:
  //    smull   r0, r1, r0, r1
  //    cmp     r1, r1, asr #31
  // And an int16_t multiply compiles to:
  //    smulbb  r1, r1, r0
  //    asr     r2, r1, #16
  //    cmp     r2, r1, asr #15
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    using Promotion = FastIntegerArithmeticPromotion<T, U>;
    Promotion presult;

    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
    if (!IsValueInRangeForNumericType<V>(presult)) {
      return false;
    }
    *result = static_cast<V>(presult);
    return true;
  }
};

template <typename T, typename U>
struct ClampedAddFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsBigEnoughPromotionContained<T, U> &&
      kIsTypeInRangeForNumericType<int32_t, BigEnoughPromotion<T, U>>;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if constexpr (kIsIntegerArithmeticSafe<int, T, U>) {
      return saturated_cast<V>(static_cast<int>(x) + static_cast<int>(y));
    } else {
      int32_t result;
      int32_t x_i32 = checked_cast<int32_t>(x);
      int32_t y_i32 = checked_cast<int32_t>(y);

      asm("qadd %[result], %[first], %[second]"
          : [result] "=r"(result)
          : [first] "r"(x_i32), [second] "r"(y_i32));
      return saturated_cast<V>(result);
    }
  }
};

template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && kIsBigEnoughPromotionContained<T, U> &&
      kIsTypeInRangeForNumericType<int32_t, BigEnoughPromotion<T, U>>;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if constexpr (kIsIntegerArithmeticSafe<int, T, U>) {
      return saturated_cast<V>(static_cast<int>(x) - static_cast<int>(y));
    } else {
      int32_t result;
      int32_t x_i32 = checked_cast<int32_t>(x);
      int32_t y_i32 = checked_cast<int32_t>(y);

      asm("qsub %[result], %[first], %[second]"
          : [result] "=r"(result)
          : [first] "r"(x_i32), [second] "r"(y_i32));
      return saturated_cast<V>(result);
    }
  }
};

template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static constexpr bool is_supported =
      kEnableAsmCode && CheckedMulFastAsmOp<T, U>::is_supported;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
    // it's fewer instructions than promoting and then saturating.
    if constexpr (!kIsIntegerArithmeticSafe<int32_t, T, U> &&
                  !kIsIntegerArithmeticSafe<uint32_t, T, U>) {
      V result;
      return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
                 ? result
                 : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
    } else {
      static_assert(kIsFastIntegerArithmeticPromotionContained<T, U>);
      using Promotion = FastIntegerArithmeticPromotion<T, U>;
      return saturated_cast<V>(static_cast<Promotion>(x) *
                               static_cast<Promotion>(y));
    }
  }
};

}  // namespace base::internal

#endif  // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
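
// ---------------------------------------------------------------------------
// Usage sketch (not part of the header): a minimal, self-contained
// illustration of the widening-multiply overflow check that
// CheckedMulFastAsmOp::Do performs above. CheckedMul16 is a hypothetical
// standalone helper, hard-coded to int16_t operands for clarity.
//
//   #include <cstdint>
//   #include <limits>
//
//   // Multiply in the twice-wider type, then range-check the result against
//   // the destination type, mirroring the FastIntegerArithmeticPromotion
//   // strategy used by CheckedMulFastAsmOp.
//   bool CheckedMul16(int16_t x, int16_t y, int16_t* result) {
//     int32_t wide = static_cast<int32_t>(x) * static_cast<int32_t>(y);
//     if (wide < std::numeric_limits<int16_t>::min() ||
//         wide > std::numeric_limits<int16_t>::max()) {
//       return false;  // e.g. 300 * 300 = 90000 does not fit in int16_t.
//     }
//     *result = static_cast<int16_t>(wide);
//     return true;
//   }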
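
// Saturation sketch (not part of the header): qadd/qsub are ARMv5TE+ DSP
// instructions that clamp a signed 32-bit result to [INT32_MIN, INT32_MAX]
// instead of wrapping, which is what the asm paths in ClampedAddFastAsmOp
// and ClampedSubFastAsmOp rely on. SaturatingAdd32 is a hypothetical,
// portable equivalent of the qadd path:
//
//   #include <cstdint>
//   #include <limits>
//
//   int32_t SaturatingAdd32(int32_t x, int32_t y) {
//     // Add in a wider type, then clamp to the int32_t range, matching the
//     // qadd instruction's saturating semantics.
//     int64_t wide = static_cast<int64_t>(x) + static_cast<int64_t>(y);
//     if (wide > std::numeric_limits<int32_t>::max()) {
//       return std::numeric_limits<int32_t>::max();
//     }
//     if (wide < std::numeric_limits<int32_t>::min()) {
//       return std::numeric_limits<int32_t>::min();
//     }
//     return static_cast<int32_t>(wide);
//   }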
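
// Sign-selection sketch (not part of the header): when the checked multiply
// overflows, ClampedMulFastAsmOp::Do saturates toward the sign of the true
// product. Operands with differing signs produce a negative product, so the
// XOR of the operand signs picks the clamping direction. SaturationLimit32
// is a hypothetical helper mirroring what the CommonMaxOrMin call computes:
//
//   #include <cstdint>
//   #include <limits>
//
//   int32_t SaturationLimit32(int32_t x, int32_t y) {
//     bool product_is_negative = (x < 0) != (y < 0);  // Sign XOR.
//     return product_is_negative ? std::numeric_limits<int32_t>::min()
//                                : std::numeric_limits<int32_t>::max();
//   }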