/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_WRAPPER_MLA_H
#define ARM_COMPUTE_WRAPPER_MLA_H

#include <arm_neon.h>

namespace arm_compute
{
namespace wrapper
{
// Multiply-accumulate: vmla(a, b, c) computes a + b * c per lane via the
// matching Neon intrinsic (e.g. vmla_u8, vmlaq_f32).
#define VMLA_IMPL(stype, vtype, prefix, postfix)                      \
    inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
    {                                                                 \
        return prefix##_##postfix(a, b, c);                          \
    }

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// No single multiply-accumulate intrinsic is used for FP16 here, so
// compose a + (b * c) from separate add and multiply intrinsics.
#define VMLA_IMPL2(stype, vtype, prefix1, prefix2, postfix)           \
    inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
    {                                                                 \
        return prefix1##_##postfix(a, prefix2##_##postfix(b, c));    \
    }
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

// 64-bit (D-register) vector variants
VMLA_IMPL(uint8x8_t, uint8x8_t, vmla, u8)
VMLA_IMPL(int8x8_t, int8x8_t, vmla, s8)
VMLA_IMPL(uint16x4_t, uint16x4_t, vmla, u16)
VMLA_IMPL(int16x4_t, int16x4_t, vmla, s16)
VMLA_IMPL(uint32x2_t, uint32x2_t, vmla, u32)
VMLA_IMPL(int32x2_t, int32x2_t, vmla, s32)
VMLA_IMPL(float32x2_t, float32x2_t, vmla, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VMLA_IMPL2(float16x4_t, float16x4_t, vadd, vmul, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

// 128-bit (Q-register) vector variants
VMLA_IMPL(uint8x16_t, uint8x16_t, vmlaq, u8)
VMLA_IMPL(int8x16_t, int8x16_t, vmlaq, s8)
VMLA_IMPL(uint16x8_t, uint16x8_t, vmlaq, u16)
VMLA_IMPL(int16x8_t, int16x8_t, vmlaq, s16)
VMLA_IMPL(uint32x4_t, uint32x4_t, vmlaq, u32)
VMLA_IMPL(int32x4_t, int32x4_t, vmlaq, s32)
VMLA_IMPL(float32x4_t, float32x4_t, vmlaq, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VMLA_IMPL2(float16x8_t, float16x8_t, vaddq, vmulq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

#undef VMLA_IMPL
#undef VMLA_IMPL2 // harmless no-op when FP16 vector arithmetic is unavailable

// Widening multiply-accumulate: vmlal(a, b, c) computes a + b * c with the
// products widened to the element type of the accumulator a.
#define VMLAL_IMPL(vtype_in, vtype_out, postfix)                                     \
    inline vtype_out vmlal(const vtype_out &a, const vtype_in &b, const vtype_in &c) \
    {                                                                                \
        return vmlal_##postfix(a, b, c);                                             \
    }

VMLAL_IMPL(uint8x8_t, uint16x8_t, u8)
VMLAL_IMPL(int8x8_t, int16x8_t, s8)
VMLAL_IMPL(uint16x4_t, uint32x4_t, u16)
VMLAL_IMPL(int16x4_t, int32x4_t, s16)
VMLAL_IMPL(uint32x2_t, uint64x2_t, u32)
VMLAL_IMPL(int32x2_t, int64x2_t, s32)

#undef VMLAL_IMPL

} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_MLA_H */
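
/*
 * Usage sketch (illustrative, not part of this header): how a caller might
 * use the type-overloaded vmla() wrapper. The function name fused_madd and
 * the include path are hypothetical.
 *
 *   #include <arm_neon.h>
 *   #include "src/core/NEON/wrapper/intrinsics/mla.h" // assumed location
 *
 *   float32x4_t fused_madd(float32x4_t acc, float32x4_t x, float32x4_t y)
 *   {
 *       // Overload resolution picks the float32x4_t variant, which expands
 *       // to vmlaq_f32(acc, x, y): per-lane acc + x * y.
 *       return arm_compute::wrapper::vmla(acc, x, y);
 *   }
 */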
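
/*
 * Widening sketch (illustrative, hypothetical helper name): vmlal() keeps
 * 8-bit products in a 16-bit accumulator, so many small products can be
 * summed without 8-bit overflow.
 *
 *   uint16x8_t widen_madd(uint16x8_t acc, uint8x8_t x, uint8x8_t y)
 *   {
 *       // Expands to vmlal_u8(acc, x, y):
 *       // per-lane acc + (uint16_t)x * (uint16_t)y.
 *       return arm_compute::wrapper::vmlal(acc, x, y);
 *   }
 */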