/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_WRAPPER_MOVN_H
#define ARM_COMPUTE_WRAPPER_MOVN_H

#include <arm_neon.h>

namespace arm_compute
{
namespace wrapper
{
// vmovn: narrow each lane of the input vector to half its width, truncating the upper bits.
#define VMOVN_IMPL(dtype, vtype, prefix, postfix) \
    inline dtype vmovn(const vtype &a)            \
    {                                             \
        return prefix##_##postfix(a);             \
    }

VMOVN_IMPL(uint32x2_t, uint64x2_t, vmovn, u64)
VMOVN_IMPL(int32x2_t, int64x2_t, vmovn, s64)
VMOVN_IMPL(uint16x4_t, uint32x4_t, vmovn, u32)
VMOVN_IMPL(int16x4_t, int32x4_t, vmovn, s32)
VMOVN_IMPL(uint8x8_t, uint16x8_t, vmovn, u16)
VMOVN_IMPL(int8x8_t, int16x8_t, vmovn, s16)

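// Usage sketch (illustrative only, not part of this header): calling the wrapper with a
// uint16x8_t resolves to vmovn_u16 via the overload generated above, so each lane keeps
// only its low byte. The variable names below are hypothetical.
//
//   const uint16x8_t wide   = vdupq_n_u16(300);                  // 300 = 0x012C in every lane
//   const uint8x8_t  narrow = arm_compute::wrapper::vmovn(wide); // truncates to 0x2C (44)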

// vqmovn: narrow each lane to half its width, saturating values that do not fit in the
// narrower type instead of truncating them.
#define VQMOVN_IMPL(dtype, vtype, prefix, postfix) \
    inline dtype vqmovn(const vtype &a)            \
    {                                              \
        return prefix##_##postfix(a);              \
    }

VQMOVN_IMPL(uint32x2_t, uint64x2_t, vqmovn, u64)
VQMOVN_IMPL(int32x2_t, int64x2_t, vqmovn, s64)
VQMOVN_IMPL(uint16x4_t, uint32x4_t, vqmovn, u32)
VQMOVN_IMPL(int16x4_t, int32x4_t, vqmovn, s32)
VQMOVN_IMPL(uint8x8_t, uint16x8_t, vqmovn, u16)
VQMOVN_IMPL(int8x8_t, int16x8_t, vqmovn, s16)
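
// Usage sketch (illustrative only, not part of this header): the saturating variant clamps
// out-of-range lanes to the limits of the narrower type. Variable names are hypothetical.
//
//   const uint16x8_t wide      = vdupq_n_u16(300);                   // exceeds uint8_t range
//   const uint8x8_t  saturated = arm_compute::wrapper::vqmovn(wide); // every lane clamps to 255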

#undef VMOVN_IMPL
#undef VQMOVN_IMPL
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_MOVN_H */