/*
 * Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/arm/mem_neon.h"

int64_t av1_block_error_neon(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                             intptr_t block_size, int64_t *ssz) {
  int64x2_t error = vdupq_n_s64(0);
  int64x2_t sqcoeff = vdupq_n_s64(0);

  assert(block_size >= 8);
  assert((block_size % 8) == 0);

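  // Each iteration consumes eight coefficients; the asserts above guarantee
  // at least one full pass through the loop.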
  do {
    const int16x8_t c = load_tran_low_to_s16q(coeff);
    const int16x8_t d = load_tran_low_to_s16q(dqcoeff);
    const int16x8_t diff = vsubq_s16(c, d);
    const int16x4_t diff_lo = vget_low_s16(diff);
    const int16x4_t diff_hi = vget_high_s16(diff);
    // diff fits in 15 bits, so each square fits in 30 bits: the sum of two
    // such squares stays below 2^31, so two squares can share a 32-bit lane
    // before the accumulators are widened to 64 bits.
    const int32x4_t err0 = vmull_s16(diff_lo, diff_lo);
    const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi);
    const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));
    error = vaddq_s64(error, err2);

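    // Compute the sum of squared coefficients with the same widening scheme,
    // feeding the ssz output.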
    const int16x4_t coeff_lo = vget_low_s16(c);
    const int16x4_t coeff_hi = vget_high_s16(c);
    const int32x4_t sqcoeff0 = vmull_s16(coeff_lo, coeff_lo);
    const int32x4_t sqcoeff1 = vmlal_s16(sqcoeff0, coeff_hi, coeff_hi);
    const int64x2_t sqcoeff2 =
        vaddl_s32(vget_low_s32(sqcoeff1), vget_high_s32(sqcoeff1));
    sqcoeff = vaddq_s64(sqcoeff, sqcoeff2);

    coeff += 8;
    dqcoeff += 8;
    block_size -= 8;
  } while (block_size != 0);

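  // Horizontally reduce the two 64-bit lanes of each accumulator to a scalar.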
#if defined(__aarch64__)
  *ssz = vaddvq_s64(sqcoeff);
  return vaddvq_s64(error);
#else
  *ssz = vgetq_lane_s64(sqcoeff, 0) + vgetq_lane_s64(sqcoeff, 1);
  return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1);
#endif
}
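
/* For reference, a minimal scalar sketch of what the routine above computes
 * (an illustrative equivalent written for this note, not the library's C
 * reference implementation): the sum of squared quantization errors is
 * returned and the sum of squared coefficients is written through ssz. */
static INLINE int64_t block_error_scalar_sketch(const tran_low_t *coeff,
                                                const tran_low_t *dqcoeff,
                                                intptr_t block_size,
                                                int64_t *ssz) {
  int64_t error = 0;
  int64_t sqcoeff = 0;
  for (intptr_t i = 0; i < block_size; i++) {
    // Widen before multiplying so the squares cannot overflow 32 bits.
    const int64_t diff = (int64_t)coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += (int64_t)coeff[i] * coeff[i];
  }
  *ssz = sqcoeff;
  return error;
}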

int64_t av1_block_error_lp_neon(const int16_t *coeff, const int16_t *dqcoeff,
                                int block_size) {
  int64x2_t error = vdupq_n_s64(0);

  assert(block_size >= 8);
  assert((block_size % 8) == 0);

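  // Same squared-error accumulation as above, minus the sqcoeff/ssz path.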
  do {
    const int16x8_t c = vld1q_s16(coeff);
    const int16x8_t d = vld1q_s16(dqcoeff);
    const int16x8_t diff = vsubq_s16(c, d);
    const int16x4_t diff_lo = vget_low_s16(diff);
    const int16x4_t diff_hi = vget_high_s16(diff);
    // diff fits in 15 bits, so each square fits in 30 bits: the sum of two
    // such squares stays below 2^31, so two squares can share a 32-bit lane
    // before the accumulator is widened to 64 bits.
    const int32x4_t err0 = vmull_s16(diff_lo, diff_lo);
    const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi);
    const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));
    error = vaddq_s64(error, err2);
    coeff += 8;
    dqcoeff += 8;
    block_size -= 8;
  } while (block_size != 0);

  return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1);
}
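
/* A hypothetical usage sketch (the values are invented for illustration):
 *
 *   int16_t coeff[8] = { 100, -50, 25, 0, 0, 0, 0, 0 };
 *   int16_t dqcoeff[8] = { 96, -48, 24, 0, 0, 0, 0, 0 };
 *   int64_t sse = av1_block_error_lp_neon(coeff, dqcoeff, 8);
 *   // sse == 4*4 + 2*2 + 1*1 == 21
 */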