/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *  Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "config/aom_config.h"

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"

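// Returns the sum of squared differences between the original coefficients
// (coeff) and the dequantized coefficients (dqcoeff), and writes the sum of
// squared original coefficients to *ssz. Processes 16 coefficients per
// iteration.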
int64_t av1_block_error_neon(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                             intptr_t block_size, int64_t *ssz) {
  uint64x2_t err_u64 = vdupq_n_u64(0);
  int64x2_t ssz_s64 = vdupq_n_s64(0);

  assert(block_size >= 16);
  assert((block_size % 16) == 0);

  do {
    const int16x8_t c0 = load_tran_low_to_s16q(coeff);
    const int16x8_t c1 = load_tran_low_to_s16q(coeff + 8);
    const int16x8_t d0 = load_tran_low_to_s16q(dqcoeff);
    const int16x8_t d1 = load_tran_low_to_s16q(dqcoeff + 8);

    const uint16x8_t diff0 = vreinterpretq_u16_s16(vabdq_s16(c0, d0));
    const uint16x8_t diff1 = vreinterpretq_u16_s16(vabdq_s16(c1, d1));

    // By operating on unsigned integers we can store up to 4 squared diffs in
    // a 32-bit element before having to widen to 64 bits.
    uint32x4_t err = vmull_u16(vget_low_u16(diff0), vget_low_u16(diff0));
    err = vmlal_u16(err, vget_high_u16(diff0), vget_high_u16(diff0));
    err = vmlal_u16(err, vget_low_u16(diff1), vget_low_u16(diff1));
    err = vmlal_u16(err, vget_high_u16(diff1), vget_high_u16(diff1));
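    // Pairwise-add the 32-bit partial sums and accumulate into 64-bit lanes.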
    err_u64 = vpadalq_u32(err_u64, err);

    // We can't do the same here as we're operating on signed integers, so we
    // can only accumulate 2 squares.
    int32x4_t ssz0 = vmull_s16(vget_low_s16(c0), vget_low_s16(c0));
    ssz0 = vmlal_s16(ssz0, vget_high_s16(c0), vget_high_s16(c0));
    ssz_s64 = vpadalq_s32(ssz_s64, ssz0);

    int32x4_t ssz1 = vmull_s16(vget_low_s16(c1), vget_low_s16(c1));
    ssz1 = vmlal_s16(ssz1, vget_high_s16(c1), vget_high_s16(c1));
    ssz_s64 = vpadalq_s32(ssz_s64, ssz1);

    coeff += 16;
    dqcoeff += 16;
    block_size -= 16;
  } while (block_size != 0);

  *ssz = horizontal_add_s64x2(ssz_s64);
  return (int64_t)horizontal_add_u64x2(err_u64);
}

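// Low-precision variant operating directly on 16-bit coefficients. Returns
// only the sum of squared differences; there is no ssz output.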
int64_t av1_block_error_lp_neon(const int16_t *coeff, const int16_t *dqcoeff,
                                int block_size) {
  uint64x2_t err_u64 = vdupq_n_u64(0);

  assert(block_size >= 16);
  assert((block_size % 16) == 0);

  do {
    const int16x8_t c0 = vld1q_s16(coeff);
    const int16x8_t c1 = vld1q_s16(coeff + 8);
    const int16x8_t d0 = vld1q_s16(dqcoeff);
    const int16x8_t d1 = vld1q_s16(dqcoeff + 8);

    const uint16x8_t diff0 = vreinterpretq_u16_s16(vabdq_s16(c0, d0));
    const uint16x8_t diff1 = vreinterpretq_u16_s16(vabdq_s16(c1, d1));

    // By operating on unsigned integers we can store up to 4 squared diffs in
    // a 32-bit element before having to widen to 64 bits.
    uint32x4_t err = vmull_u16(vget_low_u16(diff0), vget_low_u16(diff0));
    err = vmlal_u16(err, vget_high_u16(diff0), vget_high_u16(diff0));
    err = vmlal_u16(err, vget_low_u16(diff1), vget_low_u16(diff1));
    err = vmlal_u16(err, vget_high_u16(diff1), vget_high_u16(diff1));
    err_u64 = vpadalq_u32(err_u64, err);

    coeff += 16;
    dqcoeff += 16;
    block_size -= 16;
  } while (block_size != 0);

  return (int64_t)horizontal_add_u64x2(err_u64);
}