/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <altivec.h>

#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"
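
// Byte offsets used by the VSX loads and stores below: OFF_0..OFF_3 step
// through one CFL buffer row (CFL_BUF_LINE is 32 int16_t entries, i.e.
// CFL_BUF_LINE_BYTES bytes) in 16-byte vectors, while CFL_LINE_1..CFL_LINE_3
// address the next three rows of the buffer.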
#define OFF_0 0
#define OFF_1 16
#define OFF_2 32
#define OFF_3 48
#define CFL_BUF_LINE_BYTES 64
#define CFL_LINE_1 64
#define CFL_LINE_2 128
#define CFL_LINE_3 192
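
// NEON-style fixed-width aliases for the AltiVec vector types; the NOLINT
// tags keep cpplint's runtime/int check from flagging the underlying
// short/long long element types.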
typedef vector signed char int8x16_t;          // NOLINT(runtime/int)
typedef vector unsigned char uint8x16_t;       // NOLINT(runtime/int)
typedef vector signed short int16x8_t;         // NOLINT(runtime/int)
typedef vector unsigned short uint16x8_t;      // NOLINT(runtime/int)
typedef vector signed int int32x4_t;           // NOLINT(runtime/int)
typedef vector unsigned int uint32x4_t;        // NOLINT(runtime/int)
typedef vector unsigned long long uint64x2_t;  // NOLINT(runtime/int)
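
// Computes the average of the width x height sums in src_ptr (a CFL sum
// buffer with a stride of CFL_BUF_LINE int16_t entries) and subtracts that
// average from every entry of dst in place.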
static INLINE void subtract_average_vsx(const uint16_t *src_ptr, int16_t *dst,
                                        int width, int height, int round_offset,
                                        int num_pel_log2) {
  const int16_t *dst_end = dst + height * CFL_BUF_LINE;
  const int16_t *sum_buf = (const int16_t *)src_ptr;
  const int16_t *end = sum_buf + height * CFL_BUF_LINE;
  const uint32x4_t div_shift = vec_splats((uint32_t)num_pel_log2);
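  // Permute masks for the horizontal reduction below: mask_64 swaps the two
  // 64-bit halves of a vector, and mask_32 swaps adjacent 32-bit lanes, so
  // two vec_perm + vec_add steps leave the full sum in every lane.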
  const uint8x16_t mask_64 = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
                               0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
  const uint8x16_t mask_32 = { 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03,
                               0x1C, 0x1D, 0x1E, 0x1F, 0x08, 0x09, 0x0A, 0x0B };

  int32x4_t sum_32x4_0 = { 0, 0, 0, round_offset };
  int32x4_t sum_32x4_1 = { 0, 0, 0, 0 };
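  // Accumulate the sums two buffer rows per iteration; each vec_sum4s folds
  // eight int16_t values into the four 32-bit accumulator lanes.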
  do {
    sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_0, sum_buf), sum_32x4_0);
    sum_32x4_1 = vec_sum4s(vec_vsx_ld(OFF_0 + CFL_LINE_1, sum_buf), sum_32x4_1);
    if (width >= 16) {
      sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_1, sum_buf), sum_32x4_0);
      sum_32x4_1 =
          vec_sum4s(vec_vsx_ld(OFF_1 + CFL_LINE_1, sum_buf), sum_32x4_1);
    }
    if (width == 32) {
      sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_2, sum_buf), sum_32x4_0);
      sum_32x4_1 =
          vec_sum4s(vec_vsx_ld(OFF_2 + CFL_LINE_1, sum_buf), sum_32x4_1);
      sum_32x4_0 = vec_sum4s(vec_vsx_ld(OFF_3, sum_buf), sum_32x4_0);
      sum_32x4_1 =
          vec_sum4s(vec_vsx_ld(OFF_3 + CFL_LINE_1, sum_buf), sum_32x4_1);
    }
  } while ((sum_buf += (CFL_BUF_LINE * 2)) < end);
  int32x4_t sum_32x4 = vec_add(sum_32x4_0, sum_32x4_1);
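
  // Reduce the four accumulator lanes to the total (round_offset was folded
  // into lane 3 above), shift right by num_pel_log2 to get the rounded
  // average, and broadcast it to all eight int16_t lanes.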
  const int32x4_t perm_64 = vec_perm(sum_32x4, sum_32x4, mask_64);
  sum_32x4 = vec_add(sum_32x4, perm_64);
  const int32x4_t perm_32 = vec_perm(sum_32x4, sum_32x4, mask_32);
  sum_32x4 = vec_add(sum_32x4, perm_32);
  const int32x4_t avg = vec_sr(sum_32x4, div_shift);
  const int16x8_t vec_avg = vec_pack(avg, avg);
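  // Subtract the broadcast average from the destination buffer in place, four
  // buffer rows per iteration.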
  do {
    vec_vsx_st(vec_sub(vec_vsx_ld(OFF_0, dst), vec_avg), OFF_0, dst);
    vec_vsx_st(vec_sub(vec_vsx_ld(OFF_0 + CFL_LINE_1, dst), vec_avg),
               OFF_0 + CFL_BUF_LINE_BYTES, dst);
    vec_vsx_st(vec_sub(vec_vsx_ld(OFF_0 + CFL_LINE_2, dst), vec_avg),
               OFF_0 + CFL_LINE_2, dst);
    vec_vsx_st(vec_sub(vec_vsx_ld(OFF_0 + CFL_LINE_3, dst), vec_avg),
               OFF_0 + CFL_LINE_3, dst);
    if (width >= 16) {
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_1, dst), vec_avg), OFF_1, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_1 + CFL_LINE_1, dst), vec_avg),
                 OFF_1 + CFL_LINE_1, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_1 + CFL_LINE_2, dst), vec_avg),
                 OFF_1 + CFL_LINE_2, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_1 + CFL_LINE_3, dst), vec_avg),
                 OFF_1 + CFL_LINE_3, dst);
    }
    if (width == 32) {
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_2, dst), vec_avg), OFF_2, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_2 + CFL_LINE_1, dst), vec_avg),
                 OFF_2 + CFL_LINE_1, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_2 + CFL_LINE_2, dst), vec_avg),
                 OFF_2 + CFL_LINE_2, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_2 + CFL_LINE_3, dst), vec_avg),
                 OFF_2 + CFL_LINE_3, dst);

      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_3, dst), vec_avg), OFF_3, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_3 + CFL_LINE_1, dst), vec_avg),
                 OFF_3 + CFL_LINE_1, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_3 + CFL_LINE_2, dst), vec_avg),
                 OFF_3 + CFL_LINE_2, dst);
      vec_vsx_st(vec_sub(vec_vsx_ld(OFF_3 + CFL_LINE_3, dst), vec_avg),
                 OFF_3 + CFL_LINE_3, dst);
    }
  } while ((dst += CFL_BUF_LINE * 4) < dst_end);
}

// Declare wrappers for VSX sizes
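// Each CFL_SUB_AVG_X(vsx, width, height, round_offset, num_pel_log2)
// invocation is expected to expand (per the macro in av1/common/cfl.h) to a
// thin wrapper of roughly this shape:
//
//   void subtract_average_WxH_vsx(const uint16_t *src, int16_t *dst) {
//     subtract_average_vsx(src, dst, W, H, round_offset, num_pel_log2);
//   }
//
// i.e. it binds the block-size constants for subtract_average_vsx.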
CFL_SUB_AVG_X(vsx, 8, 4, 16, 5)
CFL_SUB_AVG_X(vsx, 8, 8, 32, 6)
CFL_SUB_AVG_X(vsx, 8, 16, 64, 7)
CFL_SUB_AVG_X(vsx, 8, 32, 128, 8)
CFL_SUB_AVG_X(vsx, 16, 4, 32, 6)
CFL_SUB_AVG_X(vsx, 16, 8, 64, 7)
CFL_SUB_AVG_X(vsx, 16, 16, 128, 8)
CFL_SUB_AVG_X(vsx, 16, 32, 256, 9)
CFL_SUB_AVG_X(vsx, 32, 8, 128, 8)
CFL_SUB_AVG_X(vsx, 32, 16, 256, 9)
CFL_SUB_AVG_X(vsx, 32, 32, 512, 10)

// Based on observation, for small blocks VSX does not outperform the C code
// (there are no 64-bit load and store intrinsics), so the C functions are
// used for blocks of width 4.
cfl_subtract_average_fn get_subtract_average_fn_vsx(TX_SIZE tx_size) {
  static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = {
    subtract_average_4x4_c,     /* 4x4 */
    subtract_average_8x8_vsx,   /* 8x8 */
    subtract_average_16x16_vsx, /* 16x16 */
    subtract_average_32x32_vsx, /* 32x32 */
    NULL,                       /* 64x64 (invalid CFL size) */
    subtract_average_4x8_c,     /* 4x8 */
    subtract_average_8x4_vsx,   /* 8x4 */
    subtract_average_8x16_vsx,  /* 8x16 */
    subtract_average_16x8_vsx,  /* 16x8 */
    subtract_average_16x32_vsx, /* 16x32 */
    subtract_average_32x16_vsx, /* 32x16 */
    NULL,                       /* 32x64 (invalid CFL size) */
    NULL,                       /* 64x32 (invalid CFL size) */
    subtract_average_4x16_c,    /* 4x16 */
    subtract_average_16x4_vsx,  /* 16x4 */
    subtract_average_8x32_vsx,  /* 8x32 */
    subtract_average_32x8_vsx,  /* 32x8 */
    NULL,                       /* 16x64 (invalid CFL size) */
    NULL,                       /* 64x16 (invalid CFL size) */
  };
  // Modulo TX_SIZES_ALL to ensure that an attacker won't be able to
  // index the function pointer array out of bounds.
  return sub_avg[tx_size % TX_SIZES_ALL];
}