// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// MIPS version of speed-critical encoding functions.
//
// Author(s): Djordje Pesut    (djordje.pesut@imgtec.com)
//            Jovan Zelincevic (jovan.zelincevic@imgtec.com)
//            Slobodan Prijic  (slobodan.prijic@imgtec.com)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_MIPS32)

#include "src/dsp/mips_macro.h"
#include "src/enc/vp8i_enc.h"
#include "src/enc/cost_enc.h"

static const int kC1 = WEBP_TRANSFORM_AC3_C1;
static const int kC2 = WEBP_TRANSFORM_AC3_C2;

// macro for one vertical pass in ITransformOne
// MUL macro inlined
// temp0..temp15 holds tmp[0]..tmp[15]
// A..D - offsets in bytes to load from in buffer
// TEMP0..TEMP3 - registers for corresponding tmp elements
// TEMP4..TEMP5 - temporary registers
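// For orientation, a scalar sketch of what one vertical pass computes (the
// names a..d and MUL1/MUL2 are illustrative only; MUL1/MUL2 stand for the
// fixed-point multiplies by the kC1/kC2 constants, kC2 explicitly via
// mul + sra 16 below and kC1 via the MUL_SHIFT_C1* helpers):
//   a = in[0] + in[8];
//   b = in[0] - in[8];
//   c = MUL2(in[4]) - MUL1(in[12]);
//   d = MUL1(in[4]) + MUL2(in[12]);
//   tmp[0] = a + d;  tmp[1] = b + c;  tmp[2] = b - c;  tmp[3] = a - d;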
#define VERTICAL_PASS(A, B, C, D, TEMP4, TEMP0, TEMP1, TEMP2, TEMP3) \
  "lh      %[temp16],      " #A "(%[temp20])                 \n\t"          \
  "lh      %[temp18],      " #B "(%[temp20])                 \n\t"          \
  "lh      %[temp17],      " #C "(%[temp20])                 \n\t"          \
  "lh      %[temp19],      " #D "(%[temp20])                 \n\t"          \
  "addu    %[" #TEMP4 "],    %[temp16],      %[temp18]       \n\t"          \
  "subu    %[temp16],      %[temp16],      %[temp18]         \n\t"          \
  "mul     %[" #TEMP0 "],    %[temp17],      %[kC2]          \n\t"          \
  MUL_SHIFT_C1_IO(temp17, temp18)                                           \
  MUL_SHIFT_C1(temp18, temp19)                                              \
  "mul     %[temp19],      %[temp19],      %[kC2]            \n\t"          \
  "sra     %[" #TEMP0 "],    %[" #TEMP0 "],    16            \n\t"          \
  "sra     %[temp19],      %[temp19],      16                \n\t"          \
  "subu    %[" #TEMP2 "],    %[" #TEMP0 "],    %[temp18]     \n\t"          \
  "addu    %[" #TEMP3 "],    %[temp17],      %[temp19]       \n\t"          \
  "addu    %[" #TEMP0 "],    %[" #TEMP4 "],    %[" #TEMP3 "] \n\t"          \
  "addu    %[" #TEMP1 "],    %[temp16],      %[" #TEMP2 "]   \n\t"          \
  "subu    %[" #TEMP2 "],    %[temp16],      %[" #TEMP2 "]   \n\t"          \
  "subu    %[" #TEMP3 "],    %[" #TEMP4 "],    %[" #TEMP3 "] \n\t"

// macro for one horizontal pass in ITransformOne
// MUL and STORE macros inlined
// a = clip_8b(a) is replaced with: a = max(a, 0); a = min(a, 255)
// temp0..temp15 holds tmp[0]..tmp[15]
// A - offset in bytes to load from ref and store to dst buffer
// TEMP0, TEMP4, TEMP8 and TEMP12 - registers for corresponding tmp elements
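// The clip_8b() replacement mentioned above is done branchlessly with MIPS
// conditional moves: "slt t, x, 0" followed by "movn x, $zero, t" yields
// x = max(x, 0), and "slt t, x, 255" followed by "movz x, 255, t" yields
// x = min(x, 255).  The clipped value ref[i] + (value >> 3) is then stored
// to dst with sb.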
#define HORIZONTAL_PASS(A, TEMP0, TEMP4, TEMP8, TEMP12) \
  "addiu   %[" #TEMP0 "],    %[" #TEMP0 "],    4               \n\t"          \
  "addu    %[temp16],      %[" #TEMP0 "],    %[" #TEMP8 "]     \n\t"          \
  "subu    %[temp17],      %[" #TEMP0 "],    %[" #TEMP8 "]     \n\t"          \
  "mul     %[" #TEMP0 "],    %[" #TEMP4 "],    %[kC2]          \n\t"          \
  MUL_SHIFT_C1_IO(TEMP4, TEMP8)                                               \
  MUL_SHIFT_C1(TEMP8, TEMP12)                                                 \
  "mul     %[" #TEMP12 "],   %[" #TEMP12 "],   %[kC2]          \n\t"          \
  "sra     %[" #TEMP0 "],    %[" #TEMP0 "],    16              \n\t"          \
  "sra     %[" #TEMP12 "],   %[" #TEMP12 "],   16              \n\t"          \
  "subu    %[temp18],      %[" #TEMP0 "],    %[" #TEMP8 "]     \n\t"          \
  "addu    %[temp19],      %[" #TEMP4 "],    %[" #TEMP12 "]    \n\t"          \
  "addu    %[" #TEMP0 "],    %[temp16],      %[temp19]         \n\t"          \
  "addu    %[" #TEMP4 "],    %[temp17],      %[temp18]         \n\t"          \
  "subu    %[" #TEMP8 "],    %[temp17],      %[temp18]         \n\t"          \
  "subu    %[" #TEMP12 "],   %[temp16],      %[temp19]         \n\t"          \
  "lw      %[temp20],      0(%[args])                          \n\t"          \
  "sra     %[" #TEMP0 "],    %[" #TEMP0 "],    3               \n\t"          \
  "sra     %[" #TEMP4 "],    %[" #TEMP4 "],    3               \n\t"          \
  "sra     %[" #TEMP8 "],    %[" #TEMP8 "],    3               \n\t"          \
  "sra     %[" #TEMP12 "],   %[" #TEMP12 "],   3               \n\t"          \
  "lbu     %[temp16],      0+" XSTR(BPS) "*" #A "(%[temp20])   \n\t"          \
  "lbu     %[temp17],      1+" XSTR(BPS) "*" #A "(%[temp20])   \n\t"          \
  "lbu     %[temp18],      2+" XSTR(BPS) "*" #A "(%[temp20])   \n\t"          \
  "lbu     %[temp19],      3+" XSTR(BPS) "*" #A "(%[temp20])   \n\t"          \
  "addu    %[" #TEMP0 "],    %[temp16],      %[" #TEMP0 "]     \n\t"          \
  "addu    %[" #TEMP4 "],    %[temp17],      %[" #TEMP4 "]     \n\t"          \
  "addu    %[" #TEMP8 "],    %[temp18],      %[" #TEMP8 "]     \n\t"          \
  "addu    %[" #TEMP12 "],   %[temp19],      %[" #TEMP12 "]    \n\t"          \
  "slt     %[temp16],      %[" #TEMP0 "],    $zero             \n\t"          \
  "slt     %[temp17],      %[" #TEMP4 "],    $zero             \n\t"          \
  "slt     %[temp18],      %[" #TEMP8 "],    $zero             \n\t"          \
  "slt     %[temp19],      %[" #TEMP12 "],   $zero             \n\t"          \
  "movn    %[" #TEMP0 "],    $zero,          %[temp16]         \n\t"          \
  "movn    %[" #TEMP4 "],    $zero,          %[temp17]         \n\t"          \
  "movn    %[" #TEMP8 "],    $zero,          %[temp18]         \n\t"          \
  "movn    %[" #TEMP12 "],   $zero,          %[temp19]         \n\t"          \
  "addiu   %[temp20],      $zero,          255                 \n\t"          \
  "slt     %[temp16],      %[" #TEMP0 "],    %[temp20]         \n\t"          \
  "slt     %[temp17],      %[" #TEMP4 "],    %[temp20]         \n\t"          \
  "slt     %[temp18],      %[" #TEMP8 "],    %[temp20]         \n\t"          \
  "slt     %[temp19],      %[" #TEMP12 "],   %[temp20]         \n\t"          \
  "movz    %[" #TEMP0 "],    %[temp20],      %[temp16]         \n\t"          \
  "movz    %[" #TEMP4 "],    %[temp20],      %[temp17]         \n\t"          \
  "lw      %[temp16],      8(%[args])                          \n\t"          \
  "movz    %[" #TEMP8 "],    %[temp20],      %[temp18]         \n\t"          \
  "movz    %[" #TEMP12 "],   %[temp20],      %[temp19]         \n\t"          \
  "sb      %[" #TEMP0 "],    0+" XSTR(BPS) "*" #A "(%[temp16]) \n\t"          \
  "sb      %[" #TEMP4 "],    1+" XSTR(BPS) "*" #A "(%[temp16]) \n\t"          \
  "sb      %[" #TEMP8 "],    2+" XSTR(BPS) "*" #A "(%[temp16]) \n\t"          \
  "sb      %[" #TEMP12 "],   3+" XSTR(BPS) "*" #A "(%[temp16]) \n\t"

// Does one or two inverse transforms.
static WEBP_INLINE void ITransformOne_MIPS32(const uint8_t* WEBP_RESTRICT ref,
                                             const int16_t* WEBP_RESTRICT in,
                                             uint8_t* WEBP_RESTRICT dst) {
  int temp0, temp1, temp2, temp3, temp4, temp5, temp6;
  int temp7, temp8, temp9, temp10, temp11, temp12, temp13;
  int temp14, temp15, temp16, temp17, temp18, temp19, temp20;
  const int* args[3] = {(const int*)ref, (const int*)in, (const int*)dst};

  __asm__ volatile(
    "lw      %[temp20],      4(%[args])                      \n\t"
    VERTICAL_PASS(0, 16,  8, 24, temp4,  temp0,  temp1,  temp2,  temp3)
    VERTICAL_PASS(2, 18, 10, 26, temp8,  temp4,  temp5,  temp6,  temp7)
    VERTICAL_PASS(4, 20, 12, 28, temp12, temp8,  temp9,  temp10, temp11)
    VERTICAL_PASS(6, 22, 14, 30, temp20, temp12, temp13, temp14, temp15)

    HORIZONTAL_PASS(0, temp0, temp4, temp8,  temp12)
    HORIZONTAL_PASS(1, temp1, temp5, temp9,  temp13)
    HORIZONTAL_PASS(2, temp2, temp6, temp10, temp14)
    HORIZONTAL_PASS(3, temp3, temp7, temp11, temp15)

    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8),
      [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11),
      [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14),
      [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17),
      [temp18]"=&r"(temp18), [temp19]"=&r"(temp19), [temp20]"=&r"(temp20)
    : [args]"r"(args), [kC1]"r"(kC1), [kC2]"r"(kC2)
    : "memory", "hi", "lo"
  );
}

static void ITransform_MIPS32(const uint8_t* WEBP_RESTRICT ref,
                              const int16_t* WEBP_RESTRICT in,
                              uint8_t* WEBP_RESTRICT dst, int do_two) {
  ITransformOne_MIPS32(ref, in, dst);
  if (do_two) {
    ITransformOne_MIPS32(ref + 4, in + 16, dst + 4);
  }
}

#undef VERTICAL_PASS
#undef HORIZONTAL_PASS

// macro for one pass through the for loop in QuantizeBlock
// QUANTDIV macro inlined
// J - offset in bytes (kZigzag[n] * 2)
// K - offset in bytes (kZigzag[n] * 4)
// N - offset in bytes (n * 2)
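// In scalar terms each QUANTIZE_ONE step does roughly the following for the
// coefficient j = kZigzag[n] (names below are illustrative only):
//   sign  = in[j] >> 15;                           // 0 or -1
//   coeff = ((in[j] ^ sign) - sign) + sharpen[j];  // abs(in[j]) + sharpen[j]
//   if (coeff > zthresh[j]) {
//     level = (coeff * iq[j] + bias[j]) >> 17;     // the QUANTDIV step
//     if (level > max_level) level = max_level;
//     level = (level ^ sign) - sign;               // restore the sign
//     in[j] = level * q[j];
//   } else {
//     level = 0;
//     in[j] = 0;
//   }
//   out[n] = level;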
#define QUANTIZE_ONE(J, K, N)                                               \
  "lh           %[temp0],       " #J "(%[ppin])                     \n\t"   \
  "lhu          %[temp1],       " #J "(%[ppsharpen])                \n\t"   \
  "lw           %[temp2],       " #K "(%[ppzthresh])                \n\t"   \
  "sra          %[sign],        %[temp0],           15              \n\t"   \
  "xor          %[coeff],       %[temp0],           %[sign]         \n\t"   \
  "subu         %[coeff],       %[coeff],           %[sign]         \n\t"   \
  "addu         %[coeff],       %[coeff],           %[temp1]        \n\t"   \
  "slt          %[temp4],       %[temp2],           %[coeff]        \n\t"   \
  "addiu        %[temp5],       $zero,              0               \n\t"   \
  "addiu        %[level],       $zero,              0               \n\t"   \
  "beqz         %[temp4],       2f                                  \n\t"   \
  "lhu          %[temp1],       " #J "(%[ppiq])                     \n\t"   \
  "lw           %[temp2],       " #K "(%[ppbias])                   \n\t"   \
  "lhu          %[temp3],       " #J "(%[ppq])                      \n\t"   \
  "mul          %[level],       %[coeff],           %[temp1]        \n\t"   \
  "addu         %[level],       %[level],           %[temp2]        \n\t"   \
  "sra          %[level],       %[level],           17              \n\t"   \
  "slt          %[temp4],       %[max_level],       %[level]        \n\t"   \
  "movn         %[level],       %[max_level],       %[temp4]        \n\t"   \
  "xor          %[level],       %[level],           %[sign]         \n\t"   \
  "subu         %[level],       %[level],           %[sign]         \n\t"   \
  "mul          %[temp5],       %[level],           %[temp3]        \n\t"   \
"2:                                                                 \n\t"   \
  "sh           %[temp5],       " #J "(%[ppin])                     \n\t"   \
  "sh           %[level],       " #N "(%[pout])                     \n\t"

static int QuantizeBlock_MIPS32(int16_t in[16], int16_t out[16],
                                const VP8Matrix* const mtx) {
  int temp0, temp1, temp2, temp3, temp4, temp5;
  int sign, coeff, level, i;
  int max_level = MAX_LEVEL;

  int16_t* ppin             = &in[0];
  int16_t* pout             = &out[0];
  const uint16_t* ppsharpen = &mtx->sharpen_[0];
  const uint32_t* ppzthresh = &mtx->zthresh_[0];
  const uint16_t* ppq       = &mtx->q_[0];
  const uint16_t* ppiq      = &mtx->iq_[0];
  const uint32_t* ppbias    = &mtx->bias_[0];

  __asm__ volatile(
    QUANTIZE_ONE( 0,  0,  0)
    QUANTIZE_ONE( 2,  4,  2)
    QUANTIZE_ONE( 8, 16,  4)
    QUANTIZE_ONE(16, 32,  6)
    QUANTIZE_ONE(10, 20,  8)
    QUANTIZE_ONE( 4,  8, 10)
    QUANTIZE_ONE( 6, 12, 12)
    QUANTIZE_ONE(12, 24, 14)
    QUANTIZE_ONE(18, 36, 16)
    QUANTIZE_ONE(24, 48, 18)
    QUANTIZE_ONE(26, 52, 20)
    QUANTIZE_ONE(20, 40, 22)
    QUANTIZE_ONE(14, 28, 24)
    QUANTIZE_ONE(22, 44, 26)
    QUANTIZE_ONE(28, 56, 28)
    QUANTIZE_ONE(30, 60, 30)

    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
      [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [sign]"=&r"(sign), [coeff]"=&r"(coeff),
      [level]"=&r"(level)
    : [pout]"r"(pout), [ppin]"r"(ppin),
      [ppiq]"r"(ppiq), [max_level]"r"(max_level),
      [ppbias]"r"(ppbias), [ppzthresh]"r"(ppzthresh),
      [ppsharpen]"r"(ppsharpen), [ppq]"r"(ppq)
    : "memory", "hi", "lo"
  );

  // kept outside the asm macro so the scan for a non-zero level can stop early
  for (i = 15; i >= 0; i--) {
    if (out[i]) return 1;
  }
  return 0;
}

static int Quantize2Blocks_MIPS32(int16_t in[32], int16_t out[32],
                                  const VP8Matrix* WEBP_RESTRICT const mtx) {
  int nz;
  nz  = QuantizeBlock_MIPS32(in + 0 * 16, out + 0 * 16, mtx) << 0;
  nz |= QuantizeBlock_MIPS32(in + 1 * 16, out + 1 * 16, mtx) << 1;
  return nz;
}

#undef QUANTIZE_ONE

// macro for one horizontal pass in Disto4x4 (TTransform)
// two calls of function TTransform are merged into a single one
// A - offset in bytes to load from a and b buffers
// E..H - offsets in bytes to store first results to tmp buffer
// E1..H1 - offsets in bytes to store second results to tmp buffer
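// In scalar terms one such pass computes, for both the a and b rows
// (a0..a3 are illustrative names only):
//   a0 = in[0] + in[2];  a1 = in[1] + in[3];
//   a2 = in[1] - in[3];  a3 = in[0] - in[2];
//   tmp[0] = a0 + a1;  tmp[1] = a3 + a2;  tmp[2] = a3 - a2;  tmp[3] = a0 - a1;
// with the two result sets going to offsets E..H and E1..H1 respectively.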
#define HORIZONTAL_PASS(A, E, F, G, H, E1, F1, G1, H1)                  \
  "lbu    %[temp0],  0+" XSTR(BPS) "*" #A "(%[a])  \n\t"                \
  "lbu    %[temp1],  1+" XSTR(BPS) "*" #A "(%[a])  \n\t"                \
  "lbu    %[temp2],  2+" XSTR(BPS) "*" #A "(%[a])  \n\t"                \
  "lbu    %[temp3],  3+" XSTR(BPS) "*" #A "(%[a])  \n\t"                \
  "lbu    %[temp4],  0+" XSTR(BPS) "*" #A "(%[b])  \n\t"                \
  "lbu    %[temp5],  1+" XSTR(BPS) "*" #A "(%[b])  \n\t"                \
  "lbu    %[temp6],  2+" XSTR(BPS) "*" #A "(%[b])  \n\t"                \
  "lbu    %[temp7],  3+" XSTR(BPS) "*" #A "(%[b])  \n\t"                \
  "addu   %[temp8],  %[temp0],    %[temp2]         \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp2]         \n\t"                \
  "addu   %[temp2],  %[temp1],    %[temp3]         \n\t"                \
  "subu   %[temp1],  %[temp1],    %[temp3]         \n\t"                \
  "addu   %[temp3],  %[temp4],    %[temp6]         \n\t"                \
  "subu   %[temp4],  %[temp4],    %[temp6]         \n\t"                \
  "addu   %[temp6],  %[temp5],    %[temp7]         \n\t"                \
  "subu   %[temp5],  %[temp5],    %[temp7]         \n\t"                \
  "addu   %[temp7],  %[temp8],    %[temp2]         \n\t"                \
  "subu   %[temp2],  %[temp8],    %[temp2]         \n\t"                \
  "addu   %[temp8],  %[temp0],    %[temp1]         \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp1]         \n\t"                \
  "addu   %[temp1],  %[temp3],    %[temp6]         \n\t"                \
  "subu   %[temp3],  %[temp3],    %[temp6]         \n\t"                \
  "addu   %[temp6],  %[temp4],    %[temp5]         \n\t"                \
  "subu   %[temp4],  %[temp4],    %[temp5]         \n\t"                \
  "sw     %[temp7],  " #E "(%[tmp])                \n\t"                \
  "sw     %[temp2],  " #H "(%[tmp])                \n\t"                \
  "sw     %[temp8],  " #F "(%[tmp])                \n\t"                \
  "sw     %[temp0],  " #G "(%[tmp])                \n\t"                \
  "sw     %[temp1],  " #E1 "(%[tmp])               \n\t"                \
  "sw     %[temp3],  " #H1 "(%[tmp])               \n\t"                \
  "sw     %[temp6],  " #F1 "(%[tmp])               \n\t"                \
  "sw     %[temp4],  " #G1 "(%[tmp])               \n\t"

// macro for one vertical pass in Disto4x4 (TTransform)
// two calls of function TTransform are merged into a single one
// since only one accumulator is available in the mips32r1 instruction set,
//   the second call of TTransform is evaluated first, followed by the
//   first one:
//   const int sum1 = TTransform(a, w);
//   const int sum2 = TTransform(b, w);
//   return abs(sum2 - sum1) >> 5;
//   (sum2 - sum1) is accumulated with madds (sum2) and msubs (sum1)
// A..D - offsets in bytes to load first results from tmp buffer
// A1..D1 - offsets in bytes to load second results from tmp buffer
// E..H - offsets in bytes to load from w buffer
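// A scalar sketch of one column pass (shown for the first column; a0..a3 and
// wE..wH are illustrative names for the values loaded from A..D and E..H),
// with the weighted absolute values accumulated into HI/LO via madd for the
// A1..D1 set and msub for the A..D set:
//   a0 = tmp[0] + tmp[8];   a1 = tmp[4] + tmp[12];
//   a2 = tmp[4] - tmp[12];  a3 = tmp[0] - tmp[8];
//   sum += wE * abs(a0 + a1) + wF * abs(a3 + a2)
//        + wG * abs(a3 - a2) + wH * abs(a0 - a1);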
#define VERTICAL_PASS(A, B, C, D, A1, B1, C1, D1, E, F, G, H)     \
  "lw     %[temp0],  " #A1 "(%[tmp])         \n\t"                \
  "lw     %[temp1],  " #C1 "(%[tmp])         \n\t"                \
  "lw     %[temp2],  " #B1 "(%[tmp])         \n\t"                \
  "lw     %[temp3],  " #D1 "(%[tmp])         \n\t"                \
  "addu   %[temp8],  %[temp0],    %[temp1]   \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp1]   \n\t"                \
  "addu   %[temp1],  %[temp2],    %[temp3]   \n\t"                \
  "subu   %[temp2],  %[temp2],    %[temp3]   \n\t"                \
  "addu   %[temp3],  %[temp8],    %[temp1]   \n\t"                \
  "subu   %[temp8],  %[temp8],    %[temp1]   \n\t"                \
  "addu   %[temp1],  %[temp0],    %[temp2]   \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp2]   \n\t"                \
  "sra    %[temp4],  %[temp3],    31         \n\t"                \
  "sra    %[temp5],  %[temp1],    31         \n\t"                \
  "sra    %[temp6],  %[temp0],    31         \n\t"                \
  "sra    %[temp7],  %[temp8],    31         \n\t"                \
  "xor    %[temp3],  %[temp3],    %[temp4]   \n\t"                \
  "xor    %[temp1],  %[temp1],    %[temp5]   \n\t"                \
  "xor    %[temp0],  %[temp0],    %[temp6]   \n\t"                \
  "xor    %[temp8],  %[temp8],    %[temp7]   \n\t"                \
  "subu   %[temp3],  %[temp3],    %[temp4]   \n\t"                \
  "subu   %[temp1],  %[temp1],    %[temp5]   \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp6]   \n\t"                \
  "subu   %[temp8],  %[temp8],    %[temp7]   \n\t"                \
  "lhu    %[temp4],  " #E "(%[w])            \n\t"                \
  "lhu    %[temp5],  " #F "(%[w])            \n\t"                \
  "lhu    %[temp6],  " #G "(%[w])            \n\t"                \
  "lhu    %[temp7],  " #H "(%[w])            \n\t"                \
  "madd   %[temp4],  %[temp3]                \n\t"                \
  "madd   %[temp5],  %[temp1]                \n\t"                \
  "madd   %[temp6],  %[temp0]                \n\t"                \
  "madd   %[temp7],  %[temp8]                \n\t"                \
  "lw     %[temp0],  " #A "(%[tmp])          \n\t"                \
  "lw     %[temp1],  " #C "(%[tmp])          \n\t"                \
  "lw     %[temp2],  " #B "(%[tmp])          \n\t"                \
  "lw     %[temp3],  " #D "(%[tmp])          \n\t"                \
  "addu   %[temp8],  %[temp0],    %[temp1]   \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp1]   \n\t"                \
  "addu   %[temp1],  %[temp2],    %[temp3]   \n\t"                \
  "subu   %[temp2],  %[temp2],    %[temp3]   \n\t"                \
  "addu   %[temp3],  %[temp8],    %[temp1]   \n\t"                \
  "subu   %[temp1],  %[temp8],    %[temp1]   \n\t"                \
  "addu   %[temp8],  %[temp0],    %[temp2]   \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp2]   \n\t"                \
  "sra    %[temp2],  %[temp3],    31         \n\t"                \
  "xor    %[temp3],  %[temp3],    %[temp2]   \n\t"                \
  "subu   %[temp3],  %[temp3],    %[temp2]   \n\t"                \
  "msub   %[temp4],  %[temp3]                \n\t"                \
  "sra    %[temp2],  %[temp8],    31         \n\t"                \
  "sra    %[temp3],  %[temp0],    31         \n\t"                \
  "sra    %[temp4],  %[temp1],    31         \n\t"                \
  "xor    %[temp8],  %[temp8],    %[temp2]   \n\t"                \
  "xor    %[temp0],  %[temp0],    %[temp3]   \n\t"                \
  "xor    %[temp1],  %[temp1],    %[temp4]   \n\t"                \
  "subu   %[temp8],  %[temp8],    %[temp2]   \n\t"                \
  "subu   %[temp0],  %[temp0],    %[temp3]   \n\t"                \
  "subu   %[temp1],  %[temp1],    %[temp4]   \n\t"                \
  "msub   %[temp5],  %[temp8]                \n\t"                \
  "msub   %[temp6],  %[temp0]                \n\t"                \
  "msub   %[temp7],  %[temp1]                \n\t"

static int Disto4x4_MIPS32(const uint8_t* WEBP_RESTRICT const a,
                           const uint8_t* WEBP_RESTRICT const b,
                           const uint16_t* WEBP_RESTRICT const w) {
  int tmp[32];
  int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8;

  __asm__ volatile(
    HORIZONTAL_PASS(0,   0,  4,  8, 12,    64,  68,  72,  76)
    HORIZONTAL_PASS(1,  16, 20, 24, 28,    80,  84,  88,  92)
    HORIZONTAL_PASS(2,  32, 36, 40, 44,    96, 100, 104, 108)
    HORIZONTAL_PASS(3,  48, 52, 56, 60,   112, 116, 120, 124)
    "mthi   $zero                             \n\t"
    "mtlo   $zero                             \n\t"
    VERTICAL_PASS( 0, 16, 32, 48,     64, 80,  96, 112,   0,  8, 16, 24)
    VERTICAL_PASS( 4, 20, 36, 52,     68, 84, 100, 116,   2, 10, 18, 26)
    VERTICAL_PASS( 8, 24, 40, 56,     72, 88, 104, 120,   4, 12, 20, 28)
    VERTICAL_PASS(12, 28, 44, 60,     76, 92, 108, 124,   6, 14, 22, 30)
    "mflo   %[temp0]                          \n\t"
    "sra    %[temp1],  %[temp0],  31          \n\t"
    "xor    %[temp0],  %[temp0],  %[temp1]    \n\t"
    "subu   %[temp0],  %[temp0],  %[temp1]    \n\t"
    "sra    %[temp0],  %[temp0],  5           \n\t"

    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8)
    : [a]"r"(a), [b]"r"(b), [w]"r"(w), [tmp]"r"(tmp)
    : "memory", "hi", "lo"
  );

  return temp0;
}

#undef VERTICAL_PASS
#undef HORIZONTAL_PASS

static int Disto16x16_MIPS32(const uint8_t* WEBP_RESTRICT const a,
                             const uint8_t* WEBP_RESTRICT const b,
                             const uint16_t* WEBP_RESTRICT const w) {
  int D = 0;
  int x, y;
  for (y = 0; y < 16 * BPS; y += 4 * BPS) {
    for (x = 0; x < 16; x += 4) {
      D += Disto4x4_MIPS32(a + x + y, b + x + y, w);
    }
  }
  return D;
}

// macro for one horizontal pass in FTransform
// temp0..temp15 holds tmp[0]..tmp[15]
// A - offset in bytes to load from src and ref buffers
// TEMP0..TEMP3 - registers for corresponding tmp elements
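// A scalar sketch of one horizontal pass (d0..d3 and a0..a3 are illustrative
// names for the src - ref differences and their butterflies):
//   d0 = src[0] - ref[0];  ...  d3 = src[3] - ref[3];
//   a0 = d0 + d3;  a1 = d1 + d2;  a2 = d1 - d2;  a3 = d0 - d3;
//   tmp[0] = (a0 + a1) * 8;                          // the sll by 3 below
//   tmp[1] = (a2 * 2217 + a3 * 5352 + 1812) >> 9;
//   tmp[2] = (a0 - a1) * 8;
//   tmp[3] = (a3 * 2217 - a2 * 5352 +  937) >> 9;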
#define HORIZONTAL_PASS(A, TEMP0, TEMP1, TEMP2, TEMP3)                  \
  "lw     %[" #TEMP1 "],  0(%[args])                           \n\t"    \
  "lw     %[" #TEMP2 "],  4(%[args])                           \n\t"    \
  "lbu    %[temp16],    0+" XSTR(BPS) "*" #A "(%[" #TEMP1 "])  \n\t"    \
  "lbu    %[temp17],    0+" XSTR(BPS) "*" #A "(%[" #TEMP2 "])  \n\t"    \
  "lbu    %[temp18],    1+" XSTR(BPS) "*" #A "(%[" #TEMP1 "])  \n\t"    \
  "lbu    %[temp19],    1+" XSTR(BPS) "*" #A "(%[" #TEMP2 "])  \n\t"    \
  "subu   %[temp20],    %[temp16],    %[temp17]                \n\t"    \
  "lbu    %[temp16],    2+" XSTR(BPS) "*" #A "(%[" #TEMP1 "])  \n\t"    \
  "lbu    %[temp17],    2+" XSTR(BPS) "*" #A "(%[" #TEMP2 "])  \n\t"    \
  "subu   %[" #TEMP0 "],  %[temp18],    %[temp19]              \n\t"    \
  "lbu    %[temp18],    3+" XSTR(BPS) "*" #A "(%[" #TEMP1 "])  \n\t"    \
  "lbu    %[temp19],    3+" XSTR(BPS) "*" #A "(%[" #TEMP2 "])  \n\t"    \
  "subu   %[" #TEMP1 "],  %[temp16],    %[temp17]              \n\t"    \
  "subu   %[" #TEMP2 "],  %[temp18],    %[temp19]              \n\t"    \
  "addu   %[" #TEMP3 "],  %[temp20],    %[" #TEMP2 "]          \n\t"    \
  "subu   %[" #TEMP2 "],  %[temp20],    %[" #TEMP2 "]          \n\t"    \
  "addu   %[temp20],    %[" #TEMP0 "],  %[" #TEMP1 "]          \n\t"    \
  "subu   %[" #TEMP0 "],  %[" #TEMP0 "],  %[" #TEMP1 "]        \n\t"    \
  "mul    %[temp16],    %[" #TEMP2 "],  %[c5352]               \n\t"    \
  "mul    %[temp17],    %[" #TEMP2 "],  %[c2217]               \n\t"    \
  "mul    %[temp18],    %[" #TEMP0 "],  %[c5352]               \n\t"    \
  "mul    %[temp19],    %[" #TEMP0 "],  %[c2217]               \n\t"    \
  "addu   %[" #TEMP1 "],  %[" #TEMP3 "],  %[temp20]            \n\t"    \
  "subu   %[temp20],    %[" #TEMP3 "],  %[temp20]              \n\t"    \
  "sll    %[" #TEMP0 "],  %[" #TEMP1 "],  3                    \n\t"    \
  "sll    %[" #TEMP2 "],  %[temp20],    3                      \n\t"    \
  "addiu  %[temp16],    %[temp16],    1812                     \n\t"    \
  "addiu  %[temp17],    %[temp17],    937                      \n\t"    \
  "addu   %[temp16],    %[temp16],    %[temp19]                \n\t"    \
  "subu   %[temp17],    %[temp17],    %[temp18]                \n\t"    \
  "sra    %[" #TEMP1 "],  %[temp16],    9                      \n\t"    \
  "sra    %[" #TEMP3 "],  %[temp17],    9                      \n\t"

// macro for one vertical pass in FTransform
// temp0..temp15 holds tmp[0]..tmp[15]
// A..D - offsets in bytes to store to out buffer
// TEMP0, TEMP4, TEMP8 and TEMP12 - registers for corresponding tmp elements
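// A scalar sketch of one vertical pass (shown for the first column; a0..a3
// are illustrative names):
//   a0 = tmp[0] + tmp[12];  a1 = tmp[4] + tmp[8];
//   a2 = tmp[4] - tmp[8];   a3 = tmp[0] - tmp[12];
//   out[0]  = (a0 + a1 + 7) >> 4;
//   out[4]  = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
//   out[8]  = (a0 - a1 + 7) >> 4;
//   out[12] = (a3 * 2217 - a2 * 5352 + 51000) >> 16;
// The "+ (a3 != 0)" term is what the movn below implements, and 51000 shows
// up as the split constant 30000 + 21000.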
#define VERTICAL_PASS(A, B, C, D, TEMP0, TEMP4, TEMP8, TEMP12)    \
  "addu   %[temp16],    %[" #TEMP0 "],  %[" #TEMP12 "]   \n\t"    \
  "subu   %[temp19],    %[" #TEMP0 "],  %[" #TEMP12 "]   \n\t"    \
  "addu   %[temp17],    %[" #TEMP4 "],  %[" #TEMP8 "]    \n\t"    \
  "subu   %[temp18],    %[" #TEMP4 "],  %[" #TEMP8 "]    \n\t"    \
  "mul    %[" #TEMP8 "],  %[temp19],    %[c2217]         \n\t"    \
  "mul    %[" #TEMP12 "], %[temp18],    %[c2217]         \n\t"    \
  "mul    %[" #TEMP4 "],  %[temp19],    %[c5352]         \n\t"    \
  "mul    %[temp18],    %[temp18],    %[c5352]           \n\t"    \
  "addiu  %[temp16],    %[temp16],    7                  \n\t"    \
  "addu   %[" #TEMP0 "],  %[temp16],    %[temp17]        \n\t"    \
  "sra    %[" #TEMP0 "],  %[" #TEMP0 "],  4              \n\t"    \
  "addu   %[" #TEMP12 "], %[" #TEMP12 "], %[" #TEMP4 "]  \n\t"    \
  "subu   %[" #TEMP4 "],  %[temp16],    %[temp17]        \n\t"    \
  "sra    %[" #TEMP4 "],  %[" #TEMP4 "],  4              \n\t"    \
  "addiu  %[" #TEMP8 "],  %[" #TEMP8 "],  30000          \n\t"    \
  "addiu  %[" #TEMP12 "], %[" #TEMP12 "], 12000          \n\t"    \
  "addiu  %[" #TEMP8 "],  %[" #TEMP8 "],  21000          \n\t"    \
  "subu   %[" #TEMP8 "],  %[" #TEMP8 "],  %[temp18]      \n\t"    \
  "sra    %[" #TEMP12 "], %[" #TEMP12 "], 16             \n\t"    \
  "sra    %[" #TEMP8 "],  %[" #TEMP8 "],  16             \n\t"    \
  "addiu  %[temp16],    %[" #TEMP12 "], 1                \n\t"    \
  "movn   %[" #TEMP12 "], %[temp16],    %[temp19]        \n\t"    \
  "sh     %[" #TEMP0 "],  " #A "(%[temp20])              \n\t"    \
  "sh     %[" #TEMP4 "],  " #C "(%[temp20])              \n\t"    \
  "sh     %[" #TEMP8 "],  " #D "(%[temp20])              \n\t"    \
  "sh     %[" #TEMP12 "], " #B "(%[temp20])              \n\t"

static void FTransform_MIPS32(const uint8_t* WEBP_RESTRICT src,
                              const uint8_t* WEBP_RESTRICT ref,
                              int16_t* WEBP_RESTRICT out) {
  int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8;
  int temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16;
  int temp17, temp18, temp19, temp20;
  const int c2217 = 2217;
  const int c5352 = 5352;
  const int* const args[3] =
      { (const int*)src, (const int*)ref, (const int*)out };

  __asm__ volatile(
    HORIZONTAL_PASS(0, temp0,  temp1,  temp2,  temp3)
    HORIZONTAL_PASS(1, temp4,  temp5,  temp6,  temp7)
    HORIZONTAL_PASS(2, temp8,  temp9,  temp10, temp11)
    HORIZONTAL_PASS(3, temp12, temp13, temp14, temp15)
    "lw   %[temp20],    8(%[args])                     \n\t"
    VERTICAL_PASS(0,  8, 16, 24, temp0, temp4, temp8,  temp12)
    VERTICAL_PASS(2, 10, 18, 26, temp1, temp5, temp9,  temp13)
    VERTICAL_PASS(4, 12, 20, 28, temp2, temp6, temp10, temp14)
    VERTICAL_PASS(6, 14, 22, 30, temp3, temp7, temp11, temp15)

    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8),
      [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11),
      [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14),
      [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17),
      [temp18]"=&r"(temp18), [temp19]"=&r"(temp19), [temp20]"=&r"(temp20)
    : [args]"r"(args), [c2217]"r"(c2217), [c5352]"r"(c5352)
    : "memory", "hi", "lo"
  );
}

#undef VERTICAL_PASS
#undef HORIZONTAL_PASS

#if !defined(WORK_AROUND_GCC)

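// GET_SSE_INNER accumulates four squared differences (a[i] - b[i])^2 into the
// HI/LO accumulator with madd; GET_SSE below expands it for four groups of
// four consecutive bytes.  The accumulator is cleared with "mult $zero, $zero"
// and read back with mflo in the SSE*_MIPS32 functions.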
#define GET_SSE_INNER(A, B, C, D)                               \
  "lbu     %[temp0],    " #A "(%[a])                 \n\t"      \
  "lbu     %[temp1],    " #A "(%[b])                 \n\t"      \
  "lbu     %[temp2],    " #B "(%[a])                 \n\t"      \
  "lbu     %[temp3],    " #B "(%[b])                 \n\t"      \
  "lbu     %[temp4],    " #C "(%[a])                 \n\t"      \
  "lbu     %[temp5],    " #C "(%[b])                 \n\t"      \
  "lbu     %[temp6],    " #D "(%[a])                 \n\t"      \
  "lbu     %[temp7],    " #D "(%[b])                 \n\t"      \
  "subu    %[temp0],    %[temp0],     %[temp1]       \n\t"      \
  "subu    %[temp2],    %[temp2],     %[temp3]       \n\t"      \
  "subu    %[temp4],    %[temp4],     %[temp5]       \n\t"      \
  "subu    %[temp6],    %[temp6],     %[temp7]       \n\t"      \
  "madd    %[temp0],    %[temp0]                     \n\t"      \
  "madd    %[temp2],    %[temp2]                     \n\t"      \
  "madd    %[temp4],    %[temp4]                     \n\t"      \
  "madd    %[temp6],    %[temp6]                     \n\t"

#define GET_SSE(A, B, C, D)               \
  GET_SSE_INNER(A, A + 1, A + 2, A + 3)   \
  GET_SSE_INNER(B, B + 1, B + 2, B + 3)   \
  GET_SSE_INNER(C, C + 1, C + 2, C + 3)   \
  GET_SSE_INNER(D, D + 1, D + 2, D + 3)

static int SSE16x16_MIPS32(const uint8_t* WEBP_RESTRICT a,
                           const uint8_t* WEBP_RESTRICT b) {
  int count;
  int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

  __asm__ volatile(
     "mult   $zero,    $zero                            \n\t"

     GET_SSE( 0 * BPS, 4 +  0 * BPS, 8 +  0 * BPS, 12 +  0 * BPS)
     GET_SSE( 1 * BPS, 4 +  1 * BPS, 8 +  1 * BPS, 12 +  1 * BPS)
     GET_SSE( 2 * BPS, 4 +  2 * BPS, 8 +  2 * BPS, 12 +  2 * BPS)
     GET_SSE( 3 * BPS, 4 +  3 * BPS, 8 +  3 * BPS, 12 +  3 * BPS)
     GET_SSE( 4 * BPS, 4 +  4 * BPS, 8 +  4 * BPS, 12 +  4 * BPS)
     GET_SSE( 5 * BPS, 4 +  5 * BPS, 8 +  5 * BPS, 12 +  5 * BPS)
     GET_SSE( 6 * BPS, 4 +  6 * BPS, 8 +  6 * BPS, 12 +  6 * BPS)
     GET_SSE( 7 * BPS, 4 +  7 * BPS, 8 +  7 * BPS, 12 +  7 * BPS)
     GET_SSE( 8 * BPS, 4 +  8 * BPS, 8 +  8 * BPS, 12 +  8 * BPS)
     GET_SSE( 9 * BPS, 4 +  9 * BPS, 8 +  9 * BPS, 12 +  9 * BPS)
     GET_SSE(10 * BPS, 4 + 10 * BPS, 8 + 10 * BPS, 12 + 10 * BPS)
     GET_SSE(11 * BPS, 4 + 11 * BPS, 8 + 11 * BPS, 12 + 11 * BPS)
     GET_SSE(12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS)
     GET_SSE(13 * BPS, 4 + 13 * BPS, 8 + 13 * BPS, 12 + 13 * BPS)
     GET_SSE(14 * BPS, 4 + 14 * BPS, 8 + 14 * BPS, 12 + 14 * BPS)
     GET_SSE(15 * BPS, 4 + 15 * BPS, 8 + 15 * BPS, 12 + 15 * BPS)

    "mflo    %[count]                                   \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count)
    : [a]"r"(a), [b]"r"(b)
    : "memory", "hi", "lo"
  );
  return count;
}

static int SSE16x8_MIPS32(const uint8_t* WEBP_RESTRICT a,
                          const uint8_t* WEBP_RESTRICT b) {
  int count;
  int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

  __asm__ volatile(
     "mult   $zero,    $zero                            \n\t"

     GET_SSE( 0 * BPS, 4 +  0 * BPS, 8 +  0 * BPS, 12 +  0 * BPS)
     GET_SSE( 1 * BPS, 4 +  1 * BPS, 8 +  1 * BPS, 12 +  1 * BPS)
     GET_SSE( 2 * BPS, 4 +  2 * BPS, 8 +  2 * BPS, 12 +  2 * BPS)
     GET_SSE( 3 * BPS, 4 +  3 * BPS, 8 +  3 * BPS, 12 +  3 * BPS)
     GET_SSE( 4 * BPS, 4 +  4 * BPS, 8 +  4 * BPS, 12 +  4 * BPS)
     GET_SSE( 5 * BPS, 4 +  5 * BPS, 8 +  5 * BPS, 12 +  5 * BPS)
     GET_SSE( 6 * BPS, 4 +  6 * BPS, 8 +  6 * BPS, 12 +  6 * BPS)
     GET_SSE( 7 * BPS, 4 +  7 * BPS, 8 +  7 * BPS, 12 +  7 * BPS)

    "mflo    %[count]                                   \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count)
    : [a]"r"(a), [b]"r"(b)
    : "memory", "hi", "lo"
  );
  return count;
}

static int SSE8x8_MIPS32(const uint8_t* WEBP_RESTRICT a,
                         const uint8_t* WEBP_RESTRICT b) {
  int count;
  int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

  __asm__ volatile(
     "mult   $zero,    $zero                            \n\t"

     GET_SSE(0 * BPS, 4 + 0 * BPS, 1 * BPS, 4 + 1 * BPS)
     GET_SSE(2 * BPS, 4 + 2 * BPS, 3 * BPS, 4 + 3 * BPS)
     GET_SSE(4 * BPS, 4 + 4 * BPS, 5 * BPS, 4 + 5 * BPS)
     GET_SSE(6 * BPS, 4 + 6 * BPS, 7 * BPS, 4 + 7 * BPS)

    "mflo    %[count]                                   \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count)
    : [a]"r"(a), [b]"r"(b)
    : "memory", "hi", "lo"
  );
  return count;
}

static int SSE4x4_MIPS32(const uint8_t* WEBP_RESTRICT a,
                         const uint8_t* WEBP_RESTRICT b) {
  int count;
  int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

  __asm__ volatile(
     "mult   $zero,    $zero                            \n\t"

     GET_SSE(0 * BPS, 1 * BPS, 2 * BPS, 3 * BPS)

    "mflo    %[count]                                   \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count)
    : [a]"r"(a), [b]"r"(b)
    : "memory", "hi", "lo"
  );
  return count;
}

#undef GET_SSE
#undef GET_SSE_INNER

#endif  // !WORK_AROUND_GCC

//------------------------------------------------------------------------------
// Entry point

extern void VP8EncDspInitMIPS32(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitMIPS32(void) {
  VP8ITransform = ITransform_MIPS32;
  VP8FTransform = FTransform_MIPS32;

  VP8EncQuantizeBlock = QuantizeBlock_MIPS32;
  VP8EncQuantize2Blocks = Quantize2Blocks_MIPS32;

  VP8TDisto4x4 = Disto4x4_MIPS32;
  VP8TDisto16x16 = Disto16x16_MIPS32;

#if !defined(WORK_AROUND_GCC)
  VP8SSE16x16 = SSE16x16_MIPS32;
  VP8SSE8x8 = SSE8x8_MIPS32;
  VP8SSE16x8 = SSE16x8_MIPS32;
  VP8SSE4x4 = SSE4x4_MIPS32;
#endif
}

#else  // !WEBP_USE_MIPS32

WEBP_DSP_INIT_STUB(VP8EncDspInitMIPS32)

#endif  // WEBP_USE_MIPS32
682