// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// MIPS version of lossless functions
//
// Author(s):  Djordje Pesut    (djordje.pesut@imgtec.com)
//             Jovan Zelincevic (jovan.zelincevic@imgtec.com)

#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"

#if defined(WEBP_USE_MIPS32)

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

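// Slow path for VP8LFastSLog2(v): returns an approximation of v * log2(v)
// as a fixed-point value with LOG_2_PRECISION_BITS fractional bits.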
static uint64_t FastSLog2Slow_MIPS32(uint32_t v) {
  assert(v >= LOG_LOOKUP_IDX_MAX);
  if (v < APPROX_LOG_WITH_CORRECTION_MAX) {
    uint32_t log_cnt, y;
    uint64_t correction;
    const int c24 = 24;
    uint32_t temp;

    // Xf = 256 = 2^8
    // log_cnt is index of leading one in upper 24 bits
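    // In C terms the asm below computes (a sketch; v >= 256 here, so the
    // shift count is at least 1):
    //   log_cnt = 24 - clz(v);   // == floor(log2(v)) - 7
    //   y       = 1u << log_cnt;
    //   temp    = v >> log_cnt;  // top bits of v, in [128, 255]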
    __asm__ volatile(
      "clz      %[log_cnt], %[v]                      \n\t"
      "addiu    %[y],       $zero,        1           \n\t"
      "subu     %[log_cnt], %[c24],       %[log_cnt]  \n\t"
      "sllv     %[y],       %[y],         %[log_cnt]  \n\t"
      "srlv     %[temp],    %[v],         %[log_cnt]  \n\t"
      : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y),
        [temp]"=r"(temp)
      : [c24]"r"(c24), [v]"r"(v)
    );

    // vf = (2^log_cnt) * Xf; where y = 2^log_cnt and Xf < 256
    // Xf = floor(Xf) * (1 + (v % y) / v)
    // log2(Xf) = log2(floor(Xf)) + log2(1 + (v % y) / v)
    // The correction factor: log(1 + d) ~ d for very small d values, so
    // log2(1 + (v % y) / v) ~ LOG_2_RECIPROCAL * (v % y) / v

    // (v % y) = (v % 2^log_cnt) = v & (2^log_cnt - 1)
    correction = LOG_2_RECIPROCAL_FIXED * (v & (y - 1));
    return (uint64_t)v * (kLog2Table[temp] +
                          ((uint64_t)log_cnt << LOG_2_PRECISION_BITS)) +
           correction;
  } else {
    return (uint64_t)(LOG_2_RECIPROCAL_FIXED_DOUBLE * v * log((double)v) + .5);
  }
}

static uint32_t FastLog2Slow_MIPS32(uint32_t v) {
  assert(v >= LOG_LOOKUP_IDX_MAX);
  if (v < APPROX_LOG_WITH_CORRECTION_MAX) {
    uint32_t log_cnt, y;
    const int c24 = 24;
    uint32_t log_2;
    uint32_t temp;

    __asm__ volatile(
      "clz      %[log_cnt], %[v]                      \n\t"
      "addiu    %[y],       $zero,        1           \n\t"
      "subu     %[log_cnt], %[c24],       %[log_cnt]  \n\t"
      "sllv     %[y],       %[y],         %[log_cnt]  \n\t"
      "srlv     %[temp],    %[v],         %[log_cnt]  \n\t"
      : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y),
        [temp]"=r"(temp)
      : [c24]"r"(c24), [v]"r"(v)
    );

    log_2 = kLog2Table[temp] + (log_cnt << LOG_2_PRECISION_BITS);
    if (v >= APPROX_LOG_MAX) {
      // Since the division is still expensive, add this correction factor only
      // for large values of 'v'.
      const uint64_t correction = LOG_2_RECIPROCAL_FIXED * (v & (y - 1));
      log_2 += (uint32_t)DivRound(correction, v);
    }
    return log_2;
  } else {
    return (uint32_t)(LOG_2_RECIPROCAL_FIXED_DOUBLE * log((double)v) + .5);
  }
}

// C version of this function:
//   int i = 0;
//   int64_t cost = 0;
//   const uint32_t* pop = &population[4];
//   const uint32_t* LoopEnd = &population[length];
//   while (pop != LoopEnd) {
//     ++i;
//     cost += i * *pop;
//     cost += i * *(pop + 1);
//     pop += 2;
//   }
//   return cost;
static uint32_t ExtraCost_MIPS32(const uint32_t* const population,
                                 int length) {
  int i, temp0, temp1;
  const uint32_t* pop = &population[4];
  const uint32_t* const LoopEnd = &population[length];

  __asm__ volatile(
    "mult   $zero,    $zero                  \n\t"
    "xor    %[i],     %[i],       %[i]       \n\t"
    "beq    %[pop],   %[LoopEnd], 2f         \n\t"
  "1:                                        \n\t"
    "lw     %[temp0], 0(%[pop])              \n\t"
    "lw     %[temp1], 4(%[pop])              \n\t"
    "addiu  %[i],     %[i],       1          \n\t"
    "addiu  %[pop],   %[pop],     8          \n\t"
    "madd   %[i],     %[temp0]               \n\t"
    "madd   %[i],     %[temp1]               \n\t"
    "bne    %[pop],   %[LoopEnd], 1b         \n\t"
  "2:                                        \n\t"
    "mfhi   %[temp0]                         \n\t"
    "mflo   %[temp1]                         \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
      [i]"=&r"(i), [pop]"+r"(pop)
    : [LoopEnd]"r"(LoopEnd)
    : "memory", "hi", "lo"
  );

  return ((int64_t)temp0 << 32 | temp1);
}

// C version of this function:
//   int i = 0;
//   int64_t cost = 0;
//   const uint32_t* pX = &X[4];
//   const uint32_t* pY = &Y[4];
//   const uint32_t* LoopEnd = &X[length];
//   while (pX != LoopEnd) {
//     const uint32_t xy0 = *pX + *pY;
//     const uint32_t xy1 = *(pX + 1) + *(pY + 1);
//     ++i;
//     cost += i * xy0;
//     cost += i * xy1;
//     pX += 2;
//     pY += 2;
//   }
//   return cost;
static uint32_t ExtraCostCombined_MIPS32(const uint32_t* WEBP_RESTRICT const X,
                                         const uint32_t* WEBP_RESTRICT const Y,
                                         int length) {
  int i, temp0, temp1, temp2, temp3;
  const uint32_t* pX = &X[4];
  const uint32_t* pY = &Y[4];
  const uint32_t* const LoopEnd = &X[length];

  __asm__ volatile(
    "mult   $zero,    $zero                  \n\t"
    "xor    %[i],     %[i],       %[i]       \n\t"
    "beq    %[pX],    %[LoopEnd], 2f         \n\t"
  "1:                                        \n\t"
    "lw     %[temp0], 0(%[pX])               \n\t"
    "lw     %[temp1], 0(%[pY])               \n\t"
    "lw     %[temp2], 4(%[pX])               \n\t"
    "lw     %[temp3], 4(%[pY])               \n\t"
    "addiu  %[i],     %[i],       1          \n\t"
    "addu   %[temp0], %[temp0],   %[temp1]   \n\t"
    "addu   %[temp2], %[temp2],   %[temp3]   \n\t"
    "addiu  %[pX],    %[pX],      8          \n\t"
    "addiu  %[pY],    %[pY],      8          \n\t"
    "madd   %[i],     %[temp0]               \n\t"
    "madd   %[i],     %[temp2]               \n\t"
    "bne    %[pX],    %[LoopEnd], 1b         \n\t"
  "2:                                        \n\t"
    "mfhi   %[temp0]                         \n\t"
    "mflo   %[temp1]                         \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
      [i]"=&r"(i), [pX]"+r"(pX), [pY]"+r"(pY)
    : [LoopEnd]"r"(LoopEnd)
    : "memory", "hi", "lo"
  );

  return ((int64_t)temp0 << 32 | temp1);
}

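// C version of the HUFFMAN_COST_PASS macro below (a reference sketch, not
// part of the build; 'temp0' is 0 or 1 and selects the zero/non-zero bucket):
//   if (streak > 3) {
//     stats->streaks[temp0][1] += streak;
//     ++stats->counts[temp0];
//   } else {
//     stats->streaks[temp0][0] += streak;
//   }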
#define HUFFMAN_COST_PASS                                 \
  __asm__ volatile(                                       \
    "sll   %[temp1],  %[temp0],    3           \n\t"      \
    "addiu %[temp3],  %[streak],   -3          \n\t"      \
    "addu  %[temp2],  %[pstreaks], %[temp1]    \n\t"      \
    "blez  %[temp3],  1f                       \n\t"      \
    "srl   %[temp1],  %[temp1],    1           \n\t"      \
    "addu  %[temp3],  %[pcnts],    %[temp1]    \n\t"      \
    "lw    %[temp0],  4(%[temp2])              \n\t"      \
    "lw    %[temp1],  0(%[temp3])              \n\t"      \
    "addu  %[temp0],  %[temp0],    %[streak]   \n\t"      \
    "addiu %[temp1],  %[temp1],    1           \n\t"      \
    "sw    %[temp0],  4(%[temp2])              \n\t"      \
    "sw    %[temp1],  0(%[temp3])              \n\t"      \
    "b     2f                                  \n\t"      \
  "1:                                          \n\t"      \
    "lw    %[temp0],  0(%[temp2])              \n\t"      \
    "addu  %[temp0],  %[temp0],    %[streak]   \n\t"      \
    "sw    %[temp0],  0(%[temp2])              \n\t"      \
  "2:                                          \n\t"      \
    : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),           \
      [temp3]"=&r"(temp3), [temp0]"+r"(temp0)             \
    : [pstreaks]"r"(pstreaks), [pcnts]"r"(pcnts),         \
      [streak]"r"(streak)                                 \
    : "memory"                                            \
  );

// Returns the various RLE counts
static WEBP_INLINE void GetEntropyUnrefinedHelper(
    uint32_t val, int i, uint32_t* WEBP_RESTRICT const val_prev,
    int* WEBP_RESTRICT const i_prev,
    VP8LBitEntropy* WEBP_RESTRICT const bit_entropy,
    VP8LStreaks* WEBP_RESTRICT const stats) {
  int* const pstreaks = &stats->streaks[0][0];
  int* const pcnts = &stats->counts[0];
  int temp0, temp1, temp2, temp3;
  const int streak = i - *i_prev;

  // Gather info for the bit entropy.
  if (*val_prev != 0) {
    bit_entropy->sum += (*val_prev) * streak;
    bit_entropy->nonzeros += streak;
    bit_entropy->nonzero_code = *i_prev;
    bit_entropy->entropy += VP8LFastSLog2(*val_prev) * streak;
    if (bit_entropy->max_val < *val_prev) {
      bit_entropy->max_val = *val_prev;
    }
  }

  // Gather info for the Huffman cost.
  temp0 = (*val_prev != 0);
  HUFFMAN_COST_PASS

  *val_prev = val;
  *i_prev = i;
}

static void GetEntropyUnrefined_MIPS32(
    const uint32_t X[], int length,
    VP8LBitEntropy* WEBP_RESTRICT const bit_entropy,
    VP8LStreaks* WEBP_RESTRICT const stats) {
  int i;
  int i_prev = 0;
  uint32_t x_prev = X[0];

  memset(stats, 0, sizeof(*stats));
  VP8LBitEntropyInit(bit_entropy);

  for (i = 1; i < length; ++i) {
    const uint32_t x = X[i];
    if (x != x_prev) {
      GetEntropyUnrefinedHelper(x, i, &x_prev, &i_prev, bit_entropy, stats);
    }
  }
  GetEntropyUnrefinedHelper(0, i, &x_prev, &i_prev, bit_entropy, stats);

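  // Finalize: since VP8LFastSLog2(x) approximates x * log2(x) (fixed point),
  // this yields sum * log2(sum) - sum_i(x_i * log2(x_i)), i.e. the
  // unnormalized Shannon entropy of X.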
  bit_entropy->entropy = VP8LFastSLog2(bit_entropy->sum) - bit_entropy->entropy;
}

static void GetCombinedEntropyUnrefined_MIPS32(
    const uint32_t X[], const uint32_t Y[], int length,
    VP8LBitEntropy* WEBP_RESTRICT const entropy,
    VP8LStreaks* WEBP_RESTRICT const stats) {
  int i = 1;
  int i_prev = 0;
  uint32_t xy_prev = X[0] + Y[0];

  memset(stats, 0, sizeof(*stats));
  VP8LBitEntropyInit(entropy);

  for (i = 1; i < length; ++i) {
    const uint32_t xy = X[i] + Y[i];
    if (xy != xy_prev) {
      GetEntropyUnrefinedHelper(xy, i, &xy_prev, &i_prev, entropy, stats);
    }
  }
  GetEntropyUnrefinedHelper(0, i, &xy_prev, &i_prev, entropy, stats);

  entropy->entropy = VP8LFastSLog2(entropy->sum) - entropy->entropy;
}

#define ASM_START                                       \
  __asm__ volatile(                                     \
    ".set   push                            \n\t"       \
    ".set   at                              \n\t"       \
    ".set   macro                           \n\t"       \
  "1:                                       \n\t"
// P2 = P0 + P1
// A..D - offsets
// E - set to 1 if the second input pointer (P1) should also be incremented,
//     0 otherwise (e.g. when P1 aliases the output)
// literal_ and successive histograms could be unaligned,
// so we must use ulw and usw.
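// Roughly equivalent C for one ADD_TO_OUT(A, B, C, D, E, P0, P1, P2) pass
// (a reference sketch, not compiled):
//   P2[0] = P0[0] + P1[0];  P2[1] = P0[1] + P1[1];
//   P2[2] = P0[2] + P1[2];  P2[3] = P0[3] + P1[3];
//   P0 += 4; P2 += 4;
//   if (E == 1) P1 += 4;  // P1 is left alone when it aliases P2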
#define ADD_TO_OUT(A, B, C, D, E, P0, P1, P2)           \
    "ulw    %[temp0], " #A "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp1], " #B "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp2], " #C "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp3], " #D "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp4], " #A "(%[" #P1 "])    \n\t"       \
    "ulw    %[temp5], " #B "(%[" #P1 "])    \n\t"       \
    "ulw    %[temp6], " #C "(%[" #P1 "])    \n\t"       \
    "ulw    %[temp7], " #D "(%[" #P1 "])    \n\t"       \
    "addu   %[temp4], %[temp4],   %[temp0]  \n\t"       \
    "addu   %[temp5], %[temp5],   %[temp1]  \n\t"       \
    "addu   %[temp6], %[temp6],   %[temp2]  \n\t"       \
    "addu   %[temp7], %[temp7],   %[temp3]  \n\t"       \
    "addiu  %[" #P0 "],  %[" #P0 "],  16    \n\t"       \
  ".if " #E " == 1                          \n\t"       \
    "addiu  %[" #P1 "],  %[" #P1 "],  16    \n\t"       \
  ".endif                                   \n\t"       \
    "usw    %[temp4], " #A "(%[" #P2 "])    \n\t"       \
    "usw    %[temp5], " #B "(%[" #P2 "])    \n\t"       \
    "usw    %[temp6], " #C "(%[" #P2 "])    \n\t"       \
    "usw    %[temp7], " #D "(%[" #P2 "])    \n\t"       \
    "addiu  %[" #P2 "], %[" #P2 "],   16    \n\t"       \
    "bne    %[" #P0 "], %[LoopEnd], 1b      \n\t"       \
    ".set   pop                             \n\t"       \

#define ASM_END_COMMON_0                                \
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),         \
      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),         \
      [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),         \
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),         \
      [pa]"+r"(pa), [pout]"+r"(pout)

#define ASM_END_COMMON_1                                \
    : [LoopEnd]"r"(LoopEnd)                             \
    : "memory", "at"                                    \
  );

#define ASM_END_0                                       \
    ASM_END_COMMON_0                                    \
      , [pb]"+r"(pb)                                    \
    ASM_END_COMMON_1

#define ASM_END_1                                       \
    ASM_END_COMMON_0                                    \
    ASM_END_COMMON_1

static void AddVector_MIPS32(const uint32_t* WEBP_RESTRICT pa,
                             const uint32_t* WEBP_RESTRICT pb,
                             uint32_t* WEBP_RESTRICT pout, int size) {
  uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
  const int end = ((size) / 4) * 4;
  const uint32_t* const LoopEnd = pa + end;
  int i;
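  // Note: the asm loop runs do-while style (the exit test is at the bottom),
  // so one 4-element batch is always processed; size >= 4 is assumed here and
  // in AddVectorEq_MIPS32 below.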
  ASM_START
  ADD_TO_OUT(0, 4, 8, 12, 1, pa, pb, pout)
  ASM_END_0
  for (i = 0; i < size - end; ++i) pout[i] = pa[i] + pb[i];
}

static void AddVectorEq_MIPS32(const uint32_t* WEBP_RESTRICT pa,
                               uint32_t* WEBP_RESTRICT pout, int size) {
  uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
  const int end = ((size) / 4) * 4;
  const uint32_t* const LoopEnd = pa + end;
  int i;
  ASM_START
  ADD_TO_OUT(0, 4, 8, 12, 0, pa, pout, pout)
  ASM_END_1
  for (i = 0; i < size - end; ++i) pout[i] += pa[i];
}

#undef ASM_END_1
#undef ASM_END_0
#undef ASM_END_COMMON_1
#undef ASM_END_COMMON_0
#undef ADD_TO_OUT
#undef ASM_START

//------------------------------------------------------------------------------
// Entry point

extern void VP8LEncDspInitMIPS32(void);

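// Presumably reached via VP8LEncDspInit() (src/dsp/lossless_enc.c), which
// dispatches to this initializer when a MIPS32 CPU is detected.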
WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitMIPS32(void) {
  VP8LFastSLog2Slow = FastSLog2Slow_MIPS32;
  VP8LFastLog2Slow = FastLog2Slow_MIPS32;
  VP8LExtraCost = ExtraCost_MIPS32;
  VP8LExtraCostCombined = ExtraCostCombined_MIPS32;
  VP8LGetEntropyUnrefined = GetEntropyUnrefined_MIPS32;
  VP8LGetCombinedEntropyUnrefined = GetCombinedEntropyUnrefined_MIPS32;
  VP8LAddVector = AddVector_MIPS32;
  VP8LAddVectorEq = AddVectorEq_MIPS32;
}

#else  // !WEBP_USE_MIPS32

WEBP_DSP_INIT_STUB(VP8LEncDspInitMIPS32)

#endif  // WEBP_USE_MIPS32