/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2009 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "mathops.h"
#include "cwrs.h"
#include "vq.h"
#include "arch.h"
#include "os_support.h"
#include "bands.h"
#include "rate.h"
#include "pitch.h"

#if defined(MIPSr1_ASM)
#include "mips/vq_mipsr1.h"
#endif

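/** One pass of the spreading rotation: applies a Givens rotation with
    c=cos(theta) and s=sin(theta) to each pair of samples `stride` apart,
    first in a forward pass and then in a backward pass over the vector.
    Platforms can supply their own version via OVERRIDE_vq_exp_rotation1
    (e.g. mips/vq_mipsr1.h). */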
#ifndef OVERRIDE_vq_exp_rotation1
static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   opus_val16 ms;
   celt_norm *Xptr;
   Xptr = X;
   ms = NEG16(s);
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr++ = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr-- = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
}
#endif /* OVERRIDE_vq_exp_rotation1 */

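/** Spreading rotation applied before PVQ quantisation (dir=+1) and inverted
    after decoding (dir=-1) to avoid overly sparse, tonal-sounding pulse
    vectors. The angle shrinks as the pulse density K/len grows, and no
    rotation at all is applied for SPREAD_NONE or when K is large. */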
void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
{
   static const int SPREAD_FACTOR[3]={15,10,5};
   int i;
   opus_val16 c, s;
   opus_val16 gain, theta;
   int stride2=0;
   int factor;

   if (2*K>=len || spread==SPREAD_NONE)
      return;
   factor = SPREAD_FACTOR[spread-1];

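   /* gain = len/(len+factor*K) in Q15 and theta = gain^2/2, so more pulses
      per sample (larger K/len) give a smaller rotation angle. */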
   gain = celt_div((opus_val32)MULT16_16(Q15ONE,len),(opus_val32)(len+factor*K));
   theta = HALF16(MULT16_16_Q15(gain,gain));

   c = celt_cos_norm(EXTEND32(theta));
   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /* sin(theta) */

   if (len>=8*stride)
   {
      stride2 = 1;
      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
         It's basically incrementing stride2 as long as (stride2+0.5)^2 < len/stride. */
      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
         stride2++;
   }
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
     extract_collapse_mask().*/
   len = celt_udiv(len, stride);
   for (i=0;i<stride;i++)
   {
      if (dir < 0)
      {
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, c);
         exp_rotation1(X+i*len, len, 1, c, s);
      } else {
         exp_rotation1(X+i*len, len, 1, c, -s);
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, -c);
      }
   }
}

/** Takes the decoded pulse vector iy and its energy Ryy, computes the gain
    g = gain/sqrt(Ryy) that gives the output a norm of `gain`, and writes the
    scaled result to X. */
static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
      int N, opus_val32 Ryy, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 t;
   opus_val16 g;

#ifdef FIXED_POINT
   k = celt_ilog2(Ryy)>>1;
#endif
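   /* k halves log2(Ryy) so that VSHR32() below scales Ryy into the input
      range of celt_rsqrt_norm(); the k+1 shift at the end undoes this.
      In float builds the shift macros are no-ops and k is not needed. */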
   t = VSHR32(Ryy, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   i=0;
   do
      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
   while (++i < N);
}

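/** Returns a mask with one bit per length-N/B block of iy: bit i is set iff
    block i contains at least one pulse. Used by the caller to detect which
    parts of a band have collapsed to zero. */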
static unsigned extract_collapse_mask(int *iy, int N, int B)
{
   unsigned collapse_mask;
   int N0;
   int i;
   if (B<=1)
      return 1;
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
     exp_rotation().*/
   N0 = celt_udiv(N, B);
   collapse_mask = 0;
   i=0; do {
      int j;
      unsigned tmp=0;
      j=0; do {
         tmp |= iy[i*N0+j];
      } while (++j<N0);
      collapse_mask |= (tmp!=0)<<i;
   } while (++i<B);
   return collapse_mask;
}

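/** PVQ codebook search: finds the integer vector iy with sum(abs(iy[j]))==K
    that greedily maximises the normalised correlation xy/sqrt(yy) with X.
    Returns yy, the energy sum(iy[j]^2) of the chosen vector. The arch
    argument is unused in this generic C version. */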
opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch)
{
   VARDECL(celt_norm, y);
   VARDECL(int, signx);
   int i, j;
   int pulsesLeft;
   opus_val32 sum;
   opus_val32 xy;
   opus_val16 yy;
   SAVE_STACK;

   (void)arch;
   ALLOC(y, N, celt_norm);
   ALLOC(signx, N, int);

   /* Get rid of the sign */
   sum = 0;
   j=0; do {
      signx[j] = X[j]<0;
      /* OPT: Make sure the compiler doesn't use a branch on ABS16(). */
      X[j] = ABS16(X[j]);
      iy[j] = 0;
      y[j] = 0;
   } while (++j<N);

   xy = yy = 0;

   pulsesLeft = K;

   /* Do a pre-search by projecting on the pyramid */
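   /* The projection places iy[j] ~= K*X[j]/sum(X) pulses at once, rounding
      toward zero so that no more than K pulses ever get allocated; the
      greedy loop below distributes the few that remain. It only pays off
      when K is large relative to N. */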
   if (K > (N>>1))
   {
      opus_val16 rcp;
      j=0; do {
         sum += X[j];
      } while (++j<N);

      /* If X is too small, just replace it with a pulse at 0 */
#ifdef FIXED_POINT
      if (sum <= K)
#else
      /* Prevents infinities and NaNs from causing too many pulses
         to be allocated. 64 is an approximation of infinity here. */
      if (!(sum > EPSILON && sum < 64))
#endif
      {
         X[0] = QCONST16(1.f,14);
         j=1; do
            X[j]=0;
         while (++j<N);
         sum = QCONST16(1.f,14);
      }
#ifdef FIXED_POINT
      rcp = EXTRACT16(MULT16_32_Q16(K, celt_rcp(sum)));
#else
      /* Using K+e with e < 1 guarantees we cannot get more than K pulses. */
      rcp = EXTRACT16(MULT16_32_Q16(K+0.8f, celt_rcp(sum)));
#endif
      j=0; do {
#ifdef FIXED_POINT
         /* It's really important to round *towards zero* here */
         iy[j] = MULT16_16_Q15(X[j],rcp);
#else
         iy[j] = (int)floor(rcp*X[j]);
#endif
         y[j] = (celt_norm)iy[j];
         yy = MAC16_16(yy, y[j],y[j]);
         xy = MAC16_16(xy, X[j],y[j]);
         y[j] *= 2;
         pulsesLeft -= iy[j];
      } while (++j<N);
   }
   celt_sig_assert(pulsesLeft>=0);

   /* This should never happen, but just in case it does (e.g. on silence)
      we fill the first bin with pulses. */
#ifdef FIXED_POINT_DEBUG
   celt_sig_assert(pulsesLeft<=N+3);
#endif
   if (pulsesLeft > N+3)
   {
      opus_val16 tmp = (opus_val16)pulsesLeft;
      yy = MAC16_16(yy, tmp, tmp);
      yy = MAC16_16(yy, tmp, y[0]);
      iy[0] += pulsesLeft;
      pulsesLeft=0;
   }

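   /* Greedy search: add the remaining pulses one at a time, each time at the
      position that maximises Rxy^2/Ryy, the squared correlation after the
      pulse is added. The comparison uses cross-multiplication to avoid a
      division in the inner loop. */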
   for (i=0;i<pulsesLeft;i++)
   {
      opus_val16 Rxy, Ryy;
      int best_id;
      opus_val32 best_num;
      opus_val16 best_den;
#ifdef FIXED_POINT
      int rshift;
#endif
#ifdef FIXED_POINT
      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
#endif
      best_id = 0;
      /* The squared magnitude term gets added anyway, so we might as well
         add it outside the loop */
      yy = ADD16(yy, 1);

      /* Calculations for position 0 are out of the loop, in part to reduce
         mispredicted branches (since the if condition is usually false)
         in the loop. */
      /* Temporary sums of the new pulse(s) */
      Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[0])),rshift));
      /* We're multiplying y[j] by two so we don't have to do it here */
      Ryy = ADD16(yy, y[0]);

      /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
         Rxy is positive because the sign is pre-computed) */
      Rxy = MULT16_16_Q15(Rxy,Rxy);
      best_den = Ryy;
      best_num = Rxy;
      j=1;
      do {
         /* Temporary sums of the new pulse(s) */
         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
         /* We're multiplying y[j] by two so we don't have to do it here */
         Ryy = ADD16(yy, y[j]);

         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
            Rxy is positive because the sign is pre-computed) */
         Rxy = MULT16_16_Q15(Rxy,Rxy);
         /* The idea is to check for num/den >= best_num/best_den, but that way
            we can do it without any division */
         /* OPT: It's not clear whether a cmov is faster than a branch here
            since the condition is more often false than true and using
            a cmov introduces data dependencies across iterations. The optimal
            choice may be architecture-dependent. */
         if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num)))
         {
            best_den = Ryy;
            best_num = Rxy;
            best_id = j;
         }
      } while (++j<N);

      /* Updating the sums of the new pulse(s) */
      xy = ADD32(xy, EXTEND32(X[best_id]));
      /* We're multiplying y[j] by two so we don't have to do it here */
      yy = ADD16(yy, y[best_id]);

      /* Only now that we've made the final choice, update y/iy */
      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
      y[best_id] += 2;
      iy[best_id]++;
   }

   /* Put the original sign back */
   j=0;
   do {
      /*iy[j] = signx[j] ? -iy[j] : iy[j];*/
      /* OPT: This is more likely to be compiled without a branch than the code above
         but has the same performance otherwise. */
      iy[j] = (iy[j]^-signx[j]) + signx[j];
   } while (++j<N);
   RESTORE_STACK;
   return yy;
}

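/** Quantises X as a sum of K signed pulses (PVQ): applies the spreading
    rotation, runs the codebook search, entropy-codes the pulse positions,
    and, when resynth is set, rebuilds the normalised vector exactly as the
    decoder will. Returns the collapse mask for this band. */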
unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc,
      opus_val16 gain, int resynth, int arch)
{
   VARDECL(int, iy);
   opus_val16 yy;
   unsigned collapse_mask;
   SAVE_STACK;

   celt_assert2(K>0, "alg_quant() needs at least one pulse");
   celt_assert2(N>1, "alg_quant() needs at least two dimensions");

   /* Covers vectorization by up to 4. */
   ALLOC(iy, N+3, int);

   exp_rotation(X, N, 1, B, K, spread);

   yy = op_pvq_search(X, iy, K, N, arch);

   encode_pulses(iy, N, K, enc);

   if (resynth)
   {
      normalise_residual(iy, X, N, yy, gain);
      exp_rotation(X, N, -1, B, K, spread);
   }

   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}


/** Decodes the pulse vector and scales it to produce the final normalised
    signal for the current band. */
unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
      ec_dec *dec, opus_val16 gain)
{
   opus_val32 Ryy;
   unsigned collapse_mask;
   VARDECL(int, iy);
   SAVE_STACK;

   celt_assert2(K>0, "alg_unquant() needs at least one pulse");
   celt_assert2(N>1, "alg_unquant() needs at least two dimensions");
   ALLOC(iy, N, int);
   Ryy = decode_pulses(iy, N, K, dec);
   normalise_residual(iy, X, N, Ryy, gain);
   exp_rotation(X, N, -1, B, K, spread);
   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

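/** Rescales X so that its norm becomes `gain`: g = gain/sqrt(EPSILON + <X,X>)
    is computed once and every sample is multiplied by it. Platforms can
    supply their own version via OVERRIDE_renormalise_vector. */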
#ifndef OVERRIDE_renormalise_vector
void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr;
   E = EPSILON + celt_inner_prod(X, X, N, arch);
#ifdef FIXED_POINT
   k = celt_ilog2(E)>>1;
#endif
   t = VSHR32(E, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
   /*return celt_sqrt(E);*/
}
#endif /* OVERRIDE_renormalise_vector */

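/** Returns the mid/side angle in Q14, with 16384 representing pi/2:
    itheta = round(16384*(2/pi)*atan2(sqrt(Eside), sqrt(Emid))). When stereo
    is set, mid and side are first derived from the X/Y pair; otherwise X
    and Y are already the two vectors being compared. */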
int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int arch)
{
   int i;
   int itheta;
   opus_val16 mid, side;
   opus_val32 Emid, Eside;

   Emid = Eside = EPSILON;
   if (stereo)
   {
      for (i=0;i<N;i++)
      {
         celt_norm m, s;
         m = ADD16(SHR16(X[i],1),SHR16(Y[i],1));
         s = SUB16(SHR16(X[i],1),SHR16(Y[i],1));
         Emid = MAC16_16(Emid, m, m);
         Eside = MAC16_16(Eside, s, s);
      }
   } else {
      Emid += celt_inner_prod(X, X, N, arch);
      Eside += celt_inner_prod(Y, Y, N, arch);
   }
   mid = celt_sqrt(Emid);
   side = celt_sqrt(Eside);
#ifdef FIXED_POINT
   /* 0.63662 = 2/pi */
   itheta = MULT16_16_Q15(QCONST16(0.63662f,15),celt_atan2p(side, mid));
#else
   itheta = (int)floor(.5f+16384*0.63662f*fast_atan2f(side,mid));
#endif

   return itheta;
}