/***********************************************************************
Copyright (c) 2006-2011, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "SigProc_FIX.h"
#include "define.h"
#include "tuning_parameters.h"
#include "pitch.h"

#define MAX_FRAME_SIZE      384     /* subfr_length * nb_subfr = ( 0.005 * 16000 + 16 ) * 4 = 384 */

#define QA                  25
#define N_BITS_HEAD_ROOM    2
#define MIN_RSHIFTS         -16
#define MAX_RSHIFTS         ( 32 - QA )
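/*
   Fixed-point formats used below (summary added for readability, derived from the code itself):
     - Correlations C_first_row / C_last_row / CAf / CAb are kept in Q( -rshifts ), where rshifts
       is chosen so that the zero-lag correlation C0 keeps about N_BITS_HEAD_ROOM bits of head room.
     - AR coefficients Af_QA[] are kept in QA = Q25; the output A_Q16[] is Q16.
     - Reflection coefficients rc_Q31 are Q31 and the inverse prediction gain invGain_Q30 is Q30.
*/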

/* Compute prediction coefficients and residual energy from the input signal (modified Burg method) */
void silk_burg_modified(
    opus_int32                  *res_nrg,          /* O    Residual energy                                          */
    opus_int                    *res_nrg_Q,        /* O    Residual energy Q value                                  */
    opus_int32                  A_Q16[],           /* O    Prediction coefficients (length order)                   */
    const opus_int16            x[],               /* I    Input signal, length: nb_subfr * ( D + subfr_length )    */
    const opus_int32            minInvGain_Q30,    /* I    Inverse of max prediction gain                           */
    const opus_int              subfr_length,      /* I    Input signal subframe length (incl. D preceding samples) */
    const opus_int              nb_subfr,          /* I    Number of subframes stacked in x                         */
    const opus_int              D                  /* I    Order                                                    */
)
{
    opus_int         k, n, s, lz, rshifts, rshifts_extra, reached_max_gain;
    opus_int32       C0, num, nrg, rc_Q31, invGain_Q30, Atmp_QA, Atmp1, tmp1, tmp2, x1, x2;
    const opus_int16 *x_ptr;
    opus_int32       C_first_row[ SILK_MAX_ORDER_LPC ];
    opus_int32       C_last_row[ SILK_MAX_ORDER_LPC ];
    opus_int32       Af_QA[ SILK_MAX_ORDER_LPC ];
    opus_int32       CAf[ SILK_MAX_ORDER_LPC + 1 ];
    opus_int32       CAb[ SILK_MAX_ORDER_LPC + 1 ];
    opus_int32       xcorr[ SILK_MAX_ORDER_LPC ];

    silk_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE );

    /* Compute autocorrelations, added over subframes */
    silk_sum_sqr_shift( &C0, &rshifts, x, nb_subfr * subfr_length );
    if( rshifts > MAX_RSHIFTS ) {
        C0 = silk_LSHIFT32( C0, rshifts - MAX_RSHIFTS );
        silk_assert( C0 > 0 );
        rshifts = MAX_RSHIFTS;
    } else {
        lz = silk_CLZ32( C0 ) - 1;
        rshifts_extra = N_BITS_HEAD_ROOM - lz;
        if( rshifts_extra > 0 ) {
            rshifts_extra = silk_min( rshifts_extra, MAX_RSHIFTS - rshifts );
            C0 = silk_RSHIFT32( C0, rshifts_extra );
        } else {
            rshifts_extra = silk_max( rshifts_extra, MIN_RSHIFTS - rshifts );
            C0 = silk_LSHIFT32( C0, -rshifts_extra );
        }
        rshifts += rshifts_extra;
    }
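    /* Regularize the zero-lag correlation: add FIND_LPC_COND_FAC * C0 plus one as a conditioning
       term (analogous to a small noise floor) before it is used in the recursion below */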
    CAb[ 0 ] = CAf[ 0 ] = C0 + silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ) + 1;    /* Q(-rshifts) */
    silk_memset( C_first_row, 0, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) );
    if( rshifts > 0 ) {
        for( s = 0; s < nb_subfr; s++ ) {
            x_ptr = x + s * subfr_length;
            for( n = 1; n < D + 1; n++ ) {
                C_first_row[ n - 1 ] += (opus_int32)silk_RSHIFT64(
                    silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n ), rshifts );
            }
        }
    } else {
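        /* No down-shift of the correlations is needed: compute the bulk of each correlation with
           celt_pitch_xcorr() and add the remaining tail products (last D samples) by hand below */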
        for( s = 0; s < nb_subfr; s++ ) {
            int i;
            opus_int32 d;
            x_ptr = x + s * subfr_length;
            celt_pitch_xcorr( x_ptr, x_ptr + 1, xcorr, subfr_length - D, D );
            for( n = 1; n < D + 1; n++ ) {
                for( i = n + subfr_length - D, d = 0; i < subfr_length; i++ )
                    d = MAC16_16( d, x_ptr[ i ], x_ptr[ i - n ] );
                xcorr[ n - 1 ] += d;
            }
            for( n = 1; n < D + 1; n++ ) {
                C_first_row[ n - 1 ] += silk_LSHIFT32( xcorr[ n - 1 ], -rshifts );
            }
        }
    }
    silk_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) );

    /* Initialize */
    CAb[ 0 ] = CAf[ 0 ] = C0 + silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ) + 1;    /* Q(-rshifts) */

    invGain_Q30 = (opus_int32)1 << 30;
    reached_max_gain = 0;
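    /* Order-recursive Burg loop: for each order n, estimate the next reflection coefficient from
       the forward/backward correlations and update the AR coefficients and correlation vectors */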
    for( n = 0; n < D; n++ ) {
        /* Update first row of correlation matrix (without first element) */
        /* Update last row of correlation matrix (without last element, stored in reversed order) */
        /* Update C * Af */
        /* Update C * flipud(Af) (stored in reversed order) */
        if( rshifts > -2 ) {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                x1   = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], 16 - rshifts );                      /* Q(16-rshifts) */
                x2   = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts );   /* Q(16-rshifts) */
                tmp1 =  silk_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 );                           /* Q(QA-16) */
                tmp2 =  silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 );        /* Q(QA-16) */
                for( k = 0; k < n; k++ ) {
                    C_first_row[ k ] = silk_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] );             /* Q( -rshifts ) */
                    C_last_row[ k ]  = silk_SMLAWB( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] );  /* Q( -rshifts ) */
                    Atmp_QA = Af_QA[ k ];
                    tmp1 = silk_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ] );                        /* Q(QA-16) */
                    tmp2 = silk_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] );             /* Q(QA-16) */
                }
                tmp1 = silk_LSHIFT32( -tmp1, 32 - QA - rshifts );                                   /* Q(16-rshifts) */
                tmp2 = silk_LSHIFT32( -tmp2, 32 - QA - rshifts );                                   /* Q(16-rshifts) */
                for( k = 0; k <= n; k++ ) {
                    CAf[ k ] = silk_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ] );                       /* Q( -rshifts ) */
                    CAb[ k ] = silk_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] );    /* Q( -rshifts ) */
                }
            }
        } else {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                x1   = -silk_LSHIFT32( (opus_int32)x_ptr[ n ], -rshifts );                          /* Q( -rshifts ) */
                x2   = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], -rshifts );       /* Q( -rshifts ) */
                tmp1 =  silk_LSHIFT32( (opus_int32)x_ptr[ n ], 17 );                                /* Q17 */
                tmp2 =  silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 17 );             /* Q17 */
                for( k = 0; k < n; k++ ) {
                    C_first_row[ k ] = silk_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] );            /* Q( -rshifts ) */
                    C_last_row[ k ]  = silk_MLA( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts ) */
                    Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 17 );                               /* Q17 */
                    tmp1 = silk_MLA( tmp1, x_ptr[ n - k - 1 ], Atmp1 );                             /* Q17 */
                    tmp2 = silk_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 );                  /* Q17 */
                }
                tmp1 = -tmp1;                                                                       /* Q17 */
                tmp2 = -tmp2;                                                                       /* Q17 */
                for( k = 0; k <= n; k++ ) {
                    CAf[ k ] = silk_SMLAWW( CAf[ k ], tmp1,
                        silk_LSHIFT32( (opus_int32)x_ptr[ n - k ], -rshifts - 1 ) );                /* Q( -rshifts ) */
                    CAb[ k ] = silk_SMLAWW( CAb[ k ], tmp2,
                        silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) ); /* Q( -rshifts ) */
                }
            }
        }

        /* Calculate numerator and denominator for the next order reflection (parcor) coefficient */
        tmp1 = C_first_row[ n ];                                                    /* Q( -rshifts ) */
        tmp2 = C_last_row[ n ];                                                     /* Q( -rshifts ) */
        num  = 0;                                                                   /* Q( -rshifts ) */
        nrg  = silk_ADD32( CAb[ 0 ], CAf[ 0 ] );                                    /* Q( 1-rshifts ) */
        for( k = 0; k < n; k++ ) {
            Atmp_QA = Af_QA[ k ];
            lz = silk_CLZ32( silk_abs( Atmp_QA ) ) - 1;
            lz = silk_min( 32 - QA, lz );
            Atmp1 = silk_LSHIFT32( Atmp_QA, lz );                                   /* Q( QA + lz ) */

            tmp1 = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( C_last_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz );   /* Q( -rshifts ) */
            tmp2 = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz );  /* Q( -rshifts ) */
            num  = silk_ADD_LSHIFT32( num,  silk_SMMUL( CAb[ n - k ], Atmp1 ), 32 - QA - lz );              /* Q( -rshifts ) */
            nrg  = silk_ADD_LSHIFT32( nrg,  silk_SMMUL( silk_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ),
                Atmp1 ), 32 - QA - lz );                                            /* Q( 1-rshifts ) */
        }
        CAf[ n + 1 ] = tmp1;                                                        /* Q( -rshifts ) */
        CAb[ n + 1 ] = tmp2;                                                        /* Q( -rshifts ) */
        num = silk_ADD32( num, tmp2 );                                              /* Q( -rshifts ) */
        num = silk_LSHIFT32( -num, 1 );                                             /* Q( 1-rshifts ) */

        /* Calculate the next order reflection (parcor) coefficient */
        if( silk_abs( num ) < nrg ) {
            rc_Q31 = silk_DIV32_varQ( num, nrg, 31 );
        } else {
            rc_Q31 = ( num > 0 ) ? silk_int32_MAX : silk_int32_MIN;
        }

        /* Update inverse prediction gain */
        tmp1 = ( (opus_int32)1 << 30 ) - silk_SMMUL( rc_Q31, rc_Q31 );
        tmp1 = silk_LSHIFT( silk_SMMUL( invGain_Q30, tmp1 ), 2 );
        if( tmp1 <= minInvGain_Q30 ) {
            /* Max prediction gain exceeded; set reflection coefficient such that max prediction gain is exactly hit */
            tmp2 = ( (opus_int32)1 << 30 ) - silk_DIV32_varQ( minInvGain_Q30, invGain_Q30, 30 );    /* Q30 */
            rc_Q31 = silk_SQRT_APPROX( tmp2 );                                      /* Q15 */
            /* Newton-Raphson iteration */
            rc_Q31 = silk_RSHIFT32( rc_Q31 + silk_DIV32( tmp2, rc_Q31 ), 1 );       /* Q15 */
            rc_Q31 = silk_LSHIFT32( rc_Q31, 16 );                                   /* Q31 */
            if( num < 0 ) {
                /* Ensure the adjusted reflection coefficient has the original sign */
                rc_Q31 = -rc_Q31;
            }
            invGain_Q30 = minInvGain_Q30;
            reached_max_gain = 1;
        } else {
            invGain_Q30 = tmp1;
        }

        /* Update the AR coefficients */
        for( k = 0; k < (n + 1) >> 1; k++ ) {
            tmp1 = Af_QA[ k ];                                                      /* QA */
            tmp2 = Af_QA[ n - k - 1 ];                                              /* QA */
            Af_QA[ k ]         = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 );      /* QA */
            Af_QA[ n - k - 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 );      /* QA */
        }
        Af_QA[ n ] = silk_RSHIFT32( rc_Q31, 31 - QA );                              /* QA */

        if( reached_max_gain ) {
            /* Reached max prediction gain; set remaining coefficients to zero and exit loop */
            for( k = n + 1; k < D; k++ ) {
                Af_QA[ k ] = 0;
            }
            break;
        }

        /* Update C * Af and C * Ab */
        for( k = 0; k <= n + 1; k++ ) {
            tmp1 = CAf[ k ];                                                        /* Q( -rshifts ) */
            tmp2 = CAb[ n - k + 1 ];                                                /* Q( -rshifts ) */
            CAf[ k ]         = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 );        /* Q( -rshifts ) */
            CAb[ n - k + 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 );        /* Q( -rshifts ) */
        }
    }

    if( reached_max_gain ) {
        for( k = 0; k < D; k++ ) {
            /* Scale coefficients */
            A_Q16[ k ] = -silk_RSHIFT_ROUND( Af_QA[ k ], QA - 16 );
        }
        /* Subtract energy of preceding samples from C0 */
        if( rshifts > 0 ) {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                C0 -= (opus_int32)silk_RSHIFT64( silk_inner_prod16_aligned_64( x_ptr, x_ptr, D ), rshifts );
            }
        } else {
            for( s = 0; s < nb_subfr; s++ ) {
                x_ptr = x + s * subfr_length;
                C0 -= silk_LSHIFT32( silk_inner_prod_aligned( x_ptr, x_ptr, D ), -rshifts );
            }
        }
        /* Approximate residual energy */
        *res_nrg = silk_LSHIFT( silk_SMMUL( invGain_Q30, C0 ), 2 );
        *res_nrg_Q = -rshifts;
    } else {
        /* Return residual energy */
        nrg  = CAf[ 0 ];                                                            /* Q( -rshifts ) */
        tmp1 = (opus_int32)1 << 16;                                                 /* Q16 */
        for( k = 0; k < D; k++ ) {
            Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 16 );                       /* Q16 */
            nrg  = silk_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 );                         /* Q( -rshifts ) */
            tmp1 = silk_SMLAWW( tmp1, Atmp1, Atmp1 );                               /* Q16 */
            A_Q16[ k ] = -Atmp1;
        }
        *res_nrg = silk_SMLAWW( nrg, silk_SMMUL( SILK_FIX_CONST( FIND_LPC_COND_FAC, 32 ), C0 ), -tmp1 );    /* Q( -rshifts ) */
        *res_nrg_Q = -rshifts;
    }
}
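
/*
   Illustrative usage sketch (added comment, not part of the original code). The values below are
   assumptions for a 16 kHz signal with four 5 ms subframes and prediction order D = 16; the caller
   normally derives minInvGain_Q30 from the maximum prediction gain it allows:

       opus_int32 res_nrg, A_Q16[ SILK_MAX_ORDER_LPC ];
       opus_int   res_nrg_Q;
       opus_int16 x[ MAX_FRAME_SIZE ];    (nb_subfr stacked subframes, each incl. D preceding samples)
       opus_int32 minInvGain_Q30 = ...;   (inverse of the maximum allowed prediction gain, Q30)

       silk_burg_modified( &res_nrg, &res_nrg_Q, A_Q16, x, minInvGain_Q30, 96, 4, 16 );

   On return, A_Q16[ 0 .. D - 1 ] holds the Q16 prediction coefficients, and the residual energy is
   res_nrg interpreted with Q value res_nrg_Q.
*/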