/***********************************************************************
Copyright (c) 2006-2011, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "main_FIX.h"
#include "tuning_parameters.h"

/* Head room for correlations */
#define LTP_CORRS_HEAD_ROOM                             2

void silk_fit_LTP(
    opus_int32 LTP_coefs_Q16[ LTP_ORDER ],
    opus_int16 LTP_coefs_Q14[ LTP_ORDER ]
);

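/* Find per-subframe LTP coefficients and weights for LTP quantization:
   estimates LTP_ORDER taps per subframe from the LPC residual (b_Q14), the weighting
   matrices used when quantizing them (WLTP) and, optionally, the LTP coding gain in Q7. */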
void silk_find_LTP_FIX(
    opus_int16                      b_Q14[ MAX_NB_SUBFR * LTP_ORDER ],      /* O    LTP coefs                                                                   */
    opus_int32                      WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O    Weight for LTP quantization                                           */
    opus_int                        *LTPredCodGain_Q7,                      /* O    LTP coding gain                                                             */
    const opus_int16                r_lpc[],                                /* I    residual signal after LPC signal + state for first 10 ms                   */
    const opus_int                  lag[ MAX_NB_SUBFR ],                    /* I    LTP lags                                                                    */
    const opus_int32                Wght_Q15[ MAX_NB_SUBFR ],               /* I    weights                                                                     */
    const opus_int                  subfr_length,                           /* I    subframe length                                                             */
    const opus_int                  nb_subfr,                               /* I    number of subframes                                                         */
    const opus_int                  mem_offset,                             /* I    number of samples in LTP memory                                             */
    opus_int                        corr_rshifts[ MAX_NB_SUBFR ],           /* O    right shifts applied to correlations                                        */
    int                             arch                                    /* I    Run-time architecture                                                       */
)
{
    opus_int   i, k, lshift;
    const opus_int16 *r_ptr, *lag_ptr;
    opus_int16 *b_Q14_ptr;

    opus_int32 regu;
    opus_int32 *WLTP_ptr;
    opus_int32 b_Q16[ LTP_ORDER ], delta_b_Q14[ LTP_ORDER ], d_Q14[ MAX_NB_SUBFR ], nrg[ MAX_NB_SUBFR ], g_Q26;
    opus_int32 w[ MAX_NB_SUBFR ], WLTP_max, max_abs_d_Q14, max_w_bits;

    opus_int32 temp32, denom32;
    opus_int   extra_shifts;
    opus_int   rr_shifts, maxRshifts, maxRshifts_wxtra, LZs;
    opus_int32 LPC_res_nrg, LPC_LTP_res_nrg, div_Q16;
    opus_int32 Rr[ LTP_ORDER ], rr[ MAX_NB_SUBFR ];
    opus_int32 wd, m_Q12;

    b_Q14_ptr = b_Q14;
    WLTP_ptr  = WLTP;
    r_ptr     = &r_lpc[ mem_offset ];
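    /* For each subframe: correlate the delayed residual against the current residual,
       regularize and solve the normal equations for the LTP taps, then compute the
       residual energy and the corresponding quantization weight. */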
    for( k = 0; k < nb_subfr; k++ ) {
        lag_ptr = r_ptr - ( lag[ k ] + LTP_ORDER / 2 );

        silk_sum_sqr_shift( &rr[ k ], &rr_shifts, r_ptr, subfr_length ); /* rr[ k ] in Q( -rr_shifts ) */

        /* Assure headroom */
        LZs = silk_CLZ32( rr[k] );
        if( LZs < LTP_CORRS_HEAD_ROOM ) {
            rr[ k ] = silk_RSHIFT_ROUND( rr[ k ], LTP_CORRS_HEAD_ROOM - LZs );
            rr_shifts += ( LTP_CORRS_HEAD_ROOM - LZs );
        }
        corr_rshifts[ k ] = rr_shifts;
        silk_corrMatrix_FIX( lag_ptr, subfr_length, LTP_ORDER, LTP_CORRS_HEAD_ROOM, WLTP_ptr, &corr_rshifts[ k ], arch );  /* WLTP_fix_ptr in Q( -corr_rshifts[ k ] ) */

        /* The correlation vector always has lower max abs value than rr and/or RR so head room is assured */
        silk_corrVector_FIX( lag_ptr, r_ptr, subfr_length, LTP_ORDER, Rr, corr_rshifts[ k ], arch );  /* Rr_fix_ptr   in Q( -corr_rshifts[ k ] ) */
        if( corr_rshifts[ k ] > rr_shifts ) {
            rr[ k ] = silk_RSHIFT( rr[ k ], corr_rshifts[ k ] - rr_shifts ); /* rr[ k ] in Q( -corr_rshifts[ k ] ) */
        }
        silk_assert( rr[ k ] >= 0 );

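        /* Regularize: add a small fraction ( LTP_DAMPING ) of the signal and correlation energies
           to the diagonal of WLTP and to rr[ k ], keeping the LDL solve below well conditioned */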
        regu = 1;
        regu = silk_SMLAWB( regu, rr[ k ], SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
        regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, 0, 0, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
        regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, LTP_ORDER-1, LTP_ORDER-1, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
        silk_regularize_correlations_FIX( WLTP_ptr, &rr[k], regu, LTP_ORDER );

        silk_solve_LDL_FIX( WLTP_ptr, LTP_ORDER, Rr, b_Q16 ); /* WLTP_fix_ptr and Rr_fix_ptr both in Q(-corr_rshifts[k]) */

        /* Limit and store in Q14 */
        silk_fit_LTP( b_Q16, b_Q14_ptr );

        /* Calculate residual energy */
        nrg[ k ] = silk_residual_energy16_covar_FIX( b_Q14_ptr, WLTP_ptr, Rr, rr[ k ], LTP_ORDER, 14 ); /* nrg_fix in Q( -corr_rshifts[ k ] ) */

        /* temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); */
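        /* The constant 655 below is 0.01 in Q16 ( 655 / 65536 ~= 0.01 ), matching the float expression above */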
        extra_shifts = silk_min_int( corr_rshifts[ k ], LTP_CORRS_HEAD_ROOM );
        denom32 = silk_LSHIFT_SAT32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 + extra_shifts ) + /* Q( -corr_rshifts[ k ] + extra_shifts ) */
            silk_RSHIFT( silk_SMULWB( (opus_int32)subfr_length, 655 ), corr_rshifts[ k ] - extra_shifts );    /* Q( -corr_rshifts[ k ] + extra_shifts ) */
        denom32 = silk_max( denom32, 1 );
        silk_assert( ((opus_int64)Wght_Q15[ k ] << 16 ) < silk_int32_MAX );                       /* Wght always < 0.5 in Q0 */
        temp32 = silk_DIV32( silk_LSHIFT( (opus_int32)Wght_Q15[ k ], 16 ), denom32 );             /* Q( 15 + 16 + corr_rshifts[k] - extra_shifts ) */
        temp32 = silk_RSHIFT( temp32, 31 + corr_rshifts[ k ] - extra_shifts - 26 );               /* Q26 */

        /* Limit temp such that the below scaling never wraps around */
        WLTP_max = 0;
        for( i = 0; i < LTP_ORDER * LTP_ORDER; i++ ) {
            WLTP_max = silk_max( WLTP_ptr[ i ], WLTP_max );
        }
        lshift = silk_CLZ32( WLTP_max ) - 1 - 3; /* keep 3 bits free for vq_nearest_neighbor_fix */
        silk_assert( 26 - 18 + lshift >= 0 );
        if( 26 - 18 + lshift < 31 ) {
            temp32 = silk_min_32( temp32, silk_LSHIFT( (opus_int32)1, 26 - 18 + lshift ) );
        }

        silk_scale_vector32_Q26_lshift_18( WLTP_ptr, temp32, LTP_ORDER * LTP_ORDER ); /* WLTP_ptr in Q( 18 - corr_rshifts[ k ] ) */

        w[ k ] = matrix_ptr( WLTP_ptr, LTP_ORDER/2, LTP_ORDER/2, LTP_ORDER ); /* w in Q( 18 - corr_rshifts[ k ] ) */
        silk_assert( w[k] >= 0 );

        r_ptr     += subfr_length;
        b_Q14_ptr += LTP_ORDER;
        WLTP_ptr  += LTP_ORDER * LTP_ORDER;
    }

    maxRshifts = 0;
    for( k = 0; k < nb_subfr; k++ ) {
        maxRshifts = silk_max_int( corr_rshifts[ k ], maxRshifts );
    }

    /* Compute LTP coding gain */
    if( LTPredCodGain_Q7 != NULL ) {
        LPC_LTP_res_nrg = 0;
        LPC_res_nrg     = 0;
        silk_assert( LTP_CORRS_HEAD_ROOM >= 2 ); /* Check that no overflow will happen when adding */
        for( k = 0; k < nb_subfr; k++ ) {
            LPC_res_nrg     = silk_ADD32( LPC_res_nrg,     silk_RSHIFT( silk_ADD32( silk_SMULWB(  rr[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /* Q( -maxRshifts ) */
            LPC_LTP_res_nrg = silk_ADD32( LPC_LTP_res_nrg, silk_RSHIFT( silk_ADD32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /* Q( -maxRshifts ) */
        }
        LPC_LTP_res_nrg = silk_max( LPC_LTP_res_nrg, 1 ); /* avoid division by zero */

        div_Q16 = silk_DIV32_varQ( LPC_res_nrg, LPC_LTP_res_nrg, 16 );
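        /* silk_lin2log() approximates 128 * log2( x ); subtracting 16 << 7 removes the Q16 scaling of
           div_Q16, so the result below is roughly 3 * 128 * log2( LPC_res_nrg / LPC_LTP_res_nrg ),
           i.e. approximately the prediction gain in dB, in Q7 */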
        *LTPredCodGain_Q7 = ( opus_int )silk_SMULBB( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) );

        silk_assert( *LTPredCodGain_Q7 == ( opus_int )silk_SAT16( silk_MUL( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) ) ) );
    }

    /* smoothing */
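    /* In effect, each subframe's coefficient sum d[ k ] is pulled towards the weighted average m
       over all subframes, limiting the variation of the LTP taps from subframe to subframe */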
    /* d = sum( B, 1 ); */
    b_Q14_ptr = b_Q14;
    for( k = 0; k < nb_subfr; k++ ) {
        d_Q14[ k ] = 0;
        for( i = 0; i < LTP_ORDER; i++ ) {
            d_Q14[ k ] += b_Q14_ptr[ i ];
        }
        b_Q14_ptr += LTP_ORDER;
    }

    /* m = ( w * d' ) / ( sum( w ) + 1e-3 ); */

    /* Find maximum absolute value of d_Q14 and the bits used by w in Q0 */
    max_abs_d_Q14 = 0;
    max_w_bits    = 0;
    for( k = 0; k < nb_subfr; k++ ) {
        max_abs_d_Q14 = silk_max_32( max_abs_d_Q14, silk_abs( d_Q14[ k ] ) );
        /* w[ k ] is in Q( 18 - corr_rshifts[ k ] ) */
        /* Find bits needed in Q( 18 - maxRshifts ) */
        max_w_bits = silk_max_32( max_w_bits, 32 - silk_CLZ32( w[ k ] ) + corr_rshifts[ k ] - maxRshifts );
    }

    /* max_abs_d_Q14 = (5 << 15); worst case, i.e. LTP_ORDER * -silk_int16_MIN */
    silk_assert( max_abs_d_Q14 <= ( 5 << 15 ) );

    /* How many bits are needed for w*d' in Q( 18 - maxRshifts ) in the worst case, i.e. all d_Q14's equal to max_abs_d_Q14 */
    extra_shifts = max_w_bits + 32 - silk_CLZ32( max_abs_d_Q14 ) - 14;

    /* Subtract what we have available: bits in the output variable plus maxRshifts */
    extra_shifts -= ( 32 - 1 - 2 + maxRshifts ); /* Keep sign bit free as well as 2 bits for accumulation */
    extra_shifts = silk_max_int( extra_shifts, 0 );

    maxRshifts_wxtra = maxRshifts + extra_shifts;

    temp32 = silk_RSHIFT( 262, maxRshifts + extra_shifts ) + 1; /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */
    wd = 0;
    for( k = 0; k < nb_subfr; k++ ) {
        /* w has at least 2 bits of headroom so no overflow should happen */
        temp32 = silk_ADD32( temp32,                     silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ) );                      /* Q( 18 - maxRshifts_wxtra ) */
        wd     = silk_ADD32( wd, silk_LSHIFT( silk_SMULWW( silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ), d_Q14[ k ] ), 2 ) ); /* Q( 18 - maxRshifts_wxtra ) */
    }
    m_Q12 = silk_DIV32_varQ( wd, temp32, 12 );

    b_Q14_ptr = b_Q14;
    for( k = 0; k < nb_subfr; k++ ) {
        /* w_fix[ k ] from Q( 18 - corr_rshifts[ k ] ) to Q( 16 ) */
        if( 2 - corr_rshifts[k] > 0 ) {
            temp32 = silk_RSHIFT( w[ k ], 2 - corr_rshifts[ k ] );
        } else {
            temp32 = silk_LSHIFT_SAT32( w[ k ], corr_rshifts[ k ] - 2 );
        }

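        /* Fixed-point form of roughly g = LTP_SMOOTHING / ( LTP_SMOOTHING + w[ k ] ) * ( m - d[ k ] ):
           subframes with a larger weight w[ k ] receive a smaller correction */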
        g_Q26 = silk_MUL(
            silk_DIV32(
                SILK_FIX_CONST( LTP_SMOOTHING, 26 ),
                silk_RSHIFT( SILK_FIX_CONST( LTP_SMOOTHING, 26 ), 10 ) + temp32 ),                          /* Q10 */
            silk_LSHIFT_SAT32( silk_SUB_SAT32( (opus_int32)m_Q12, silk_RSHIFT( d_Q14[ k ], 2 ) ), 4 ) );    /* Q16 */

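        /* Distribute the correction g over the taps in proportion to delta_b = max( b, 0.1 ),
           then add it to the taps with limiting */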
        temp32 = 0;
        for( i = 0; i < LTP_ORDER; i++ ) {
            delta_b_Q14[ i ] = silk_max_16( b_Q14_ptr[ i ], 1638 );     /* 1638_Q14 = 0.1_Q0 */
            temp32 += delta_b_Q14[ i ];                                 /* Q14 */
        }
        temp32 = silk_DIV32( g_Q26, temp32 );                           /* Q14 -> Q12 */
        for( i = 0; i < LTP_ORDER; i++ ) {
            b_Q14_ptr[ i ] = silk_LIMIT_32( (opus_int32)b_Q14_ptr[ i ] + silk_SMULWB( silk_LSHIFT_SAT32( temp32, 4 ), delta_b_Q14[ i ] ), -16000, 28000 );
        }
        b_Q14_ptr += LTP_ORDER;
    }
}

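/* Convert the Q16 output of the LDL solve to Q14 coefficients, with rounding and saturation to 16 bits */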
void silk_fit_LTP(
    opus_int32 LTP_coefs_Q16[ LTP_ORDER ],
    opus_int16 LTP_coefs_Q14[ LTP_ORDER ]
)
{
    opus_int i;

    for( i = 0; i < LTP_ORDER; i++ ) {
        LTP_coefs_Q14[ i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( LTP_coefs_Q16[ i ], 2 ) );
    }
}