• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2011 Apple Inc. All rights reserved.
3  * Copyright (C) 2012-2014 Erik de Castro Lopo <erikd@mega-nerd.com>
4  *
5  * @APPLE_APACHE_LICENSE_HEADER_START@
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License") ;
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *     http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  * @APPLE_APACHE_LICENSE_HEADER_END@
20  */
21 
22 /*
23 	File:		matrix_dec.c
24 
25 	Contains:	ALAC mixing/matrixing decode routines.
26 
27 	Copyright:	(c) 2004-2011 Apple, Inc.
28 */
29 
30 #include "matrixlib.h"
31 #include "ALACAudioTypes.h"
32 #include "shift.h"
33 
34 // up to 24-bit "offset" macros for the individual bytes of a 20/24-bit word
35 #if TARGET_RT_BIG_ENDIAN
36 	#define LBYTE	2
37 	#define MBYTE	1
38 	#define HBYTE	0
39 #else
40 	#define LBYTE	0
41 	#define MBYTE	1
42 	#define HBYTE	2
43 #endif
44 
45 /*
46     There is no plain middle-side option ; instead there are various mixing
47     modes including middle-side, each lossless, as embodied in the mix ()
48     and unmix () functions.  These functions exploit a generalized middle-side
49     transformation:
50 
51     u := [(rL + (m-r)R)/m] ;
52     v := L - R ;
53 
54     where [ ] denotes integer floor.  The (lossless) inverse is
55 
56     L = u + v - [rV/m] ;
57     R = L - v ;
58 */
59 
60 // 16-bit routines
61 
/*
**	Decode (unmix) a pair of 16-bit channels.
**
**	u, v		: channel buffers from the predictor stage
**	out		: interleaved output, L at out [0] and R at out [1] per frame
**	stride	: int32_t slots to advance per output frame
**	numSamples	: number of sample frames
**	mixbits, mixres : matrixing parameters ; mixres == 0 means plain L/R stereo
**
**	Output samples are left-justified in 32 bits (shifted up by 16).
**	arith_shift_left () is used instead of a raw '<<' because left-shifting
**	a negative signed value is undefined behaviour in C.
*/
void
unmix16 (const int32_t * u, int32_t * v, int32_t * out, uint32_t stride, int32_t numSamples, int32_t mixbits, int32_t mixres)
{
	int32_t 	j ;

	if (mixres != 0)
	{
		/* matrixed stereo */
		for (j = 0 ; j < numSamples ; j++)
		{
			int32_t		l, r ;

			/* Inverse of the generalized middle-side transform (see header comment). */
			l = u [j] + v [j] - ((mixres * v [j]) >> mixbits) ;
			r = l - v [j] ;

			out [0] = arith_shift_left (l, 16) ;
			out [1] = arith_shift_left (r, 16) ;
			out += stride ;
		}
	}
	else
	{
		/* Conventional separated stereo. */
		for (j = 0 ; j < numSamples ; j++)
		{
			/* Was 'u [j] << 16' : raw shift of a negative sample is UB. */
			out [0] = arith_shift_left (u [j], 16) ;
			out [1] = arith_shift_left (v [j], 16) ;
			out += stride ;
		}
	}
}
93 
94 // 20-bit routines
95 // - the 20 bits of data are left-justified in 3 bytes of storage but right-aligned for input/output predictor buffers
96 
/*
**	Decode (unmix) a pair of 20-bit channels.
**
**	Same contract as unmix16 () except the decoded values are shifted up by
**	12 bits so the 20 significant bits end up left-justified in a 24-bit
**	output word (per the note above : 20-bit data is stored left-justified
**	in 3 bytes).
*/
void
unmix20 (const int32_t * u, int32_t * v, int32_t * out, uint32_t stride, int32_t numSamples, int32_t mixbits, int32_t mixres)
{
	int32_t 	k ;

	if (mixres == 0)
	{
		/* Plain separated stereo : just interleave and left-justify. */
		for (k = 0 ; k < numSamples ; k++)
		{
			out [0] = arith_shift_left (u [k], 12) ;
			out [1] = arith_shift_left (v [k], 12) ;
			out += stride ;
		}
		return ;
	}

	/* Matrixed stereo : invert the generalized middle-side transform. */
	for (k = 0 ; k < numSamples ; k++)
	{
		int32_t		left, right ;

		left = u [k] + v [k] - ((mixres * v [k]) >> mixbits) ;
		right = left - v [k] ;

		out [0] = arith_shift_left (left, 12) ;
		out [1] = arith_shift_left (right, 12) ;
		out += stride ;
	}
}
128 
129 // 24-bit routines
130 // - the 24 bits of data are right-justified in the input/output predictor buffers
131 
/*
**	Decode (unmix) a pair of 24-bit channels.
**
**	In addition to the unmix16 () parameters :
**	shiftUV	: interleaved low-order bytes that were split off before
**			  prediction (two uint16_t entries per frame, L then R)
**	bytesShifted : how many whole bytes were split off (0 means none)
**
**	Decoded values are recombined with the shifted-out low bits (when any)
**	and then left-justified by 8 bits in the 32-bit output word.
**	All shifts use arith_shift_left () ; a raw '<<' on a negative sample
**	is undefined behaviour in C (previously three of the four branches
**	used raw shifts).
*/
void
unmix24 (const int32_t * u, int32_t * v, int32_t * out, uint32_t stride, int32_t numSamples,
				int32_t mixbits, int32_t mixres, uint16_t * shiftUV, int32_t bytesShifted)
{
	int32_t		shift = bytesShifted * 8 ;
	int32_t		l, r ;
	int32_t 		j, k ;

	if (mixres != 0)
	{
		/* matrixed stereo */
		if (bytesShifted != 0)
		{
			for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
			{
				l = u [j] + v [j] - ((mixres * v [j]) >> mixbits) ;
				r = l - v [j] ;

				/* Re-attach the low-order bytes removed before prediction. */
				l = arith_shift_left (l, shift) | (uint32_t) shiftUV [k + 0] ;
				r = arith_shift_left (r, shift) | (uint32_t) shiftUV [k + 1] ;

				out [0] = arith_shift_left (l, 8) ;
				out [1] = arith_shift_left (r, 8) ;
				out += stride ;
			}
		}
		else
		{
			for (j = 0 ; j < numSamples ; j++)
			{
				l = u [j] + v [j] - ((mixres * v [j]) >> mixbits) ;
				r = l - v [j] ;

				/* Was 'l << 8' : raw shift of a negative sample is UB. */
				out [0] = arith_shift_left (l, 8) ;
				out [1] = arith_shift_left (r, 8) ;
				out += stride ;
			}
		}
	}
	else
	{
		/* Conventional separated stereo. */
		if (bytesShifted != 0)
		{
			for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
			{
				l = u [j] ;
				r = v [j] ;

				/* Was raw '<< shift' / '<< 8' : UB for negative samples. */
				l = arith_shift_left (l, shift) | (uint32_t) shiftUV [k + 0] ;
				r = arith_shift_left (r, shift) | (uint32_t) shiftUV [k + 1] ;

				out [0] = arith_shift_left (l, 8) ;
				out [1] = arith_shift_left (r, 8) ;
				out += stride ;
			}
		}
		else
		{
			for (j = 0 ; j < numSamples ; j++)
			{
				out [0] = arith_shift_left (u [j], 8) ;
				out [1] = arith_shift_left (v [j], 8) ;
				out += stride ;
			}
		}
	}
}
200 
201 // 32-bit routines
202 // - note that these really expect the internal data width to be < 32 but the arrays are 32-bit
203 // - otherwise, the calculations might overflow into the 33rd bit and be lost
204 // - therefore, these routines deal with the specified "unused lower" bytes in the "shift" buffers
205 
/*
**	Decode (unmix) a pair of 32-bit channels.
**
**	Same contract as unmix24 () but the recombined value fills the full
**	32-bit word, so no final left-justification by 8 is applied.  Per the
**	note above, the internal data width is expected to be < 32 bits with
**	the "unused lower" bytes carried in the shiftUV buffer.
**	arith_shift_left () is used throughout because a raw '<<' on a negative
**	signed value is undefined behaviour in C.
*/
void
unmix32 (const int32_t * u, int32_t * v, int32_t * out, uint32_t stride, int32_t numSamples,
				int32_t mixbits, int32_t mixres, uint16_t * shiftUV, int32_t bytesShifted)
{
	int32_t		shift = bytesShifted * 8 ;
	int32_t		l, r ;
	int32_t 	j, k ;

	if (mixres != 0)
	{
		//Assert (bytesShifted != 0) ;

		/* matrixed stereo with shift */
		for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
		{
			int32_t		lt, rt ;

			lt = u [j] ;
			rt = v [j] ;

			l = lt + rt - ((mixres * rt) >> mixbits) ;
			r = l - rt ;

			out [0] = arith_shift_left (l, shift) | (uint32_t) shiftUV [k + 0] ;
			out [1] = arith_shift_left (r, shift) | (uint32_t) shiftUV [k + 1] ;
			out += stride ;
		}
	}
	else
	{
		if (bytesShifted == 0)
		{
			/* interleaving w/o shift */
			for (j = 0 ; j < numSamples ; j++)
			{
				out [0] = u [j] ;
				out [1] = v [j] ;
				out += stride ;
			}
		}
		else
		{
			/* interleaving with shift */
			for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
			{
				/* Was raw 'u [j] << shift' : UB for negative samples. */
				out [0] = arith_shift_left (u [j], shift) | (uint32_t) shiftUV [k + 0] ;
				out [1] = arith_shift_left (v [j], shift) | (uint32_t) shiftUV [k + 1] ;
				out += stride ;
			}
		}
	}
}
258 
259 // 20/24-bit <-> 32-bit helper routines (not really matrixing but convenient to put here)
260 
261 void
copyPredictorTo24(const int32_t * in,int32_t * out,uint32_t stride,int32_t numSamples)262 copyPredictorTo24 (const int32_t * in, int32_t * out, uint32_t stride, int32_t numSamples)
263 {
264 	int32_t		j ;
265 
266 	for (j = 0 ; j < numSamples ; j++)
267 	{
268 		out [0] = in [j] << 8 ;
269 		out += stride ;
270 	}
271 }
272 
/*
**	Copy predictor values into a strided 24-bit output buffer, first
**	re-attaching the low-order bytes (shift [], bytesShifted bytes per
**	sample) that were split off before prediction, then left-justifying
**	the result by 8 bits.
*/
void
copyPredictorTo24Shift (const int32_t * in, uint16_t * shift, int32_t * out, uint32_t stride, int32_t numSamples, int32_t bytesShifted)
{
	const int32_t	shiftVal = bytesShifted * 8 ;
	int32_t *	op = out ;
	int32_t		k ;

	//Assert (bytesShifted != 0) ;

	for (k = 0 ; k < numSamples ; k++, op += stride)
	{
		int32_t	merged ;

		/* Recombine the sample with its shifted-out low bits. */
		merged = arith_shift_left (in [k], shiftVal) | (uint32_t) shift [k] ;
		op [0] = arith_shift_left (merged, 8) ;
	}
}
290 
/*
**	Copy right-aligned 20-bit predictor values into a strided output
**	buffer.  The 20-bit values are shifted up by 12 so they sit
**	left-aligned within the 24-bit output words.
*/
void
copyPredictorTo20 (const int32_t * in, int32_t * out, uint32_t stride, int32_t numSamples)
{
	int32_t *	op = out ;
	int32_t		k ;

	for (k = 0 ; k < numSamples ; k++)
	{
		op [0] = arith_shift_left (in [k], 12) ;
		op += stride ;
	}
}
304 
/*
**	Copy right-aligned 24-bit predictor values into a strided 32-bit
**	output buffer, left-justified by 8 bits.
*/
void
copyPredictorTo32 (const int32_t * in, int32_t * out, uint32_t stride, int32_t numSamples)
{
	int32_t		k ;

	/* this is only a subroutine to abstract the "iPod can only output 16-bit data" problem */
	for (k = 0 ; k < numSamples ; k++)
	{
		out [0] = arith_shift_left (in [k], 8) ;
		out += stride ;
	}
}
314 
/*
**	Copy predictor values into a strided 32-bit output buffer,
**	re-attaching the low-order bytes (shift [], bytesShifted bytes per
**	sample) that were split off before prediction.
*/
void
copyPredictorTo32Shift (const int32_t * in, uint16_t * shift, int32_t * out, uint32_t stride, int32_t numSamples, int32_t bytesShifted)
{
	uint32_t	shiftVal = bytesShifted * 8 ;
	uint32_t	slot ;
	int32_t		k ;

	//Assert (bytesShifted != 0) ;

	/* this is only a subroutine to abstract the "iPod can only output 16-bit data" problem */
	for (k = 0, slot = 0 ; k < numSamples ; k++, slot += stride)
		out [slot] = arith_shift_left (in [k], shiftVal) | (uint32_t) shift [k] ;
}
331