@ Tremolo library
@-----------------------------------------------------------------------
@ Copyright (C) 2002-2009, Xiph.org Foundation
@ Copyright (C) 2010, Robin Watts for Pinknoise Productions Ltd
@ All rights reserved.

@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions
@ are met:

@     * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@     * Redistributions in binary form must reproduce the above
@ copyright notice, this list of conditions and the following disclaimer
@ in the documentation and/or other materials provided with the
@ distribution.
@     * Neither the names of the Xiph.org Foundation nor Pinknoise
@ Productions Ltd nor the names of its contributors may be used to
@ endorse or promote products derived from this software without
@ specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
@ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
@ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
@ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ ----------------------------------------------------------------------

	.text

	@ full accuracy version

	.global	mdct_backwardARM
	.global	mdct_shift_right
	.global	mdct_unroll_prelap
	.global	mdct_unroll_part2
	.global	mdct_unroll_part3
	.global	mdct_unroll_postlap

	.extern	sincos_lookup0
	.extern	sincos_lookup1

mdct_unroll_prelap:
	@ r0 = out
	@ r1 = post
	@ r2 = r
	@ r3 = step
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r2, r1		@ r1 = r - post
	SUBS	r1, r1, #16		@ r1 = r - post - 16
	BLT	unroll_over
unroll_loop:
	LDMDB	r2!,{r5,r6,r7,r12}

	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r6, r6, ASR #9		@ r6 = (*--r)>>9
	MOV	r7, r7, ASR #9		@ r7 = (*--r)>>9
	MOV	r12,r12,ASR #9		@ r12= (*--r)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop

unroll_over:
	ADDS	r1, r1, #16
	BLE	unroll_end
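	@ The MOV/TEQ/EORNE sequences in these unroll routines saturate a
	@ 32-bit value to a signed 16-bit sample before the STRH: r4 holds
	@ 0xffff7fff, so the EOR yields 0x7fff on positive overflow and
	@ 0x8000 on negative overflow. For reference, an editor's C sketch
	@ of the same idiom (not part of the Tremolo sources; clip_to_16 is
	@ a hypothetical name):
	@
	@   static inline short clip_to_16(int v)
	@   {
	@       if ((v >> 15) != (v >> 31))   /* does not fit in 16 bits */
	@           v = 0x7fff ^ (v >> 31);   /* -> 0x7fff or 0xffff8000 */
	@       return (short)v;
	@   }
	@
	@ Each sample written by mdct_unroll_prelap is then
	@ *out = clip_to_16((*--r) >> 9), with out advancing by step halfwords.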
unroll_loop2:
	LDR	r5,[r2,#-4]!
	@ stall
	@ stall (Xscale)
	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop2
unroll_end:
	LDMFD	r13!,{r4-r7,PC}

mdct_unroll_postlap:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = step
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r1, r2		@ r1 = post - l
	MOV	r1, r1, ASR #1		@ r1 = (post - l)>>1
	SUBS	r1, r1, #16		@ r1 = ((post - l)>>1) - 16
	BLT	unroll_over3
unroll_loop3:
	LDR	r12,[r2],#8
	LDR	r7, [r2],#8
	LDR	r6, [r2],#8
	LDR	r5, [r2],#8

	RSB	r12,r12,#0
	RSB	r5, r5, #0
	RSB	r6, r6, #0
	RSB	r7, r7, #0

	MOV	r12, r12,ASR #9		@ r12= (-*l)>>9
	MOV	r5, r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r6, r6, ASR #9		@ r6 = (-*l)>>9
	MOV	r7, r7, ASR #9		@ r7 = (-*l)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop3

unroll_over3:
	ADDS	r1, r1, #16
	BLE	unroll_over4
unroll_loop4:
	LDR	r5,[r2], #8
	@ stall
	@ stall (Xscale)
	RSB	r5, r5, #0
	MOV	r5, r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop4
unroll_over4:
	LDMFD	r13!,{r4-r7,PC}

mdct_unroll_part2:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r3, r1		@ r1 = (r - post)
	BLE	unroll_over5
unroll_loop5:
	LDR	r12,[r2, #-8]!		@ r12= *l	(but l -= 2 first)
	LDR	r11,[r9],#4		@ r11= *wL++
	LDR	r7, [r3, #-4]!		@ r7 = *--r
	LDR	r6, [r10,#-4]!		@ r6 = *--wR

	@ Can save a cycle here, at the cost of 1-bit errors in rounding
	SMULL	r14,r11,r12,r11		@ (r14,r11) = *l   * *wL++
	SMULL	r14,r6, r7, r6		@ (r14,r6)  = *--r * *--wR
	ADD	r6, r6, r11
	MOV	r6, r6, ASR #8
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop5

unroll_over5:
	LDMFD	r13!,{r4,r6-r11,PC}
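	@ For reference, an editor's C paraphrase of unroll_loop5 above
	@ (hedged; pointer names follow the argument comments, clip_to_16
	@ is the hypothetical helper sketched earlier). Each output sample
	@ is a window-weighted sum of one sample from l and one from r,
	@ keeping only the high 32 bits of each 64-bit product:
	@
	@   while (r > post) {
	@       l -= 2;
	@       int32_t a = (int32_t)(((int64_t)*l   * *wL++) >> 32);
	@       int32_t b = (int32_t)(((int64_t)*--r * *--wR) >> 32);
	@       *out = clip_to_16((a + b) >> 8);
	@       out += step;
	@   }
	@
	@ mdct_unroll_part3 below is the same shape, but reads l and r
	@ forwards and subtracts the two products instead of adding them.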
mdct_unroll_part3:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r1, r3		@ r1 = (post - r)
	BLE	unroll_over6
unroll_loop6:
	LDR	r12,[r2],#8		@ r12= *l	(but l += 2 first)
	LDR	r11,[r9],#4		@ r11= *wL++
	LDR	r7, [r3],#4		@ r7 = *r++
	LDR	r6, [r10,#-4]!		@ r6 = *--wR

	@ Can save a cycle here, at the cost of 1-bit errors in rounding
	SMULL	r14,r11,r12,r11		@ (r14,r11) = *l   * *wL++
	SMULL	r14,r6, r7, r6		@ (r14,r6)  = *r++ * *--wR
	SUB	r6, r6, r11
	MOV	r6, r6, ASR #8
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop6

unroll_over6:
	LDMFD	r13!,{r4,r6-r11,PC}

mdct_shift_right:
	@ r0 = n
	@ r1 = in
	@ r2 = right
	STMFD	r13!,{r4-r11,r14}

	MOV	r0, r0, LSR #2		@ n >>= 2
	ADD	r1, r1, #4

	SUBS	r0, r0, #8
	BLT	sr_less_than_8
sr_loop:
	LDR	r3, [r1], #8
	LDR	r4, [r1], #8
	LDR	r5, [r1], #8
	LDR	r6, [r1], #8
	LDR	r7, [r1], #8
	LDR	r8, [r1], #8
	LDR	r12,[r1], #8
	LDR	r14,[r1], #8
	SUBS	r0, r0, #8
	STMIA	r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
	BGE	sr_loop
sr_less_than_8:
	ADDS	r0, r0, #8
	BEQ	sr_end
sr_loop2:
	LDR	r3, [r1], #8
	SUBS	r0, r0, #1
	STR	r3, [r2], #4
	BGT	sr_loop2
sr_end:
	LDMFD	r13!,{r4-r11,PC}

mdct_backwardARM:
	@ r0 = n
	@ r1 = in
	STMFD	r13!,{r4-r11,r14}

	MOV	r2,#1<<4		@ r2 = 1<<shift
	MOV	r3,#13-4		@ r3 = 13-shift
find_shift_loop:
	TST	r0,r2			@ if (n & (1<<shift)) == 0
	MOV	r2,r2,LSL #1
	SUBEQ	r3,r3,#1		@ shift--
	BEQ	find_shift_loop
	MOV	r2,#2
	MOV	r2,r2,LSL r3		@ r2 = step = 2<<shift

	@ presymmetry
	@ r0 = n	(a multiple of 4)
	@ r1 = in
	@ r2 = step
	@ r3 = shift

	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	ADD	r14,r1, r0		@ r14= in+(n>>2)
	SUB	r4, r4, #3*4		@ r4 = aX = in+n2-3
	LDR	r5, =sincos_lookup0	@ r5 = T=sincos_lookup0

presymmetry_loop1:
	LDR	r7, [r4,#8]		@ r7 = s2 = aX[2]
	LDR	r11,[r5,#4]		@ r11= T[1]
	LDR	r6, [r4]		@ r6 = s0 = aX[0]
	LDR	r10,[r5],r2,LSL #2	@ r10= T[0]		T += step

	@ XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2])
	SMULL	r8, r9, r7, r11		@ (r8, r9)   = s2*T[1]
	@ stall
	@ stall ?
	SMLAL	r8, r9, r6, r10		@ (r8, r9)  += s0*T[0]
	RSB	r6, r6, #0
	@ stall ?
	SMULL	r8, r12,r7, r10		@ (r8, r12)  = s2*T[0]
	MOV	r9, r9, LSL #1
	@ stall ?
	SMLAL	r8, r12,r6, r11		@ (r8, r12) -= s0*T[1]
	STR	r9, [r4],#-16		@ aX[0] = r9
	CMP	r4,r14
	MOV	r12,r12,LSL #1
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop1	@ while (aX >= in+n4)
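	@ The comments above and below use the XPROD31/XNPROD31 helpers
	@ from the C side of the decoder. For reference, an editor's C
	@ sketch of their semantics (hedged; the assembly actually takes
	@ the high word of each 64-bit product and doubles it, i.e. a
	@ cheaper ((a*t)>>32)<<1 that can differ from >>31 in the LSB):
	@
	@   static inline void XPROD31(int32_t a, int32_t b,
	@                              int32_t t, int32_t v,
	@                              int32_t *x, int32_t *y)
	@   {
	@       *x = (int32_t)(((int64_t)a*t + (int64_t)b*v) >> 31);
	@       *y = (int32_t)(((int64_t)b*t - (int64_t)a*v) >> 31);
	@   }
	@
	@   static inline void XNPROD31(int32_t a, int32_t b,
	@                               int32_t t, int32_t v,
	@                               int32_t *x, int32_t *y)
	@   {
	@       *x = (int32_t)(((int64_t)a*t - (int64_t)b*v) >> 31);
	@       *y = (int32_t)(((int64_t)b*t + (int64_t)a*v) >> 31);
	@   }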
presymmetry_loop2:
	LDR	r6,[r4]			@ r6 = s0 = aX[0]
	LDR	r10,[r5,#4]		@ r10= T[1]
	LDR	r7,[r4,#8]		@ r7 = s2 = aX[2]
	LDR	r11,[r5],-r2,LSL #2	@ r11= T[0]		T -= step

	@ XPROD31(s0, s2, T[1], T[0], &aX[0], &aX[2])
	SMULL	r8, r9, r6, r10		@ (r8, r9)   = s0*T[1]
	@ stall
	@ stall ?
	SMLAL	r8, r9, r7, r11		@ (r8, r9)  += s2*T[0]
	RSB	r6, r6, #0
	@ stall ?
	SMULL	r8, r12,r7, r10		@ (r8, r12)  = s2*T[1]
	MOV	r9, r9, LSL #1
	@ stall ?
	SMLAL	r8, r12,r6, r11		@ (r8, r12) -= s0*T[0]
	STR	r9, [r4],#-16		@ aX[0] = r9
	CMP	r4,r1
	MOV	r12,r12,LSL #1
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop2	@ while (aX >= in)

	@ r0 = n
	@ r1 = in
	@ r2 = step
	@ r3 = shift
	STMFD	r13!,{r3}
	LDR	r5, =sincos_lookup0	@ r5 = T=sincos_lookup0
	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	SUB	r4, r4, #4*4		@ r4 = aX = in+(n>>1)-4
	LDR	r11,[r5,#4]		@ r11= T[1]
	LDR	r10,[r5],r2, LSL #2	@ r10= T[0]		T += step
presymmetry_loop3:
	LDR	r8,[r1],#16		@ r8 = ro0 = bX[0]
	LDR	r9,[r1,#8-16]		@ r9 = ro2 = bX[2]
	LDR	r6,[r4]			@ r6 = ri0 = aX[0]

	@ XNPROD31( ro2, ro0, T[1], T[0], &aX[0], &aX[2] )
	@ aX[0] = (ro2*T[1] - ro0*T[0])>>31	aX[2] = (ro0*T[1] + ro2*T[0])>>31
	SMULL	r14,r12,r8, r11		@ (r14,r12)  = ro0*T[1]
	RSB	r8,r8,#0		@ r8 = -ro0
	@ Stall ?
	SMLAL	r14,r12,r9, r10		@ (r14,r12) += ro2*T[0]
	LDR	r7,[r4,#8]		@ r7 = ri2 = aX[2]
	@ Stall ?
	SMULL	r14,r3, r9, r11		@ (r14,r3)   = ro2*T[1]
	MOV	r12,r12,LSL #1
	LDR	r11,[r5,#4]		@ r11= T[1]
	SMLAL	r14,r3, r8, r10		@ (r14,r3)  -= ro0*T[0]
	LDR	r10,[r5],r2, LSL #2	@ r10= T[0]		T += step
	STR	r12,[r4,#8]
	MOV	r3, r3, LSL #1
	STR	r3, [r4],#-16

	@ XNPROD31( ri2, ri0, T[0], T[1], &bX[0], &bX[2] )
	@ bX[0] = (ri2*T[0] - ri0*T[1])>>31	bX[2] = (ri0*T[0] + ri2*T[1])>>31
	SMULL	r14,r12,r6, r10		@ (r14,r12)  = ri0*T[0]
	RSB	r6,r6,#0		@ r6 = -ri0
	@ stall ?
	SMLAL	r14,r12,r7, r11		@ (r14,r12) += ri2*T[1]
	@ stall ?
	@ stall ?
	SMULL	r14,r3, r7, r10		@ (r14,r3)   = ri2*T[0]
	MOV	r12,r12,LSL #1
	@ stall ?
	SMLAL	r14,r3, r6, r11		@ (r14,r3)  -= ri0*T[1]
	CMP	r4,r1
	STR	r12,[r1,#8-16]
	MOV	r3, r3, LSL #1
	STR	r3, [r1,#-16]

	BGE	presymmetry_loop3

	SUB	r1,r1,r0		@ r1 = in -= n>>2 (i.e. restore in)

	LDR	r3,[r13]
	STR	r2,[r13,#-4]!

	@ mdct_butterflies
	@ r0 = n  = (points * 2)
	@ r1 = in = x
	@ r2 = i
	@ r3 = shift
	STMFD	r13!,{r0-r1}
	RSBS	r4,r3,#6		@ r4 = stages = 7-shift then --stages
	LDR	r5,=sincos_lookup0
	BLE	no_generics
	MOV	r14,#4			@ r14= 4		(i=0)
	MOV	r6, r14,LSL r3		@ r6 = (4<<i)<<shift
mdct_butterflies_loop1:
	MOV	r0, r0, LSR #1		@ r0 = points>>i = POINTS
	MOV	r2, r14,LSR #2		@ r2 = (1<<i)-j		(j=0)
	STMFD	r13!,{r4,r14}
mdct_butterflies_loop2:

	@ mdct_butterfly_generic(x+POINTS*j, POINTS, 4<<(i+shift))
	@ mdct_butterfly_generic(r1, r0, r6)
	@ r0 = points
	@ r1 = x
	@ preserve r2 (external loop counter)
	@ preserve r3
	@ preserve r4 (external loop counter)
	@ r5 = T = sincos_lookup0
	@ r6 = step
	@ preserve r14
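	@ An editor's C paraphrase of the first half-loop below,
	@ reconstructed from its comments (hedged; XPROD31 as sketched
	@ earlier, x1/x2 walking downwards four words at a time while T
	@ climbs the first 1024 entries of sincos_lookup0):
	@
	@   do {
	@       x1 -= 4;  x2 -= 4;
	@       s0 = x1[0] - x1[1];   x1[0] = x1[0] + x1[1];
	@       s1 = x1[3] - x1[2];   x1[2] = x1[3] + x1[2];
	@       s2 = x2[1] - x2[0];   x1[1] = x2[1] + x2[0];
	@       s3 = x2[3] - x2[2];   x1[3] = x2[3] + x2[2];
	@       XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2]);
	@       XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3]);
	@       T += step;
	@   } while (T < sincos_lookup0 + 1024);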
	STR	r2,[r13,#-4]!		@ stack r2
	ADD	r1,r1,r0,LSL #1		@ r1 = x2+4 = x + (POINTS>>1)
	ADD	r7,r1,r0,LSL #1		@ r7 = x1+4 = x + POINTS
	ADD	r12,r5,#1024*4		@ r12= sincos_lookup0+1024

mdct_bufferfly_generic_loop1:
	LDMDB	r7!,{r2,r3,r8,r11}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r8 = x1[2]
					@ r11= x1[3]	x1 -= 4
	LDMDB	r1!,{r4,r9,r10,r14}	@ r4 = x2[0]
					@ r9 = x2[1]
					@ r10= x2[2]
					@ r14= x2[3]	x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r11,r11,r8		@ r11= s1 = x1[3] - x1[2]
	ADD	r8, r11,r8, LSL #1	@ r8 =      x1[3] + x1[2] (-> x1[2])
	SUB	r9, r9, r4		@ r9 = s2 = x2[1] - x2[0]
	ADD	r4, r9, r4, LSL #1	@ r4 =      x2[1] + x2[0] (-> x1[1])
	SUB	r14,r14,r10		@ r14= s3 = x2[3] - x2[2]
	ADD	r10,r14,r10,LSL #1	@ r10=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r4,r8,r10}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 free
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s2
	@ r10 free
	@ r11= s1
	@ r12= limit
	@ r14= s3

	LDR	r8, [r5,#4]		@ r8 = T[1]
	LDR	r10,[r5],r6,LSL #2	@ r10= T[0]		T += step

	@ XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s1*T[0] + s0*T[1])>>31	x2[2] = (s0*T[0] - s1*T[1])>>31
	@ stall Xscale
	SMULL	r4, r3, r2, r8		@ (r4, r3)   = s0*T[1]
	SMLAL	r4, r3, r11,r10		@ (r4, r3)  += s1*T[0]
	RSB	r11,r11,#0
	SMULL	r11,r4, r8, r11		@ (r11,r4)   = -s1*T[1]
	SMLAL	r11,r4, r2, r10		@ (r11,r4)  += s0*T[0]
	MOV	r2, r3, LSL #1		@ r2 = r3<<1 = Value for x2[0]

	@ XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s2*T[0] + s3*T[1])>>31	x2[3] = (s3*T[0] - s2*T[1])>>31
	SMULL	r11,r3, r9, r10		@ (r11,r3)   = s2*T[0]
	MOV	r4, r4, LSL #1		@ r4 = r4<<1 = Value for x2[2]
	SMLAL	r11,r3, r14,r8		@ (r11,r3)  += s3*T[1]
	RSB	r9, r9, #0
	SMULL	r10,r11,r14,r10		@ (r10,r11)  = s3*T[0]
	MOV	r3, r3, LSL #1		@ r3 = r3<<1 = Value for x2[1]
	SMLAL	r10,r11,r9,r8		@ (r10,r11) -= s2*T[1]
	CMP	r5, r12
	MOV	r11,r11,LSL #1		@ r11= r11<<1 = Value for x2[3]

	STMIA	r1,{r2,r3,r4,r11}

	BLT	mdct_bufferfly_generic_loop1

	SUB	r12,r12,#1024*4
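	@ Second half: T now walks back down the table and the rotation
	@ direction flips, so XNPROD31 replaces XPROD31. An editor's C
	@ paraphrase of one iteration of the loop below (hedged):
	@
	@   do {
	@       x1 -= 4;  x2 -= 4;
	@       s0 = x1[0] - x1[1];   x1[0] = x1[0] + x1[1];
	@       s1 = x1[2] - x1[3];   x1[2] = x1[2] + x1[3];
	@       s2 = x2[0] - x2[1];   x1[1] = x2[0] + x2[1];
	@       s3 = x2[3] - x2[2];   x1[3] = x2[3] + x2[2];
	@       XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2]);
	@       XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3]);
	@       T -= step;
	@   } while (T > sincos_lookup0);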
mdct_bufferfly_generic_loop2:
	LDMDB	r7!,{r2,r3,r9,r10}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r9 = x1[2]
					@ r10= x1[3]	x1 -= 4
	LDMDB	r1!,{r4,r8,r11,r14}	@ r4 = x2[0]
					@ r8 = x2[1]
					@ r11= x2[2]
					@ r14= x2[3]	x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r9, r9,r10		@ r9 = s1 = x1[2] - x1[3]
	ADD	r10,r9,r10, LSL #1	@ r10=      x1[2] + x1[3] (-> x1[2])
	SUB	r4, r4, r8		@ r4 = s2 = x2[0] - x2[1]
	ADD	r8, r4, r8, LSL #1	@ r8 =      x2[0] + x2[1] (-> x1[1])
	SUB	r14,r14,r11		@ r14= s3 = x2[3] - x2[2]
	ADD	r11,r14,r11,LSL #1	@ r11=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r8,r10,r11}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 = s2
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s1
	@ r10 free
	@ r11 free
	@ r12= limit
	@ r14= s3

	LDR	r8, [r5,#4]		@ r8 = T[1]
	LDR	r10,[r5],-r6,LSL #2	@ r10= T[0]		T -= step

	@ XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s0*T[0] - s1*T[1])>>31	x2[2] = (s1*T[0] + s0*T[1])>>31
	@ stall Xscale
	SMULL	r3, r11,r2, r8		@ (r3, r11)  = s0*T[1]
	SMLAL	r3, r11,r9, r10		@ (r3, r11) += s1*T[0]
	RSB	r9, r9, #0
	SMULL	r3, r2, r10,r2		@ (r3, r2)   = s0*T[0]
	SMLAL	r3, r2, r9, r8		@ (r3, r2)  += -s1*T[1]
	MOV	r9, r11,LSL #1		@ r9 = r11<<1 = Value for x2[2]

	@ XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s3*T[0] - s2*T[1])>>31	x2[3] = (s2*T[0] + s3*T[1])>>31
	SMULL	r3, r11,r4, r10		@ (r3,r11)   = s2*T[0]
	MOV	r2, r2, LSL #1		@ r2 = r2<<1 = Value for x2[0]
	SMLAL	r3, r11,r14,r8		@ (r3,r11)  += s3*T[1]
	RSB	r4, r4, #0
	SMULL	r10,r3,r14,r10		@ (r10,r3)   = s3*T[0]
	MOV	r11,r11,LSL #1		@ r11= r11<<1 = Value for x2[3]
	SMLAL	r10,r3, r4, r8		@ (r10,r3)  -= s2*T[1]
	CMP	r5, r12
	MOV	r3, r3, LSL #1		@ r3 = r3<<1 = Value for x2[1]

	STMIA	r1,{r2,r3,r9,r11}

	BGT	mdct_bufferfly_generic_loop2

	LDR	r2,[r13],#4		@ unstack r2
	ADD	r1, r1, r0, LSL #2	@ r1 = x+POINTS*j
	@ stall Xscale
	SUBS	r2, r2, #1		@ r2-- (j++)
	BGT	mdct_butterflies_loop2

	LDMFD	r13!,{r4,r14}

	LDR	r1,[r13,#4]

	SUBS	r4, r4, #1		@ stages--
	MOV	r14,r14,LSL #1		@ r14= 4<<i		(i++)
	MOV	r6, r6, LSL #1		@ r6 = step <<= 1	(i++)
	BGE	mdct_butterflies_loop1
	LDMFD	r13,{r0-r1}
no_generics:
	@ mdct_butterflies part2 (loop around mdct_bufferfly_32)
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

mdct_bufferflies_loop3:
	@ mdct_bufferfly_32

	@ block1
	ADD	r4, r1, #16*4		@ r4 = &in[16]
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[16]
					@ r6 = x[17]
					@ r9 = x[18]
					@ r10= x[19]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[16] - x[17]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[16] + x[17] -> x[16]
	SUB	r9, r9, r10		@ r9 = s1 = x[18] - x[19]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[18] + x[19] -> x[18]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0] -> x[17]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2] -> x[19]
	STMIA	r4!,{r6,r7,r10,r11}

	LDR	r6,cPI1_8
	LDR	r7,cPI3_8

	@ XNPROD31( s0, s1, cPI3_8, cPI1_8, &x[ 0], &x[ 2] )
	@ x[0] = s0*cPI3_8 - s1*cPI1_8	x[2] = s1*cPI3_8 + s0*cPI1_8
	@ stall Xscale
	SMULL	r14,r11,r5, r6		@ (r14,r11)  = s0*cPI1_8
	SMLAL	r14,r11,r9, r7		@ (r14,r11) += s1*cPI3_8
	RSB	r9, r9, #0
	SMULL	r14,r5, r7, r5		@ (r14,r5)   = s0*cPI3_8
	SMLAL	r14,r5, r9, r6		@ (r14,r5)  -= s1*cPI1_8
	MOV	r11,r11,LSL #1
	MOV	r5, r5, LSL #1

	@ XPROD31 ( s2, s3, cPI1_8, cPI3_8, &x[ 1], &x[ 3] )
	@ x[1] = s2*cPI1_8 + s3*cPI3_8	x[3] = s3*cPI1_8 - s2*cPI3_8
	SMULL	r14,r9, r8, r6		@ (r14,r9)   = s2*cPI1_8
	SMLAL	r14,r9, r12,r7		@ (r14,r9)  += s3*cPI3_8
	RSB	r8,r8,#0
	SMULL	r14,r12,r6, r12		@ (r14,r12)  = s3*cPI1_8
	SMLAL	r14,r12,r8, r7		@ (r14,r12) -= s2*cPI3_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r9,r11,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[20]
					@ r6 = x[21]
					@ r9 = x[22]
					@ r10= x[23]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[4]
					@ r8 = x[5]
					@ r11= x[6]
					@ r12= x[7]
	SUB	r5, r5, r6		@ r5 = s0 = x[20] - x[21]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[20] + x[21] -> x[20]
	SUB	r9, r9, r10		@ r9 = s1 = x[22] - x[23]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[22] + x[23] -> x[22]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 5] - x[ 4]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 5] + x[ 4] -> x[21]
	SUB	r12,r12,r11		@ r12= s3 = x[ 7] - x[ 6]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 7] + x[ 6] -> x[23]
	LDR	r14,cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
	MOV	r5, r5, LSL #1
	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
	MOV	r8, r8, LSL #1
	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r8,r9,r12}
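	@ cPI1_8, cPI2_8 and cPI3_8 (defined at the end of this file) are
	@ Q31 cosines: 0x7641af3d ~ cos(pi/8)*2^31, 0x5a82799a ~
	@ cos(pi/4)*2^31, 0x30fbc54d ~ cos(3pi/8)*2^31. Because
	@ cos(pi/4) = sin(pi/4), the 45-degree rotation in block2 above
	@ collapses to a single constant; an editor's C sketch (hedged,
	@ with a hypothetical MULT31 matching the SMULL/LSL #1 pattern):
	@
	@   static inline int32_t MULT31(int32_t a, int32_t b)
	@   {   /* ((a*b)>>32)<<1: within 1 LSB of (a*b)>>31 */
	@       return (int32_t)(((int64_t)a * b) >> 32) << 1;
	@   }
	@
	@   x[4] = MULT31(s0 - s1, cPI2_8);  x[5] = MULT31(s3 + s2, cPI2_8);
	@   x[6] = MULT31(s0 + s1, cPI2_8);  x[7] = MULT31(s3 - s2, cPI2_8);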
	@ block3
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[24]
					@ r6 = x[25]
					@ r9 = x[26]
					@ r10= x[27]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[8]
					@ r8 = x[9]
					@ r11= x[10]
					@ r12= x[11]
	SUB	r5, r5, r6		@ r5 = s0 = x[24] - x[25]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[24] + x[25] -> x[24]
	SUB	r9, r9, r10		@ r9 = s1 = x[26] - x[27]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[26] + x[27] -> x[26]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 9] - x[ 8]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 9] + x[ 8] -> x[25]
	SUB	r12,r12,r11		@ r12= s3 = x[11] - x[10]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[11] + x[10] -> x[27]
	STMIA	r4!,{r6,r7,r10,r11}

	LDR	r6,cPI3_8
	LDR	r7,cPI1_8

	@ XNPROD31( s0, s1, cPI1_8, cPI3_8, &x[ 8], &x[10] )
	@ x[8] = s0*cPI1_8 - s1*cPI3_8	x[10] = s1*cPI1_8 + s0*cPI3_8
	@ stall Xscale
	SMULL	r14,r11,r5, r6		@ (r14,r11)  = s0*cPI3_8
	SMLAL	r14,r11,r9, r7		@ (r14,r11) += s1*cPI1_8
	RSB	r9, r9, #0
	SMULL	r14,r5, r7, r5		@ (r14,r5)   = s0*cPI1_8
	SMLAL	r14,r5, r9, r6		@ (r14,r5)  -= s1*cPI3_8
	MOV	r11,r11,LSL #1
	MOV	r5, r5, LSL #1

	@ XPROD31 ( s2, s3, cPI3_8, cPI1_8, &x[ 9], &x[11] )
	@ x[9] = s2*cPI3_8 + s3*cPI1_8	x[11] = s3*cPI3_8 - s2*cPI1_8
	SMULL	r14,r9, r8, r6		@ (r14,r9)   = s2*cPI3_8
	SMLAL	r14,r9, r12,r7		@ (r14,r9)  += s3*cPI1_8
	RSB	r8,r8,#0
	SMULL	r14,r12,r6, r12		@ (r14,r12)  = s3*cPI3_8
	SMLAL	r14,r12,r8, r7		@ (r14,r12) -= s2*cPI1_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r9,r11,r12}

	@ block4
	LDMIA	r4,{r5,r6,r10,r11}	@ r5 = x[28]
					@ r6 = x[29]
					@ r10= x[30]
					@ r11= x[31]
	LDMIA	r1,{r8,r9,r12,r14}	@ r8 = x[12]
					@ r9 = x[13]
					@ r12= x[14]
					@ r14= x[15]
	SUB	r5, r5, r6		@ r5 = s0 = x[28] - x[29]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[28] + x[29] -> x[28]
	SUB	r7, r14,r12		@ r7 = s3 = x[15] - x[14]
	ADD	r12,r7, r12, LSL #1	@ r12=      x[15] + x[14] -> x[31]
	SUB	r10,r10,r11		@ r10= s1 = x[30] - x[31]
	ADD	r11,r10,r11,LSL #1	@ r11=      x[30] + x[31] -> x[30]
	SUB	r14, r8, r9		@ r14= s2 = x[12] - x[13]
	ADD	r9, r14, r9, LSL #1	@ r9 =      x[12] + x[13] -> x[29]
	STMIA	r4!,{r6,r9,r11,r12}
	STMIA	r1!,{r5,r7,r10,r14}
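	@ At this point the four blocks above have completed one radix
	@ stage over x[0..31]; what follows is two 16-point butterflies,
	@ one per half. An editor's outline in C (hypothetical helper
	@ names, hedged):
	@
	@   static void mdct_butterfly_32(int32_t *x)
	@   {
	@       stage_32(x);                /* blocks 1-4 above        */
	@       mdct_butterfly_16(x);       /* "1st version" below     */
	@       mdct_butterfly_16(x + 16);  /* "block 2" further below */
	@   }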
	@ mdct_butterfly16 (1st version)
	@ block 1
	SUB	r1,r1,#16*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9] -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11] -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0] -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2] -> x[11]
	LDR	r14,cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
	MOV	r5, r5, LSL #1
	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
	MOV	r8, r8, LSL #1
	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r8,r9,r12}

	@ block4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5] -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6] -> x[15]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13] -> x[12]
	SUB	r12,r9, r10		@ r12= s3 = x[14] - x[15]
	ADD	r10,r12,r10,LSL #1	@ r10=      x[14] + x[15] -> x[14]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r12,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
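	@ For reference, an editor's C paraphrase of the mdct_butterfly_8
	@ blocks in this routine (taken straight from their comments;
	@ hedged):
	@
	@   static void mdct_butterfly_8(int32_t *x)
	@   {
	@       int32_t s0 = x[0] + x[1], s1 = x[0] - x[1];
	@       int32_t s2 = x[2] + x[3], s3 = x[2] - x[3];
	@       int32_t s4 = x[4] + x[5], s5 = x[4] - x[5];
	@       int32_t s6 = x[6] + x[7], s7 = x[6] - x[7];
	@       x[0] = s5 + s3;  x[1] = s7 - s1;
	@       x[2] = s5 - s3;  x[3] = s7 + s1;
	@       x[4] = s4 - s0;  x[5] = s6 - s2;
	@       x[6] = s4 + s0;  x[7] = s6 + s2;
	@   }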
	@ block 2
	ADD	r1,r1,#16*4-8*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9] -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11] -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0] -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2] -> x[11]
	LDR	r14,cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
	MOV	r5, r5, LSL #1
	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
	MOV	r8, r8, LSL #1
	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r8,r9,r12}

	@ block4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13] -> x[12]
	SUB	r9, r9, r10		@ r9 = s3 = x[14] - x[15]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[14] + x[15] -> x[14]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5] -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6] -> x[15]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r9,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	ADD	r1,r1,#8*4
	SUBS	r0,r0,#64
	BGT	mdct_bufferflies_loop3

	LDMFD	r13,{r0-r3}
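	@ mdct_bitreverseARM below swaps pairs of words into bit-reversed
	@ order using the 64-byte bitrev table at the end of this file
	@ (bitrev[i] is the 6-bit bit reversal of i). An editor's C
	@ paraphrase of the loop (hedged; names follow the comments):
	@
	@   int bit = 0;
	@   int32_t *w = x + (n>>1) - 2;
	@   while (w > x) {
	@       int b = (bitrev[bit >> 6] | (bitrev[bit & 0x3f] << 6)) >> shift;
	@       int32_t *xx = x + b;
	@       if (w > xx) {
	@           int32_t t0 = w[0], t1 = w[1];
	@           w[0] = xx[0];  w[1] = xx[1];
	@           xx[0] = t0;    xx[1] = t1;
	@       }
	@       w -= 2;  bit++;
	@   }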
mdct_bitreverseARM:
	@ r0 = points = n
	@ r1 = in
	@ r2 = step
	@ r3 = shift

	MOV	r4, #0			@ r4 = bit = 0
	ADD	r5, r1, r0, LSL #1	@ r5 = w = x + (n>>1)
	ADR	r6, bitrev
	SUB	r5, r5, #8
brev_lp:
	LDRB	r7, [r6, r4, LSR #6]
	AND	r8, r4, #0x3f
	LDRB	r8, [r6, r8]
	ADD	r4, r4, #1		@ bit++
	@ stall XScale
	ORR	r7, r7, r8, LSL #6	@ r7 = bitrev[bit>>6] | bitrev[bit&63]<<6
	MOV	r7, r7, LSR r3
	ADD	r9, r1, r7, LSL #2	@ r9 = xx = x + (b>>shift)
	CMP	r5, r9			@ if (w > xx)
	LDR	r10,[r5],#-8		@   r10 = w[0]		w -= 2
	LDRGT	r11,[r5,#12]		@   r11 = w[1]
	LDRGT	r12,[r9]		@   r12 = xx[0]
	LDRGT	r14,[r9,#4]		@   r14 = xx[1]
	STRGT	r10,[r9]		@   xx[0]= w[0]
	STRGT	r11,[r9,#4]		@   xx[1]= w[1]
	STRGT	r12,[r5,#8]		@   w[0] = xx[0]
	STRGT	r14,[r5,#12]		@   w[1] = xx[1]
	CMP	r5,r1
	BGT	brev_lp

	@ mdct_step7
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

	CMP	r2, #4			@ r5 = T = (step>=4) ?
	LDRGE	r5, =sincos_lookup0	@		sincos_lookup0 +
	LDRLT	r5, =sincos_lookup1	@		sincos_lookup1
	ADD	r7, r1, r0, LSL #1	@ r7 = w1 = x + (n>>1)
	ADDGE	r5, r5, r2, LSL #1	@		(step>>1)
	ADD	r8, r5, #1024*4		@ r8 = Ttop
step7_loop1:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]		w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDR	r14,[r5,#4]		@ r14= T[1]
	LDR	r12,[r5],r2,LSL #2	@ r12= T[0]		T += step

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	@ Can save 1 cycle by using SMULL SMLAL - at the cost of being
	@ 1 off.
	SMULL	r0, r3, r6, r14		@ (r0,r3)  = s0*T[1]
	SMULL	r0, r4, r11,r12		@ (r0,r4)  = s1*T[0]
	ADD	r3, r3, r4		@ r3 = s2 = s0*T[1] + s1*T[0]
	SMULL	r0, r14,r11,r14		@ (r0,r14) = s1*T[1]
	SMULL	r0, r12,r6, r12		@ (r0,r12) = s0*T[0]
	SUB	r14,r14,r12		@ r14= s3 = s1*T[1] - s0*T[0]

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3 - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3 + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r5,r8
	BLT	step7_loop1

step7_loop2:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]		w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDR	r14,[r5,-r2,LSL #2]!	@ r14= T[0]		T -= step
	LDR	r12,[r5,#4]		@ r12= T[1]

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	@ Can save 1 cycle by using SMULL SMLAL - at the cost of being
	@ 1 off.
	SMULL	r0, r3, r6, r14		@ (r0,r3)  = s0*T[0]
	SMULL	r0, r4, r11,r12		@ (r0,r4)  = s1*T[1]
	ADD	r3, r3, r4		@ r3 = s2 = s0*T[0] + s1*T[1]
	SMULL	r0, r14,r11,r14		@ (r0,r14) = s1*T[0]
	SMULL	r0, r12,r6, r12		@ (r0,r12) = s0*T[1]
	SUB	r14,r14,r12		@ r14= s3 = s1*T[0] - s0*T[1]

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3 - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3 + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r1,r7
	BLT	step7_loop2

	LDMFD	r13!,{r0-r3}

	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift
	MOV	r2, r2, ASR #2		@ r2 = step >>= 2
	CMP	r2, #0
	CMPNE	r2, #1
	BEQ	mdct_end
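	@ An editor's C paraphrase of the default step-8 loop below
	@ (hedged; XPROD31 as sketched earlier, iX = x + (n>>1)):
	@
	@   do {
	@       int32_t s0 =  x[0];
	@       int32_t s1 = -x[1];
	@       XPROD31(s0, s1, T[0], T[1], &x[0], &x[1]);
	@       T += step;
	@       x += 2;
	@   } while (x < iX);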
	@ step > 1 (default case)
	CMP	r2, #4			@ r5 = T = (step>=4) ?
	LDRGE	r5, =sincos_lookup0	@		sincos_lookup0 +
	LDRLT	r5, =sincos_lookup1	@		sincos_lookup1
	ADD	r7, r1, r0, LSL #1	@ r7 = iX = x + (n>>1)
	ADDGE	r5, r5, r2, LSL #1	@		(step>>1)
mdct_step8_default:
	LDR	r6, [r1],#4		@ r6 =  s0 = x[0]
	LDR	r8, [r1],#4		@ r8 = -s1 = x[1]
	LDR	r12,[r5,#4]		@ r12= T[1]
	LDR	r14,[r5],r2,LSL #2	@ r14= T[0]		T += step
	RSB	r8, r8, #0		@ r8 = s1

	@ XPROD31(s0, s1, T[0], T[1], x, x+1)
	@ x[0] = s0 * T[0] + s1 * T[1]	x[1] = s1 * T[0] - s0 * T[1]
	SMULL	r9, r10, r8, r12	@ (r9,r10)  = s1 * T[1]
	CMP	r1, r7
	SMLAL	r9, r10, r6, r14	@ (r9,r10) += s0 * T[0]
	RSB	r6, r6, #0		@ r6 = -s0
	SMULL	r9, r11, r8, r14	@ (r9,r11)  = s1 * T[0]
	MOV	r10,r10,LSL #1
	SMLAL	r9, r11, r6, r12	@ (r9,r11) -= s0 * T[1]
	STR	r10,[r1,#-8]
	MOV	r11,r11,LSL #1
	STR	r11,[r1,#-4]
	BLT	mdct_step8_default

mdct_end:
	MOV	r0, r2
	LDMFD	r13!,{r4-r11,PC}

cPI1_8:
	.word	0x7641af3d
cPI2_8:
	.word	0x5a82799a
cPI3_8:
	.word	0x30fbc54d
bitrev:
	.byte	0
	.byte	32
	.byte	16
	.byte	48
	.byte	8
	.byte	40
	.byte	24
	.byte	56
	.byte	4
	.byte	36
	.byte	20
	.byte	52
	.byte	12
	.byte	44
	.byte	28
	.byte	60
	.byte	2
	.byte	34
	.byte	18
	.byte	50
	.byte	10
	.byte	42
	.byte	26
	.byte	58
	.byte	6
	.byte	38
	.byte	22
	.byte	54
	.byte	14
	.byte	46
	.byte	30
	.byte	62
	.byte	1
	.byte	33
	.byte	17
	.byte	49
	.byte	9
	.byte	41
	.byte	25
	.byte	57
	.byte	5
	.byte	37
	.byte	21
	.byte	53
	.byte	13
	.byte	45
	.byte	29
	.byte	61
	.byte	3
	.byte	35
	.byte	19
	.byte	51
	.byte	11
	.byte	43
	.byte	27
	.byte	59
	.byte	7
	.byte	39
	.byte	23
	.byte	55
	.byte	15
	.byte	47
	.byte	31
	.byte	63

	@ END