Lines Matching refs:x1r
46 LDRD r8, [r1] @r8=x1r, r9=x1i
58 ADD r8, r8, r10 @x1r = x1r + x3r@
60 SUB r1, r8, r10, lsl#1 @x3r = x1r - (x3r << 1)@
63 ADD r4, r4, r8 @x0r = x0r + x1r@
65 SUB r8, r4, r8, lsl#1 @x1r = x0r - (x1r << 1)@
97 LDRD r6, [r12] @r6=x1r, r7=x1i
116 ADD r6, r6, r10 @x1r = x1r + x3r@
118 SUB r2, r6, r10, lsl #1 @x3r = x1r - (x3r << 1)@
121 ADD r4, r4, r6 @x0r = x0r + x1r@
125 SUB r6, r4, r6, lsl #1 @x1r = x0r - (x1r << 1)@
134 STRD r6, [r12] @r6=x1r, r7=x1i
179 LDRD r6, [r12, r0]! @r6=x1r, r7=x1i
187 SMULL r3, r4, r6, r2 @ixheaacd_mult32(x1r,w1l)
190 SMULL r3, r6, r6, r1 @mult32x16hin32(x1r,W1h)
196 SMULL r3, r7, r7, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
214 SMULL r3, r9, r9, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
248 ADD r6, r6, r10 @x1r = x1r + x3r@
250 SUB r10, r6, r10, lsl#1 @x3r = x1r - (x3r << 1)@
253 ADD r4, r4, r6 @x0r = x0r + x1r@
255 SUB r6, r4, r6, lsl#1 @x1r = x0r - (x1r << 1)@
267 STRD r6, [r12] @r6=x1r, r7=x1i
308 LDRD r6, [r12, r0]! @r6=x1r, r7=x1i
315 SMULL r3, r4, r6, r2 @ixheaacd_mult32(x1r,w1l)
318 SMULL r3, r6, r6, r1 @mult32x16hin32(x1r,W1h)
324 SMULL r3, r7, r7, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
342 SMULL r3, r9, r9, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
376 ADD r6, r6, r10 @x1r = x1r + x3r@
378 SUB r10, r6, r10, lsl#1 @x3r = x1r - (x3r << 1)@
381 ADD r4, r4, r6 @x0r = x0r + x1r@
383 SUB r6, r4, r6, lsl#1 @x1r = x0r - (x1r << 1)@
395 STRD r6, [r12] @r6=x1r, r7=x1i
439 LDRD r6, [r12, r0]! @r6=x1r, r7=x1i
447 SMULL r3, r4, r6, r2 @ixheaacd_mult32(x1r,w1l)
450 SMULL r3, r6, r6, r1 @mult32x16hin32(x1r,W1h)
456 SMULL r3, r7, r7, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
474 SMULL r3, r9, r9, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
508 ADD r6, r6, r10 @x1r = x1r + x3r@
510 SUB r10, r6, r10, lsl#1 @x3r = x1r - (x3r << 1)@
513 ADD r4, r4, r6 @x0r = x0r + x1r@
515 SUB r6, r4, r6, lsl#1 @x1r = x0r - (x1r << 1)@
527 STRD r6, [r12] @r6=x1r, r7=x1i
569 LDRD r6, [r12, r0]! @r6=x1r, r7=x1i
577 SMULL r3, r4, r6, r2 @ixheaacd_mult32(x1r,w1l)
580 SMULL r3, r6, r6, r1 @mult32x16hin32(x1r,W1h)
586 SMULL r3, r7, r7, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
604 SMULL r3, r9, r9, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
638 ADD r6, r6, r10 @x1r = x1r + x3r@
640 SUB r10, r6, r10, lsl#1 @x3r = x1r - (x3r << 1)@
643 ADD r4, r4, r6 @x0r = x0r + x1r@
645 SUB r6, r4, r6, lsl#1 @x1r = x0r - (x1r << 1)@
657 STRD r6, [r12] @r6=x1r, r7=x1i
707 LDRD r8, [r5] @r8 = x1r
713 SMULL r1, r11, r8, r2 @mult32x16hin32(x1r,W1h)
726 SMULL r1, r8, r8, r2 @ixheaacd_mult32(x1r,w1l)
729 SMULL r1, r9, r9, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
737 ADD r10, r8, r6 @(x0r/2) + (x1r/2)
741 SUB r8, r6, r8 @(x0r/2) - (x1r/2)
759 LDRD r8, [r5] @r8 = x1r
766 SMULL r1, r11, r8, r2 @mult32x16hin32(x1r,W1h)
779 SMULL r1, r8, r8, r2 @ixheaacd_mult32(x1r,w1l)
782 SMULL r1, r9, r9, r2 @ixheaacd_mac32(ixheaacd_mult32(x1r,w1h) ,x1i,w1l)
790 ADD r10, r8, r6 @(x0r>>1) + (x1r)
794 SUB r8, r6, r8 @(x0r>>1) - (x1r)