Lines Matching refs:SVMUL
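
The matches below come from the radix-2/3/4/5 butterfly kernels of a PFFFT-style SIMD FFT (passf*_ps, radf*_ps, radb*_ps); SVMUL is used wherever a fixed twiddle constant has to scale an entire SIMD vector. For orientation, a minimal sketch of what the macros involved could expand to, assuming a scalar fallback build (LD_PS1 and VMUL do not appear in the matches and are assumptions; v4sf, SVMUL, VADD and VSUB do appear):

/* Sketch of a PFFFT-style SIMD abstraction with an assumed scalar fallback.
   LD_PS1 and VMUL are assumed names; only v4sf, SVMUL, VADD and VSUB occur
   in the matched lines below. */
typedef float v4sf;                     /* scalar build: one lane per "vector" */
#define LD_PS1(s)   (s)                 /* broadcast a scalar into all lanes   */
#define VADD(a, b)  ((a) + (b))         /* element-wise add                    */
#define VSUB(a, b)  ((a) - (b))         /* element-wise subtract               */
#define VMUL(a, b)  ((a) * (b))         /* element-wise multiply               */
#define SVMUL(s, v) VMUL(LD_PS1(s), v)  /* scalar constant times a vector      */
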

121 cr2 = VADD(cc[i], SVMUL(taur,tr2)); in passf3_ps()
124 ci2 = VADD(cc[i +1], SVMUL(taur,ti2)); in passf3_ps()
126 cr3 = SVMUL(taui, VSUB(cc[i+ido], cc[i+2*ido])); in passf3_ps()
127 ci3 = SVMUL(taui, VSUB(cc[i+ido+1], cc[i+2*ido+1])); in passf3_ps()
245 cr2 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr11, tr2),SVMUL(tr12, tr3))); in passf5_ps()
246 ci2 = VADD(cc_ref(i , 1), VADD(SVMUL(tr11, ti2),SVMUL(tr12, ti3))); in passf5_ps()
247 cr3 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr12, tr2),SVMUL(tr11, tr3))); in passf5_ps()
248 ci3 = VADD(cc_ref(i , 1), VADD(SVMUL(tr12, ti2),SVMUL(tr11, ti3))); in passf5_ps()
249 cr5 = VADD(SVMUL(ti11, tr5), SVMUL(ti12, tr4)); in passf5_ps()
250 ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4)); in passf5_ps()
251 cr4 = VSUB(SVMUL(ti12, tr5), SVMUL(ti11, tr4)); in passf5_ps()
252 ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4)); in passf5_ps()
305 ch[2*k + ido] = SVMUL(minus_one, cc[ido-1 + k + l1ido]); in radf2_ps()
340 ch[k + ido-1 + l1ido] = SVMUL(minus_two, b); in radb2_ps()
353 ch[(3*k+2)*ido] = SVMUL(taui, VSUB(cc[(k + l1*2)*ido], cc[(k + l1)*ido])); in radf3_ps()
354 ch[ido-1 + (3*k + 1)*ido] = VADD(cc[k*ido], SVMUL(taur, cr2)); in radf3_ps()
372 tr2 = VADD(cc[i - 1 + k*ido], SVMUL(taur, cr2)); in radf3_ps()
373 ti2 = VADD(cc[i + k*ido], SVMUL(taur, ci2)); in radf3_ps()
374 tr3 = SVMUL(taui, VSUB(di2, di3)); in radf3_ps()
375 ti3 = SVMUL(taui, VSUB(dr3, dr2)); in radf3_ps()
397 ci3 = SVMUL(taui_2, cc[(3*k + 2)*ido]); in radb3_ps()
411 cr3 = SVMUL(taui, VSUB(cc[i - 1 + (3*k + 2)*ido], cc[ic - 1 + (3*k + 1)*ido])); in radb3_ps()
412 ci3 = SVMUL(taui, VADD(cc[i + (3*k + 2)*ido], cc[ic + (3*k + 1)*ido])); in radb3_ps()
501 v4sf ti1 = SVMUL(minus_hsqt2, VADD(a, b)); in radf4_ps()
502 v4sf tr1 = SVMUL(minus_hsqt2, VSUB(b, a)); in radf4_ps()
524 tr3 = SVMUL(two,d); in radb4_ps()
527 tr4 = SVMUL(two,c); in radb4_ps()
585 ch[ido-1 + k + 1*l1ido] = SVMUL(minus_sqrt2, VSUB(ti1, tr1)); in radb4_ps()
587 ch[ido-1 + k + 3*l1ido] = SVMUL(minus_sqrt2, VADD(ti1, tr1)); in radb4_ps()
625 ch_ref(ido, 2, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3))); in radf5_ps()
626 ch_ref(1, 3, k) = VADD(SVMUL(ti11, ci5), SVMUL(ti12, ci4)); in radf5_ps()
627 ch_ref(ido, 4, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3))); in radf5_ps()
628 ch_ref(1, 5, k) = VSUB(SVMUL(ti12, ci5), SVMUL(ti11, ci4)); in radf5_ps()
656 tr2 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3))); in radf5_ps()
657 ti2 = VSUB(cc_ref(i, k, 1), VADD(SVMUL(tr11, ci2), SVMUL(tr12, ci3))); in radf5_ps()
658 tr3 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3))); in radf5_ps()
659 ti3 = VSUB(cc_ref(i, k, 1), VADD(SVMUL(tr12, ci2), SVMUL(tr11, ci3))); in radf5_ps()
660 tr5 = VADD(SVMUL(ti11, cr5), SVMUL(ti12, cr4)); in radf5_ps()
661 ti5 = VADD(SVMUL(ti11, ci5), SVMUL(ti12, ci4)); in radf5_ps()
662 tr4 = VSUB(SVMUL(ti12, cr5), SVMUL(ti11, cr4)); in radf5_ps()
663 ti4 = VSUB(SVMUL(ti12, ci5), SVMUL(ti11, ci4)); in radf5_ps()
710 cr2 = VADD(cc_ref(1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3))); in radb5_ps()
711 cr3 = VADD(cc_ref(1, 1, k), VADD(SVMUL(tr12, tr2), SVMUL(tr11, tr3))); in radb5_ps()
712 ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4)); in radb5_ps()
713 ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4)); in radb5_ps()
736 cr2 = VADD(cc_ref(i-1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3))); in radb5_ps()
737 ci2 = VADD(cc_ref(i , 1, k), VADD(SVMUL(tr11, ti2), SVMUL(tr12, ti3))); in radb5_ps()
738 cr3 = VADD(cc_ref(i-1, 1, k), VADD(SVMUL(tr12, tr2), SVMUL(tr11, tr3))); in radb5_ps()
739 ci3 = VADD(cc_ref(i , 1, k), VADD(SVMUL(tr12, ti2), SVMUL(tr11, ti3))); in radb5_ps()
740 cr5 = VADD(SVMUL(ti11, tr5), SVMUL(ti12, tr4)); in radb5_ps()
741 ci5 = VADD(SVMUL(ti11, ti5), SVMUL(ti12, ti4)); in radb5_ps()
742 cr4 = VSUB(SVMUL(ti12, tr5), SVMUL(ti11, tr4)); in radb5_ps()
743 ci4 = VSUB(SVMUL(ti12, ti5), SVMUL(ti11, ti4)); in radb5_ps()
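
For context on the constants fed to SVMUL: in the radix-3 passes, taur and taui are the real and imaginary parts of the primitive cube root of unity (taur = -1/2, taui = ±sqrt(3)/2 depending on transform direction). A scalar sketch of the 3-point butterfly that the passf3_ps/radf3_ps lines above vectorize, per complex sample; the function name and argument layout are illustrative, not taken from the source:

/* Scalar sketch of one 3-point DFT butterfly.  sign is -1 for a forward
   transform and +1 for the inverse; taur and taui match the constants that
   the SVMUL calls above apply lane-wise to whole SIMD vectors. */
static void dft3_scalar(const float re[3], const float im[3],
                        float out_re[3], float out_im[3], int sign)
{
    const float taur = -0.5f;
    const float taui = (float)sign * 0.866025403784438f;  /* sign * sqrt(3)/2 */

    float tr2 = re[1] + re[2], ti2 = im[1] + im[2];
    float cr2 = re[0] + taur * tr2;        /* cf. SVMUL(taur, tr2) above      */
    float ci2 = im[0] + taur * ti2;
    float cr3 = taui * (re[1] - re[2]);    /* cf. SVMUL(taui, VSUB(...)) above */
    float ci3 = taui * (im[1] - im[2]);

    out_re[0] = re[0] + tr2;  out_im[0] = im[0] + ti2;
    out_re[1] = cr2 - ci3;    out_im[1] = ci2 + cr3;
    out_re[2] = cr2 + ci3;    out_im[2] = ci2 - cr3;
}

The radix-5 lines follow the same pattern with two twiddle pairs: in the standard FFTPACK convention, tr11 + i*ti11 and tr12 + i*ti12 are the cosine/sine pairs of 2*pi/5 and 4*pi/5, i.e. the first two primitive fifth roots of unity.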