Lines Matching refs:cr2

114 v4sf tr2, ti2, cr2, ci2, cr3, ci3, dr2, di2, dr3, di3; in passf3_ps() local
121 cr2 = VADD(cc[i], SVMUL(taur,tr2)); in passf3_ps()
128 dr2 = VSUB(cr2, ci3); in passf3_ps()
129 dr3 = VADD(cr2, ci3); in passf3_ps()
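
The passf3_ps() hits are the complex radix-3 butterfly (the v4sf/passf*_ps naming matches pffft's SIMD port of FFTPACK). cr2 on line 121 is the real term shared by output bins 1 and 2; lines 128-129 then split it into dr2/dr3 by subtracting/adding the taui cross term. A minimal scalar sketch of the same arithmetic, with hypothetical names (taur = cos(2*pi/3) = -0.5; taui = -/+ sin(2*pi/3) selects forward/inverse):

    typedef struct { float r, i; } cpx;   /* hypothetical helper type */

    /* Scalar radix-3 butterfly mirroring passf3_ps(): y[0..2] is the
       3-point DFT of x0..x2 (before the later twiddle multiply). */
    static void butterfly3(cpx x0, cpx x1, cpx x2, float taui, cpx y[3])
    {
        const float taur = -0.5f;                  /* cos(2*pi/3) */
        float tr2 = x1.r + x2.r, ti2 = x1.i + x2.i;
        float cr2 = x0.r + taur*tr2;               /* cf. line 121 */
        float ci2 = x0.i + taur*ti2;
        float cr3 = taui*(x1.r - x2.r);
        float ci3 = taui*(x1.i - x2.i);
        y[0].r = x0.r + tr2;   y[0].i = x0.i + ti2;
        y[1].r = cr2 - ci3;    y[1].i = ci2 + cr3;  /* dr2/di2, line 128 */
        y[2].r = cr2 + ci3;    y[2].i = ci2 - cr3;  /* dr3/di3, line 129 */
    }
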
148 v4sf ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; in passf4_ps() local
188 cr2 = VADD(tr1, tr4); in passf4_ps()
193 VCPLXMUL(cr2, ci2, LD_PS1(wr1), LD_PS1(wi1)); in passf4_ps()
195 ch[i + l1ido] = cr2; in passf4_ps()
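
In passf4_ps(), line 193's VCPLXMUL applies a twiddle factor in place: (cr2, ci2) is multiplied by w1 = wr1 + i*wi1 before being stored on line 195. The scalar effect of the macro is an ordinary complex multiply (a sketch of the semantics, not the actual SIMD macro body):

    /* (ar + i*ai) *= (br + i*bi) -- scalar equivalent of VCPLXMUL */
    static void cplxmul(float *ar, float *ai, float br, float bi)
    {
        float t = *ar * bi;
        *ar = *ar * br - *ai * bi;   /* real part of a*b */
        *ai = *ai * br + t;          /* imaginary part of a*b */
    }
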
224 v4sf ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, in passf5_ps() local
245 cr2 = VADD(cc_ref(i-1, 1), VADD(SVMUL(tr11, tr2),SVMUL(tr12, tr3))); in passf5_ps()
257 dr5 = VADD(cr2, ci5); in passf5_ps()
258 dr2 = VSUB(cr2, ci5); in passf5_ps()
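
In passf5_ps(), tr11 and tr12 are the standard FFTPACK radix-5 cosines, tr11 = cos(2*pi/5) ~ 0.309017 and tr12 = cos(4*pi/5) ~ -0.809017 (an assumption based on FFTPACK's constants; their definitions are not among these hits). Assuming the usual grouping tr2 = Re(x1 + x4), tr3 = Re(x2 + x3), line 245 builds the shared real term

    cr2 = Re x0 + cos(2*pi/5)*Re(x1 + x4) + cos(4*pi/5)*Re(x2 + x3)

and lines 257-258 split it into dr5/dr2 = cr2 +/- ci5, mirroring the radix-3 pattern above.
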
349 v4sf ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3, wr1, wi1, wr2, wi2; in radf3_ps() local
351 cr2 = VADD(cc[(k + l1)*ido], cc[(k + 2*l1)*ido]); in radf3_ps()
352 ch[3*k*ido] = VADD(cc[k*ido], cr2); in radf3_ps()
354 ch[ido-1 + (3*k + 1)*ido] = VADD(cc[k*ido], SVMUL(taur, cr2)); in radf3_ps()
368 cr2 = VADD(dr2, dr3); in radf3_ps()
370 ch[i - 1 + 3*k*ido] = VADD(cc[i - 1 + k*ido], cr2); in radf3_ps()
372 tr2 = VADD(cc[i - 1 + k*ido], SVMUL(taur, cr2)); in radf3_ps()
392 v4sf ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2; in radb3_ps() local
395 cr2 = VMADD(LD_PS1(taur), tr2, cc[3*k*ido]); in radb3_ps()
398 ch[(k + l1)*ido] = VSUB(cr2, ci3); in radb3_ps()
399 ch[(k + 2*l1)*ido] = VADD(cr2, ci3); in radb3_ps()
406 cr2 = VMADD(LD_PS1(taur), tr2, cc[i - 1 + 3*k*ido]); in radb3_ps()
413 dr2 = VSUB(cr2, ci3); in radb3_ps()
414 dr3 = VADD(cr2, ci3); in radb3_ps()
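
radb3_ps() is the backward real radix-3 kernel; the one new ingredient versus passf3_ps() is VMADD on lines 395 and 406, a fused multiply-add. Its scalar semantics (a sketch; whether the backend maps it to separate mul/add or a native FMA depends on the SIMD target, which is an assumption here):

    /* VMADD(a, b, c) computes a*b + c elementwise; scalar equivalent: */
    static inline float madd(float a, float b, float c) { return a*b + c; }

    /* So line 395, cr2 = VMADD(LD_PS1(taur), tr2, cc[3*k*ido]), is
       cr2 = taur*tr2 + x0: the same quantity passf3_ps() builds with
       VADD + SVMUL on line 121. */
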
455 v4sf wr, wi, cr2, ci2, cr3, ci3, cr4, ci4; in radf4_ps() local
458 cr2 = pc[1*l1ido+0]; in radf4_ps()
462 VCPLXMULCONJ(cr2,ci2,wr,wi); in radf4_ps()
478 tr1 = VADD(cr2,cr4); in radf4_ps()
479 tr4 = VSUB(cr4,cr2); in radf4_ps()
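
The forward real radix-4 kernel twiddles with VCPLXMULCONJ (line 462) rather than VCPLXMUL: (cr2, ci2) is multiplied by the conjugate of w = wr + i*wi, which is how the forward real-input passes apply their twiddles. Scalar semantics (again a sketch of the effect, not the macro body):

    /* (ar + i*ai) *= conj(br + i*bi) -- scalar equivalent of VCPLXMULCONJ */
    static void cplxmulconj(float *ar, float *ai, float br, float bi)
    {
        float t = *ar * bi;
        *ar = *ar * br + *ai * bi;   /* real part of a*conj(b) */
        *ai = *ai * br - t;          /* imaginary part of a*conj(b) */
    }
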
517 v4sf ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; in radb4_ps() local
553 cr2 = VSUB(tr1, tr4); in radb4_ps()
563 VCPLXMUL(cr2, ci2, LD_PS1(wa1[i-2]), LD_PS1(wa1[i-1])); in radb4_ps()
564 ph[0] = cr2; in radb4_ps()
604 v4sf ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5, in radf5_ps() local
620 cr2 = VADD(cc_ref(1, k, 5), cc_ref(1, k, 2)); in radf5_ps()
624 ch_ref(1, 1, k) = VADD(cc_ref(1, k, 1), VADD(cr2, cr3)); in radf5_ps()
625 ch_ref(ido, 2, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3))); in radf5_ps()
627 ch_ref(ido, 4, k) = VADD(cc_ref(1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3))); in radf5_ps()
646 cr2 = VADD(dr2, dr5); in radf5_ps()
654 ch_ref(i - 1, 1, k) = VADD(cc_ref(i - 1, k, 1), VADD(cr2, cr3)); in radf5_ps()
656 tr2 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr11, cr2), SVMUL(tr12, cr3))); in radf5_ps()
658 tr3 = VADD(cc_ref(i - 1, k, 1), VADD(SVMUL(tr12, cr2), SVMUL(tr11, cr3))); in radf5_ps()
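
A detail worth noticing in radf5_ps(): lines 625 and 627 (and again 656 and 658 in the twiddled columns) use the same two cosines with their roles swapped, tr11*cr2 + tr12*cr3 versus tr12*cr2 + tr11*cr3. That is just cos(8*pi/5) = cos(2*pi/5): for a 5-point real DFT the real parts of output bins 1 and 2 are

    Re y1 = x0 + cos(2*pi/5)*(x1 + x4) + cos(4*pi/5)*(x2 + x3)
    Re y2 = x0 + cos(4*pi/5)*(x1 + x4) + cos(2*pi/5)*(x2 + x3)

(math-style bin indices; FFTPACK's packed layout stores them at ch_ref(ido,2,k) and ch_ref(ido,4,k), assuming the standard radf5 layout).
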
690 v4sf ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, in radb5_ps() local
710 cr2 = VADD(cc_ref(1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3))); in radb5_ps()
714 ch_ref(1, k, 2) = VSUB(cr2, ci5); in radb5_ps()
717 ch_ref(1, k, 5) = VADD(cr2, ci5); in radb5_ps()
736 cr2 = VADD(cc_ref(i-1, 1, k), VADD(SVMUL(tr11, tr2), SVMUL(tr12, tr3))); in radb5_ps()
748 dr5 = VADD(cr2, ci5); in radb5_ps()
749 dr2 = VSUB(cr2, ci5); in radb5_ps()
1379 float cr0, ci0, cr1, ci1, cr2, ci2, cr3, ci3; in FUNC_REAL_PREPROCESS() local
1407 cr2=(Xr.f[0]+Xi.f[0]) - 2*Xr.f[2]; uout[0].f[2] = cr2; in FUNC_REAL_PREPROCESS()
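
The last two hits are from the real-FFT preprocessing. pffft evaluates a real transform as four interleaved FFTs held in the lanes of a v4sf, and FUNC_REAL_PREPROCESS recombines them; that reading of the surrounding code is an assumption, since only these two lines are visible. Line 1407 itself is plain lane arithmetic: writing Xk for lane k of the complex vector (Xr, Xi),

    cr2 = (Re X0 + Im X0) - 2*Re X2

is stored to lane 2 of uout[0], and cr0, ci0, ..., cr3, ci3 from line 1379 hold the analogous combinations for the other output lanes.
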