Lines matching refs:res3 (each entry is the source line number followed by the matched line)
40 %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 -1)
41 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
88 %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 %mask)
89 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
137 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 -1)
138 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
185 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 %mask)
186 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
234 %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 -1)
235 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
282 %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 %mask)
283 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
331 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 -1)
332 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
379 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 %mask)
380 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
430 %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 -1)
431 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
478 %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 %mask)
479 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
527 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 -1)
528 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
575 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 %mask)
576 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
624 %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 -1)
625 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
672 %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 %mask)
673 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
721 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 -1)
722 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
769 %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 %mask)
770 %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
2857 %res3 = add <16 x i8> %res0, %res1
2858 %res4 = add <16 x i8> %res3, %res2
2891 %res3 = add <16 x i8> %res0, %res1
2892 %res4 = add <16 x i8> %res3, %res2
2925 %res3 = add <16 x i8> %res0, %res1
2926 %res4 = add <16 x i8> %res3, %res2
2959 %res3 = add <16 x i8> %res0, %res1
2960 %res4 = add <16 x i8> %res3, %res2
2993 %res3 = add <16 x i8> %res0, %res1
2994 %res4 = add <16 x i8> %res3, %res2
3027 %res3 = add <16 x i8> %res0, %res1
3028 %res4 = add <16 x i8> %res3, %res2
3061 %res3 = add <8 x i16> %res0, %res1
3062 %res4 = add <8 x i16> %res3, %res2
3095 %res3 = add <8 x i16> %res0, %res1
3096 %res4 = add <8 x i16> %res3, %res2
3129 %res3 = add <8 x i16> %res0, %res1
3130 %res4 = add <8 x i16> %res3, %res2
3163 %res3 = add <8 x i16> %res0, %res1
3164 %res4 = add <8 x i16> %res3, %res2
3197 %res3 = add <8 x i16> %res0, %res1
3198 %res4 = add <8 x i16> %res3, %res2
3231 %res3 = add <8 x i16> %res0, %res1
3232 %res4 = add <8 x i16> %res3, %res2
3265 %res3 = add <4 x i32> %res0, %res1
3266 %res4 = add <4 x i32> %res3, %res2
3299 %res3 = add <4 x i32> %res0, %res1
3300 %res4 = add <4 x i32> %res3, %res2
3333 %res3 = add <4 x i32> %res0, %res1
3334 %res4 = add <4 x i32> %res3, %res2
3367 %res3 = add <4 x i32> %res0, %res1
3368 %res4 = add <4 x i32> %res3, %res2
3401 %res3 = add <4 x i32> %res0, %res1
3402 %res4 = add <4 x i32> %res3, %res2
3435 %res3 = add <4 x i32> %res0, %res1
3436 %res4 = add <4 x i32> %res3, %res2
3469 %res3 = add <16 x i8> %res0, %res1
3470 %res4 = add <16 x i8> %res3, %res2
3503 %res3 = add <16 x i8> %res0, %res1
3504 %res4 = add <16 x i8> %res3, %res2
3537 %res3 = add <16 x i8> %res0, %res1
3538 %res4 = add <16 x i8> %res3, %res2
3571 %res3 = add <16 x i8> %res0, %res1
3572 %res4 = add <16 x i8> %res3, %res2
3605 %res3 = add <16 x i8> %res0, %res1
3606 %res4 = add <16 x i8> %res3, %res2
3639 %res3 = add <16 x i8> %res0, %res1
3640 %res4 = add <16 x i8> %res3, %res2
3673 %res3 = add <8 x i16> %res0, %res1
3674 %res4 = add <8 x i16> %res3, %res2
3707 %res3 = add <8 x i16> %res0, %res1
3708 %res4 = add <8 x i16> %res3, %res2
3741 %res3 = add <8 x i16> %res0, %res1
3742 %res4 = add <8 x i16> %res3, %res2
3775 %res3 = add <8 x i16> %res0, %res1
3776 %res4 = add <8 x i16> %res3, %res2
3809 %res3 = add <8 x i16> %res0, %res1
3810 %res4 = add <8 x i16> %res3, %res2
3843 %res3 = add <8 x i16> %res0, %res1
3844 %res4 = add <8 x i16> %res3, %res2
4392 %res3 = fadd <8 x float> %res, %res1
4393 %res4 = fadd <8 x float> %res2, %res3
4415 %res3 = fadd <4 x double> %res, %res1
4416 %res4 = fadd <4 x double> %res2, %res3
4471 %res3 = fadd <4 x float> %res, %res1
4472 %res4 = fadd <4 x float> %res2, %res3
4491 %res3 = fadd <2 x double> %res, %res1
4492 %res4 = fadd <2 x double> %res2, %res3
4562 %res3 = fadd <2 x double> %res, %res1
4563 %res4 = fadd <2 x double> %res2, %res3
4636 %res3 = add <4 x i32> %res, %res1
4637 %res4 = add <4 x i32> %res3, %res2
4704 %res3 = fadd <4 x double> %res, %res1
4705 %res4 = fadd <4 x double> %res2, %res3
4724 %res3 = fadd <2 x double> %res, %res1
4725 %res4 = fadd <2 x double> %res3, %res2
4744 %res3 = fadd <8 x float> %res, %res1
4745 %res4 = fadd <8 x float> %res3, %res2
4764 %res3 = fadd <4 x float> %res, %res1
4765 %res4 = fadd <4 x float> %res2, %res3
4784 %res3 = fadd <8 x float> %res, %res1
4785 %res4 = fadd <8 x float> %res2, %res3
4805 %res3 = add <8 x i32> %res, %res1
4806 %res4 = add <8 x i32> %res2, %res3
5023 …%res3 = call <8 x i16> @llvm.x86.avx512.mask.vcvtps2ph.128(<4 x float> %a0, i32 2, <8 x i16> %src,…
5025 %res = add <8 x i16> %res3, %res0
5043 …%res3 = call <8 x i16> @llvm.x86.avx512.mask.vcvtps2ph.256(<8 x float> %a0, i32 2, <8 x i16> %src,…
5045 %res = add <8 x i16> %res3, %res0
5321 …%res3 = call <8 x float> @llvm.x86.avx512.mask.broadcastf32x4.256(<4 x float> %x0, <8 x float> zer…
5323 %res5 = fadd <8 x float> %res3, %res4
5345 …%res3 = call <8 x i32> @llvm.x86.avx512.mask.broadcasti32x4.256(<4 x i32> %x0, <8 x i32> zeroiniti…
5347 %res5 = add <8 x i32> %res3, %res4
5366 %res3 = add <2 x i64> %res, %res1
5367 %res4 = add <2 x i64> %res3, %res2
5386 %res3 = add <4 x i64> %res, %res1
5387 %res4 = add <4 x i64> %res3, %res2
5406 %res3 = add <2 x i64> %res, %res1
5407 %res4 = add <2 x i64> %res2, %res3
5426 %res3 = add <4 x i64> %res, %res1
5427 %res4 = add <4 x i64> %res2, %res3
5444 %res3 = add <4 x i32> %res, %res1
5445 %res4 = add <4 x i32> %res3, %res2
5464 %res3 = add <8 x i32> %res, %res1
5465 %res4 = add <8 x i32> %res2, %res3
5484 %res3 = add <4 x i32> %res, %res1
5485 %res4 = add <4 x i32> %res2, %res3
5504 %res3 = add <8 x i32> %res, %res1
5505 %res4 = add <8 x i32> %res2, %res3
5524 %res3 = add <16 x i32> %res, %res1
5525 %res4 = add <16 x i32> %res2, %res3
5544 %res3 = add <2 x i64> %res, %res1
5545 %res4 = add <2 x i64> %res3, %res2
5564 %res3 = add <4 x i64> %res, %res1
5565 %res4 = add <4 x i64> %res3, %res2
5584 %res3 = add <4 x i32> %res, %res1
5585 %res4 = add <4 x i32> %res3, %res2
5604 %res3 = add <8 x i32> %res, %res1
5605 %res4 = add <8 x i32> %res3, %res2
5624 %res3 = add <4 x i32> %res, %res1
5625 %res4 = add <4 x i32> %res3, %res2
5644 %res3 = add <8 x i32> %res, %res1
5645 %res4 = add <8 x i32> %res3, %res2
5664 %res3 = add <4 x i32> %res, %res1
5665 %res4 = add <4 x i32> %res3, %res2
5684 %res3 = add <8 x i32> %res, %res1
5685 %res4 = add <8 x i32> %res3, %res2
5704 %res3 = add <2 x i64> %res, %res1
5705 %res4 = add <2 x i64> %res3, %res2
5724 %res3 = add <4 x i64> %res, %res1
5725 %res4 = add <4 x i64> %res3, %res2
5744 %res3 = add <2 x i64> %res, %res1
5745 %res4 = add <2 x i64> %res3, %res2
5764 %res3 = add <4 x i64> %res, %res1
5765 %res4 = add <4 x i64> %res3, %res2
5785 %res3 = add <4 x i32> %res, %res1
5786 %res4 = add <4 x i32> %res3, %res2
5805 %res3 = add <8 x i32> %res, %res1
5806 %res4 = add <8 x i32> %res3, %res2
5825 %res3 = add <4 x i32> %res, %res1
5826 %res4 = add <4 x i32> %res3, %res2
5845 %res3 = add <8 x i32> %res, %res1
5846 %res4 = add <8 x i32> %res3, %res2
5865 %res3 = add <4 x i64> %res, %res1
5866 %res4 = add <4 x i64> %res3, %res2
5885 %res3 = add <2 x i64> %res, %res1
5886 %res4 = add <2 x i64> %res3, %res2
5905 %res3 = add <4 x i64> %res, %res1
5906 %res4 = add <4 x i64> %res3, %res2
5925 %res3 = add <4 x i32> %res, %res1
5926 %res4 = add <4 x i32> %res3, %res2
5945 %res3 = add <8 x i32> %res, %res1
5946 %res4 = add <8 x i32> %res3, %res2
5978 %res3 = add <2 x i64> %res, %res1
5979 %res4 = add <2 x i64> %res3, %res2
6011 %res3 = add <4 x i64> %res, %res1
6012 %res4 = add <4 x i64> %res3, %res2
6031 %res3 = add <2 x i64> %res, %res1
6032 %res4 = add <2 x i64> %res3, %res2
6051 %res3 = add <4 x i64> %res, %res1
6052 %res4 = add <4 x i64> %res3, %res2
6071 %res3 = add <4 x i32> %res, %res1
6072 %res4 = add <4 x i32> %res3, %res2
6091 %res3 = add <8 x i32> %res, %res1
6092 %res4 = add <8 x i32> %res3, %res2
6111 %res3 = add <4 x i32> %res, %res1
6112 %res4 = add <4 x i32> %res3, %res2
6131 %res3 = add <8 x i32> %res, %res1
6132 %res4 = add <8 x i32> %res3, %res2
6151 %res3 = add <2 x i64> %res, %res1
6152 %res4 = add <2 x i64> %res3, %res2
6171 %res3 = add <4 x i64> %res, %res1
6172 %res4 = add <4 x i64> %res3, %res2
6191 %res3 = add <4 x i32> %res, %res1
6192 %res4 = add <4 x i32> %res3, %res2
6211 %res3 = add <8 x i32> %res, %res1
6212 %res4 = add <8 x i32> %res3, %res2
6231 %res3 = add <2 x i64> %res, %res1
6232 %res4 = add <2 x i64> %res3, %res2
6251 %res3 = add <4 x i64> %res, %res1
6252 %res4 = add <4 x i64> %res3, %res2
6271 %res3 = add <4 x i32> %res, %res1
6272 %res4 = add <4 x i32> %res3, %res2
6291 %res3 = add <8 x i32> %res, %res1
6292 %res4 = add <8 x i32> %res3, %res2
6311 %res3 = add <2 x i64> %res, %res1
6312 %res4 = add <2 x i64> %res3, %res2
6331 %res3 = add <4 x i64> %res, %res1
6332 %res4 = add <4 x i64> %res3, %res2
6351 %res3 = add <4 x i32> %res, %res1
6352 %res4 = add <4 x i32> %res3, %res2
6371 %res3 = add <8 x i32> %res, %res1
6372 %res4 = add <8 x i32> %res3, %res2
6391 %res3 = add <2 x i64> %res, %res1
6392 %res4 = add <2 x i64> %res3, %res2
6411 %res3 = add <4 x i64> %res, %res1
6412 %res4 = add <4 x i64> %res3, %res2
6434 %res3 = add <4 x i32> %res, %res1
6435 %res4 = add <4 x i32> %res3, %res2
6457 %res3 = add <8 x i32> %res, %res1
6458 %res4 = add <8 x i32> %res3, %res2
6480 %res3 = add <2 x i64> %res, %res1
6481 %res4 = add <2 x i64> %res3, %res2
6503 %res3 = add <4 x i64> %res, %res1
6504 %res4 = add <4 x i64> %res3, %res2
6526 %res3 = add <2 x i64> %res, %res1
6527 %res4 = add <2 x i64> %res3, %res2
6549 %res3 = add <4 x i64> %res, %res1
6550 %res4 = add <4 x i64> %res3, %res2
6572 %res3 = add <4 x i32> %res, %res1
6573 %res4 = add <4 x i32> %res3, %res2
6595 %res3 = add <8 x i32> %res, %res1
6596 %res4 = add <8 x i32> %res3, %res2
6618 %res3 = add <2 x i64> %res, %res1
6619 %res4 = add <2 x i64> %res3, %res2
6641 %res3 = add <4 x i64> %res, %res1
6642 %res4 = add <4 x i64> %res3, %res2
6661 %res3 = add <4 x i32> %res, %res1
6662 %res4 = add <4 x i32> %res3, %res2
6681 %res3 = add <8 x i32> %res, %res1
6682 %res4 = add <8 x i32> %res3, %res2
6701 %res3 = add <2 x i64> %res, %res1
6702 %res4 = add <2 x i64> %res3, %res2
6721 %res3 = add <4 x i64> %res, %res1
6722 %res4 = add <4 x i64> %res3, %res2
6741 %res3 = add <4 x i32> %res, %res1
6742 %res4 = add <4 x i32> %res3, %res2
6761 %res3 = add <8 x i32> %res, %res1
6762 %res4 = add <8 x i32> %res3, %res2
6781 %res3 = add <2 x i64> %res, %res1
6782 %res4 = add <2 x i64> %res3, %res2
6801 %res3 = add <4 x i64> %res, %res1
6802 %res4 = add <4 x i64> %res3, %res2
6821 %res3 = fadd <4 x double> %res, %res1
6822 %res4 = fadd <4 x double> %res3, %res2
6841 %res3 = add <4 x i64> %res, %res1
6842 %res4 = add <4 x i64> %res3, %res2
6861 %res3 = fadd <8 x float> %res, %res1
6862 %res4 = fadd <8 x float> %res3, %res2
6881 %res3 = add <8 x i32> %res, %res1
6882 %res4 = add <8 x i32> %res3, %res2
6903 %res3 = fadd <2 x double> %res, %res1
6904 %res4 = fadd <2 x double> %res3, %res2
6923 %res3 = fadd <2 x double> %res, %res1
6924 ;%res4 = fadd <2 x double> %res3, %res2
6925 ret <2 x double> %res3
6945 %res3 = fadd <4 x double> %res, %res1
6946 %res4 = fadd <4 x double> %res3, %res2
6968 %res3 = fadd <4 x double> %res, %res1
6969 %res4 = fadd <4 x double> %res3, %res2
6991 %res3 = fadd <4 x float> %res, %res1
6992 %res4 = fadd <4 x float> %res3, %res2
7014 %res3 = fadd <4 x float> %res, %res1
7015 %res4 = fadd <4 x float> %res3, %res2
7037 %res3 = fadd <8 x float> %res, %res1
7038 %res4 = fadd <8 x float> %res3, %res2
7060 %res3 = fadd <8 x float> %res, %res1
7061 %res4 = fadd <8 x float> %res3, %res2
7232 %res3 = add <8 x i32> %res, %res1
7233 %res4 = add <8 x i32> %res2, %res3
7252 %res3 = add <4 x i32> %res, %res1
7253 %res4 = add <4 x i32> %res2, %res3
7272 %res3 = add <4 x i64> %res, %res1
7273 %res4 = add <4 x i64> %res2, %res3
7292 %res3 = add <2 x i64> %res, %res1
7293 %res4 = add <2 x i64> %res2, %res3
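The matches above are isolated lines from much larger test functions. For orientation, here is a minimal sketch of one such function, assuming the surrounding lines follow the pattern implied by the %res3/%vec3 pair at file lines 40-41: the mask-compare intrinsic is called once per predicate value, and each i8 result is inserted into an <8 x i8> accumulator. Only the %res3 and %vec3 lines are confirmed by the listing; the function name, the neighbouring calls, and the early return are illustrative assumptions. The later add/fadd pairs in the listing presumably combine the results of several masked, zero-masked, and unmasked call variants of a single intrinsic before returning, but those calls fall outside the matched lines, so no reconstruction is attempted for them.

; Hypothetical reconstruction of one test function; only the %res3/%vec3
; lines are taken verbatim from the matches above (file lines 40-41).
define <8 x i8> @example_mask_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
  %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 -1)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
  %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 1, i8 -1)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
  %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 2, i8 -1)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
  ; the two lines below correspond to file lines 40-41 in the listing
  %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 -1)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
  ret <8 x i8> %vec3
}
declare i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32>, <8 x i32>, i32, i8)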