; RUN: llc < %s -mtriple x86_64-unknown-linux-gnu -mattr +avx512f | FileCheck %s
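; Check that 512-bit vector inline-asm operands using the "v" constraint (and
; the explicit "Yz" constraint in @testZMM0) are assigned ZMM registers when
; AVX-512F is enabled.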

define <16 x float> @testzmm_1(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpternlogd  $0, %zmm1, %zmm0, %zmm0
  %0 = tail call <16 x float> asm "vpternlogd $$0, $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm0)
  ret <16 x float> %0
}

define <16 x float> @testzmm_2(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpabsq  %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vpabsq $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1)
  ret <16 x float> %0
}


define <16 x float> @testzmm_3(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpaddd  %zmm1, %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vpaddd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
  ret <16 x float> %0
}


define <16 x float> @testzmm_4(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpaddq  %zmm1, %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vpaddq $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
  ret <16 x float> %0
}


define <16 x float> @testzmm_5(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpandd  %zmm1, %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vpandd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
  ret <16 x float> %0
}


define <16 x float> @testzmm_6(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpandnd %zmm1, %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vpandnd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
  ret <16 x float> %0
}


define <16 x float> @testzmm_7(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpmaxsd %zmm1, %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vpmaxsd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
  ret <16 x float> %0
}


define <16 x float> @testzmm_8(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vmovups %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vmovups $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1)
  ret <16 x float> %0
}


define <16 x float> @testzmm_9(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vmovupd %zmm1, %zmm0
  %0 = tail call <16 x float> asm "vmovupd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1)
  ret <16 x float> %0
}

define <16 x float> @testZMM0() {
entry:
; CHECK: vpternlogd $255, %zmm0, %zmm0, %zmm0
  %zmm0 = alloca <16 x float>, align 64
  %0 = call <16 x float> asm "vpternlogd $$255, $0, $0, $0", "=^Yz,~{dirflag},~{fpsr},~{flags}"()
  store <16 x float> %0, <16 x float>* %zmm0, align 64
  %1 = load <16 x float>, <16 x float>* %zmm0, align 64
  ret <16 x float> %1
}