; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

; In the following 4 tests, the existing call to VZU/VZA ensures clean state before
; the call to the unknown, so we don't need to insert a second VZU at that point.

define <4 x float> @zeroupper_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
; CHECK-LABEL: zeroupper_v4f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $48, %rsp
; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq the_unknown
; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vaddps (%rbx), %ymm0, %ymm0
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    addq $48, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  call void @llvm.x86.avx.vzeroupper()
  call void @the_unknown()
  %loadx = load <8 x float>, <8 x float> *%x, align 32
  %sum = fadd <8 x float> %loadx, %y
  %lo = shufflevector <8 x float> %sum, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x float> %sum, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %res = fadd <4 x float> %lo, %hi
  ret <4 x float> %res
}

define <8 x float> @zeroupper_v8f32(<8 x float> %x) nounwind {
; CHECK-LABEL: zeroupper_v8f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    subq $56, %rsp
; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq the_unknown
; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT:    addq $56, %rsp
; CHECK-NEXT:    retq
  call void @llvm.x86.avx.vzeroupper()
  call void @the_unknown()
  ret <8 x float> %x
}

define <4 x float> @zeroall_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
; CHECK-LABEL: zeroall_v4f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $48, %rsp
; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vzeroall
; CHECK-NEXT:    callq the_unknown
; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vaddps (%rbx), %ymm0, %ymm0
; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    addq $48, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  call void @llvm.x86.avx.vzeroall()
  call void @the_unknown()
  %loadx = load <8 x float>, <8 x float> *%x, align 32
  %sum = fadd <8 x float> %loadx, %y
  %lo = shufflevector <8 x float> %sum, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x float> %sum, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %res = fadd <4 x float> %lo, %hi
  ret <4 x float> %res
}

define <8 x float> @zeroall_v8f32(<8 x float> %x) nounwind {
; CHECK-LABEL: zeroall_v8f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    subq $56, %rsp
; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT:    vzeroall
; CHECK-NEXT:    callq the_unknown
; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT:    addq $56, %rsp
; CHECK-NEXT:    retq
  call void @llvm.x86.avx.vzeroall()
  call void @the_unknown()
  ret <8 x float> %x
}

declare void @llvm.x86.avx.vzeroupper() nounwind readnone
declare void @llvm.x86.avx.vzeroall() nounwind readnone
declare void @the_unknown() nounwind