; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s

; This tests whether or not we generate vectors larger than the preferred vector
; width when lowering memmove.
;
; @A/@B use attribute group #0 ("prefer-vector-width"="128") and must lower a
; 32/64-byte memmove with xmm (128-bit) loads/stores only.
; @C/@D use attribute group #2 ("prefer-vector-width"="256") and are expected
; to use ymm (256-bit) operations.

; Function Attrs: nounwind uwtable
define weak_odr dso_local void @A(i8* %src, i8* %dst) local_unnamed_addr #0 {
; CHECK-LABEL: A:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovups (%rdi), %xmm0
; CHECK-NEXT:    vmovups 16(%rdi), %xmm1
; CHECK-NEXT:    vmovups %xmm1, 16(%rsi)
; CHECK-NEXT:    vmovups %xmm0, (%rsi)
; CHECK-NEXT:    retq
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 32, i1 false)
  ret void
}

; Function Attrs: nounwind uwtable
define weak_odr dso_local void @B(i8* %src, i8* %dst) local_unnamed_addr #0 {
; CHECK-LABEL: B:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovups (%rdi), %xmm0
; CHECK-NEXT:    vmovups 16(%rdi), %xmm1
; CHECK-NEXT:    vmovups 32(%rdi), %xmm2
; CHECK-NEXT:    vmovups 48(%rdi), %xmm3
; CHECK-NEXT:    vmovups %xmm3, 48(%rsi)
; CHECK-NEXT:    vmovups %xmm2, 32(%rsi)
; CHECK-NEXT:    vmovups %xmm1, 16(%rsi)
; CHECK-NEXT:    vmovups %xmm0, (%rsi)
; CHECK-NEXT:    retq
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 64, i1 false)
  ret void
}

; Function Attrs: nounwind uwtable
define weak_odr dso_local void @C(i8* %src, i8* %dst) local_unnamed_addr #2 {
; CHECK-LABEL: C:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovups (%rdi), %ymm0
; CHECK-NEXT:    vmovups %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 32, i1 false)
  ret void
}

; Function Attrs: nounwind uwtable
define weak_odr dso_local void @D(i8* %src, i8* %dst) local_unnamed_addr #2 {
; CHECK-LABEL: D:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovups (%rdi), %ymm0
; CHECK-NEXT:    vmovups 32(%rdi), %ymm1
; CHECK-NEXT:    vmovups %ymm1, 32(%rsi)
; CHECK-NEXT:    vmovups %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 64, i1 false)
  ret void
}

; Function Attrs: argmemonly nounwind
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1 immarg) #1

attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "prefer-vector-width"="128" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+cx8,+f16c,+fma,+fsgsbase,+fxsr,+invpcid,+lzcnt,+mmx,+movbe,+pclmul,+pku,+popcnt,+prfchw,+rdrnd,+rdseed,+sahf,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
attributes #2 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "prefer-vector-width"="256" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+cx8,+f16c,+fma,+fsgsbase,+fxsr,+invpcid,+lzcnt,+mmx,+movbe,+pclmul,+pku,+popcnt,+prfchw,+rdrnd,+rdseed,+sahf,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }

!0 = !{i32 1, !"wchar_size", i32 4}