; REQUIRES: asserts
; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true -debug-only=loop-accesses < %s 2>&1 | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; Check that the compile-time-unknown dependence-distance is resolved
; statically. Due to the non-unit stride of the accesses in this testcase
; we are currently not able to create runtime dependence checks, and therefore
; if we don't resolve the dependence statically we cannot vectorize the loop.
;
; Specifically in this example, during dependence analysis we get 6 unknown
; dependence distances between the 8 real/imaginary accesses below:
; dist = 8*D, 4+8*D, -4+8*D, -8*D, 4-8*D, -4-8*D.
; At compile time we can prove for all of the above that |dist|>loopBound*step
; (where the step is 8bytes, and the loopBound is D-1), and thereby conclude
; that there are no dependencies (without runtime tests):
; |8*D|>8*D-8, |4+8*D|>8*D-8, |-4+8*D|>8*D-8, etc.

; C++ source that this IR was generated from:
;
; #include <stdlib.h>
; class Complex {
; private:
;   float real_;
;   float imaginary_;
;
; public:
;   Complex() : real_(0), imaginary_(0) { }
;   Complex(float real, float imaginary) : real_(real), imaginary_(imaginary) { }
;   Complex(const Complex &rhs) : real_(rhs.real()), imaginary_(rhs.imaginary()) { }
;
;   inline float real() const { return real_; }
;   inline float imaginary() const { return imaginary_; }
;
;   Complex operator+(const Complex& rhs) const
;   {
;     return Complex(real_ + rhs.real_, imaginary_ + rhs.imaginary_);
;   }
;
;   Complex operator-(const Complex& rhs) const
;   {
;     return Complex(real_ - rhs.real_, imaginary_ - rhs.imaginary_);
;   }
; };
;
; void Test(Complex *out, size_t size)
; {
;   size_t D = size / 2;
;   for (size_t offset = 0; offset < D; ++offset)
;   {
;     Complex t0 = out[offset];
;     Complex t1 = out[offset + D];
;     out[offset] = t1 + t0;
;     out[offset + D] = t0 - t1;
;   }
; }

; The loop must be analyzed as safe without runtime checks, and then
; vectorized with the forced VF of 4 (interleaved <4 x i32> wide accesses).
; CHECK-LABEL: Test
; CHECK: LAA: No unsafe dependent memory operations in loop. We don't need runtime memory checks.
; CHECK: vector.body:
; CHECK: <4 x i32>

%class.Complex = type { float, float }

define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr {
entry:
  ; D = size / 2; skip the loop entirely when D == 0.
  %div = lshr i64 %size, 1
  %cmp47 = icmp eq i64 %div, 0
  br i1 %cmp47, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:
  br label %for.body

for.cond.cleanup.loopexit:
  br label %for.cond.cleanup

for.cond.cleanup:
  ret void

for.body:
  %offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
  ; Load t0 = out[offset] (real and imaginary parts).
  %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0
  %1 = load float, float* %0, align 4
  %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1
  %2 = load float, float* %imaginary_.i.i, align 4
  ; Load t1 = out[offset + D] (real and imaginary parts).
  %add = add nuw i64 %offset.048, %div
  %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0
  %4 = load float, float* %3, align 4
  %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1
  %5 = load float, float* %imaginary_.i.i28, align 4
  ; out[offset] = t1 + t0
  %add.i = fadd fast float %4, %1
  %add4.i = fadd fast float %5, %2
  store float %add.i, float* %0, align 4
  store float %add4.i, float* %imaginary_.i.i, align 4
  ; out[offset + D] = t0 - t1
  %sub.i = fsub fast float %1, %4
  %sub4.i = fsub fast float %2, %5
  store float %sub.i, float* %3, align 4
  store float %sub4.i, float* %imaginary_.i.i28, align 4
  %inc = add nuw nsw i64 %offset.048, 1
  %exitcond = icmp eq i64 %inc, %div
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}