; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output < %s 2>&1 | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; Check that the compile-time-unknown dependence distance is resolved
; statically. Due to the non-unit stride of the accesses in this test case
; we are currently not able to create runtime dependence checks, and therefore
; if we don't resolve the dependence statically we cannot vectorize the loop.
;
; Specifically in this example, during dependence analysis we get 6 unknown
; dependence distances between the 8 real/imaginary accesses below:
;   dist = 8*D, 4+8*D, -4+8*D, -8*D, 4-8*D, -4-8*D.
; At compile time we can prove for all of the above that |dist| > loopBound*step
; (where the step is 8 bytes and the loopBound is D-1), and thereby conclude
; that there are no dependencies (without runtime tests):
;   |8*D| > 8*D-8, |4+8*D| > 8*D-8, |-4+8*D| > 8*D-8, etc.
; (A worked numeric instance of this argument follows the IR below.)

; #include <stdlib.h>
; class Complex {
; private:
;   float real_;
;   float imaginary_;
;
; public:
;   Complex() : real_(0), imaginary_(0) { }
;   Complex(float real, float imaginary) : real_(real), imaginary_(imaginary) { }
;   Complex(const Complex &rhs) : real_(rhs.real()), imaginary_(rhs.imaginary()) { }
;
;   inline float real() const { return real_; }
;   inline float imaginary() const { return imaginary_; }
;
;   Complex operator+(const Complex& rhs) const
;   {
;     return Complex(real_ + rhs.real_, imaginary_ + rhs.imaginary_);
;   }
;
;   Complex operator-(const Complex& rhs) const
;   {
;     return Complex(real_ - rhs.real_, imaginary_ - rhs.imaginary_);
;   }
; };
;
; void Test(Complex *out, size_t size)
; {
;   size_t D = size / 2;
;   for (size_t offset = 0; offset < D; ++offset)
;   {
;     Complex t0 = out[offset];
;     Complex t1 = out[offset + D];
;     out[offset] = t1 + t0;
;     out[offset + D] = t0 - t1;
;   }
; }

; CHECK-LABEL: Test
; CHECK: Memory dependences are safe

%class.Complex = type { float, float }

define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr {
entry:
  %div = lshr i64 %size, 1
  %cmp47 = icmp eq i64 %div, 0
  br i1 %cmp47, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:
  br label %for.body

for.cond.cleanup.loopexit:
  br label %for.cond.cleanup

for.cond.cleanup:
  ret void

for.body:
  %offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
  %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0
  %1 = load float, float* %0, align 4
  %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1
  %2 = load float, float* %imaginary_.i.i, align 4
  %add = add nuw i64 %offset.048, %div
  %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0
  %4 = load float, float* %3, align 4
  %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1
  %5 = load float, float* %imaginary_.i.i28, align 4
  %add.i = fadd fast float %4, %1
  %add4.i = fadd fast float %5, %2
  store float %add.i, float* %0, align 4
  store float %add4.i, float* %imaginary_.i.i, align 4
  %sub.i = fsub fast float %1, %4
  %sub4.i = fsub fast float %2, %5
  store float %sub.i, float* %3, align 4
  store float %sub4.i, float* %imaginary_.i.i28, align 4
  %inc = add nuw nsw i64 %offset.048, 1
  %exitcond = icmp eq i64 %inc, %div
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}
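
; Worked numeric instance of the distance argument above (illustrative only;
; the concrete value of %size is an assumption, not something the test checks):
; with %size = 16 we get D = 8, so the six distances in bytes are
;   8*D = 64, 4+8*D = 68, -4+8*D = 60, -8*D = -64, 4-8*D = -60, -4-8*D = -68,
; while the loop spans at most loopBound*step = (D-1)*8 = 56 bytes.
; Every |dist| is at least 60 > 56, so the accesses can be proven independent
; at compile time and no runtime checks are required.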