Lines matching "wide":
29 %wide.load = load <4 x i32>, <4 x i32>* %1, align 16
32 %wide.load5 = load <4 x i32>, <4 x i32>* %3, align 16
33 %4 = add nsw <4 x i32> %wide.load, %vec.phi
34 %5 = add nsw <4 x i32> %wide.load5, %vec.phi4
38 %wide.load.1 = load <4 x i32>, <4 x i32>* %7, align 16
41 %wide.load5.1 = load <4 x i32>, <4 x i32>* %9, align 16
42 %10 = add nsw <4 x i32> %wide.load.1, %4
43 %11 = add nsw <4 x i32> %wide.load5.1, %5
47 %wide.load.2 = load <4 x i32>, <4 x i32>* %13, align 16
50 %wide.load5.2 = load <4 x i32>, <4 x i32>* %15, align 16
51 %16 = add nsw <4 x i32> %wide.load.2, %10
52 %17 = add nsw <4 x i32> %wide.load5.2, %11
56 %wide.load.3 = load <4 x i32>, <4 x i32>* %19, align 16
59 %wide.load5.3 = load <4 x i32>, <4 x i32>* %21, align 16
60 %22 = add nsw <4 x i32> %wide.load.3, %16
61 %23 = add nsw <4 x i32> %wide.load5.3, %17
65 %wide.load.4 = load <4 x i32>, <4 x i32>* %25, align 16
68 %wide.load5.4 = load <4 x i32>, <4 x i32>* %27, align 16
69 %28 = add nsw <4 x i32> %wide.load.4, %22
70 %29 = add nsw <4 x i32> %wide.load5.4, %23
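
The first group of matches above is a 32-bit integer sum reduction: the loop has been vectorized to <4 x i32>, interleaved by two (the %vec.phi / %vec.phi4 accumulator pair), and the vector body then unrolled a further five times (the .1 through .4 suffixes). A minimal C sketch of a loop that plausibly produces this pattern at -O3 follows; it is a reconstruction from the IR, not the original source, and the function name is made up:

/* hypothetical reconstruction of the scalar loop behind the first group */
int sum_i32(const int *a, int n) {
    int s = 0;                 /* becomes the <4 x i32> %vec.phi accumulators */
    for (int i = 0; i < n; i++)
        s += a[i];             /* the add nsw chain on the %wide.load values */
    return s;
}
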
109 %wide.load = load <4 x i32>, <4 x i32>* %scevgep35, align 16
111 %wide.load10 = load <4 x i32>, <4 x i32>* %scevgep36, align 16
112 %0 = and <4 x i32> %wide.load, %vec.phi
113 %1 = and <4 x i32> %wide.load10, %vec.phi9
117 %wide.load.1 = load <4 x i32>, <4 x i32>* %scevgep32, align 16
121 %wide.load10.1 = load <4 x i32>, <4 x i32>* %scevgep29, align 16
122 %2 = and <4 x i32> %wide.load.1, %0
123 %3 = and <4 x i32> %wide.load10.1, %1
127 %wide.load.2 = load <4 x i32>, <4 x i32>* %scevgep26, align 16
131 %wide.load10.2 = load <4 x i32>, <4 x i32>* %scevgep23, align 16
132 %4 = and <4 x i32> %wide.load.2, %2
133 %5 = and <4 x i32> %wide.load10.2, %3
137 %wide.load.3 = load <4 x i32>, <4 x i32>* %scevgep20, align 16
141 %wide.load10.3 = load <4 x i32>, <4 x i32>* %scevgep, align 16
142 %6 = and <4 x i32> %wide.load.3, %4
143 %7 = and <4 x i32> %wide.load10.3, %5
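
The second group is a bitwise-AND reduction, again <4 x i32> and interleaved by two, but unrolled four times rather than five; the %scevgep pointer names suggest this dump was taken after loop strength reduction rewrote the address arithmetic. A plausible scalar source is sketched below; the all-ones initial value is an assumption (AND only reduces cleanly from its identity, -1), and the name is made up:

/* hypothetical reconstruction of the loop behind the second group */
int and_all(const int *a, int n) {
    int r = -1;                /* assumed identity value for the AND reduction */
    for (int i = 0; i < n; i++)
        r &= a[i];             /* the vector 'and' chain on the %wide.load values */
    return r;
}
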
181 %wide.load = load <4 x float>, <4 x float>* %1, align 4
184 %wide.load10 = load <4 x float>, <4 x float>* %3, align 4
185 %4 = fadd fast <4 x float> %wide.load, %vec.phi
186 %5 = fadd fast <4 x float> %wide.load10, %vec.phi9
190 %wide.load.1 = load <4 x float>, <4 x float>* %7, align 4
193 %wide.load10.1 = load <4 x float>, <4 x float>* %9, align 4
194 %10 = fadd fast <4 x float> %wide.load.1, %4
195 %11 = fadd fast <4 x float> %wide.load10.1, %5
199 %wide.load.2 = load <4 x float>, <4 x float>* %13, align 4
202 %wide.load10.2 = load <4 x float>, <4 x float>* %15, align 4
203 %16 = fadd fast <4 x float> %wide.load.2, %10
204 %17 = fadd fast <4 x float> %wide.load10.2, %11
208 %wide.load.3 = load <4 x float>, <4 x float>* %19, align 4
211 %wide.load10.3 = load <4 x float>, <4 x float>* %21, align 4
212 %22 = fadd fast <4 x float> %wide.load.3, %16
213 %23 = fadd fast <4 x float> %wide.load10.3, %17
217 %wide.load.4 = load <4 x float>, <4 x float>* %25, align 4
220 %wide.load10.4 = load <4 x float>, <4 x float>* %27, align 4
221 %28 = fadd fast <4 x float> %wide.load.4, %22
222 %29 = fadd fast <4 x float> %wide.load10.4, %23
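
The last group is a single-precision float sum reduction with the same shape as the first: <4 x float>, two accumulators, five copies of the vector body. The fast flags on the fadd instructions are the key detail: LLVM only vectorizes a floating-point reduction when reassociation is allowed, because the vector accumulators change the order in which the partial sums are combined. The align 4 on the loads also shows the vectorizer is not assuming 16-byte alignment here. A plausible source, assuming something like clang -O3 -ffast-math (the name is made up):

/* hypothetical reconstruction of the loop behind the third group */
float sum_f32(const float *a, int n) {
    float s = 0.0f;
    for (int i = 0; i < n; i++)
        s += a[i];             /* becomes the fadd fast chain on the %wide.load values */
    return s;
}

Without fast-math, the adds must stay in source order, and by default the loop vectorizer leaves such a reduction scalar.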