
Lines Matching refs:vi
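The matched lines below appear to come from a templated WebAssembly SIMD microkernel for a 5x5, stride-2 depthwise convolution: the ${M}, ${2*M+k} and ${N % ACCUMULATORS} expressions are template parameters that are substituted per output row before the C source is compiled. Each step loads eight consecutive input floats, de-interleaves them into even-indexed (x8ACE) and odd-indexed (x9BDF) lanes with wasm_v32x4_shuffle, and multiply-accumulates each lane vector against one kernel tap. A minimal sketch of that load / de-interleave / accumulate idiom, assuming a WASM SIMD toolchain (clang or Emscripten with -msimd128); the names i0, vk02 and vo0p0 are illustrative placeholders that mirror the listing's conventions rather than the kernel's actual interface:

    #include <wasm_simd128.h>

    /* Accumulate one kernel tap over the even-indexed input columns. */
    static inline v128_t accumulate_even_tap(const float* i0, v128_t vk02, v128_t vo0p0) {
      const v128_t vi0x89AB = wasm_v128_load(i0);      /* input columns 8..B */
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  /* input columns C..F */
      /* Shuffle indices 0..3 select lanes of the first operand, 4..7 of the second,
         so (0, 2, 4, 6) gathers the even columns 8, A, C, E into one vector. */
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      return wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
    }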

99 v128_t vi${M}x0246 = vzero;
102 v128_t vi${M}x1357 = vzero;
105 const v128_t vi${M}x89AB = wasm_v128_load(i${M});
106 const v128_t vi${M}xCDEF = wasm_v128_load(i${M} + 4);
110 v128_t vi${M}x8ACE = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 0, 2, 4, 6);
111 v128_t vi${M}x9BDF = wasm_v32x4_shuffle(vi${M}x89AB, vi${M}xCDEF, 1, 3, 5, 7);
120 v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, vk02);
122 vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, vk02));
126 v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12);
128 vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12));
132 v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22);
134 vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22));
138 v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32);
140 vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32));
144 v128_t vo${M}p5 = wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42);
146 vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42));
149 …vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9B…
152 …vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x…
155 …vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x…
158 …vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
161 …vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
164 const v128_t vi${M}x68AC = wasm_v32x4_shuffle(vi${M}x0246, vi${M}x8ACE, 3, 4, 5, 6);
165 vi${M}x0246 = vi${M}x8ACE;
168 …vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x…
171 …vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1…
174 …vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2…
177 …vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
180 …vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
183 const v128_t vi${M}x79BD = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
184 vi${M}x1357 = vi${M}x9BDF;
187 const v128_t vi${M}xGHIJ = wasm_v128_load(i${M});
188 const v128_t vi${M}xKLMN = wasm_v128_load(i${M} + 4);
192 const v128_t vi${M}xGIKM = wasm_v32x4_shuffle(vi${M}xGHIJ, vi${M}xKLMN, 0, 2, 4, 6);
193 const v128_t vi${M}xHJLN = wasm_v32x4_shuffle(vi${M}xGHIJ, vi${M}xKLMN, 1, 3, 5, 7);
196 …vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x…
199 …vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1…
202 …vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2…
205 …vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
208 …vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
211 const v128_t vi${M}xACEG = wasm_v32x4_shuffle(vi${M}x8ACE, vi${M}xGIKM, 1, 2, 3, 4);
212 vi${M}x8ACE = vi${M}xGIKM;
213 vi${M}x9BDF = vi${M}xHJLN;
216 …vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x…
219 …vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1…
222 …vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2…
225 …vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
228 …vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
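Throughout the block above, ${N % ACCUMULATORS} rotates successive products across a small set of partial sums vo${M}p0 .. vo${M}p(ACCUMULATORS-1), which shortens the floating-point add dependency chain; the partial sums are presumably reduced to a single output vector later in the file (those lines do not reference vi variables and so do not appear in these matches). As an illustration, assuming Python-style template substitution, the vk22 and vk32 taps (source lines 134 and 140) would expand for M = 0 and ACCUMULATORS = 2 roughly as:

    /* 4 % 2 == 0 and 2*M+2 == 2: */
    vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
    /* 5 % 2 == 1 and 2*M+3 == 3: */
    vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));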
261 vi${M}x8ACE = wasm_v128_and(vmask_even, vi${M}x8ACE);
264 vi${M}x9BDF = wasm_v128_and(vmask_odd, vi${M}x9BDF);
268 v128_t vo${M}p1 = wasm_f32x4_mul(vi${2*M}x8ACE, vk02);
270 vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M}x8ACE, vk02));
274 v128_t vo${M}p2 = wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12);
276 vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${2*M+1}x8ACE, vk12));
280 v128_t vo${M}p3 = wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22);
282 vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x8ACE, vk22));
286 v128_t vo${M}p4 = wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32);
288 vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3}x8ACE, vk32));
292 v128_t vo${M}p5 = wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42);
294 vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4}x8ACE, vk42));
297 …vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x9B…
300 …vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1}x…
303 …vo${M}p${9 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${9 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2}x…
306 …vo${M}p${10 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${10 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
309 …vo${M}p${11 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${11 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
312 const v128_t vi${M}x68AC = wasm_v32x4_shuffle(vi${M}x0246, vi${M}x8ACE, 3, 4, 5, 6);
315 …vo${M}p${12 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${12 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x…
318 …vo${M}p${13 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${13 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1…
321 …vo${M}p${14 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${14 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2…
324 …vo${M}p${15 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${15 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
327 …vo${M}p${16 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${16 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
330 const v128_t vi${M}x79BD = wasm_v32x4_shuffle(vi${M}x1357, vi${M}x9BDF, 3, 4, 5, 6);
333 …vo${M}p${17 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${17 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x…
336 …vo${M}p${18 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${18 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1…
339 …vo${M}p${19 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${19 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2…
342 …vo${M}p${20 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${20 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
345 …vo${M}p${21 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${21 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
348 const v128_t vi${M}xACEG = wasm_v32x4_shuffle(vi${M}x8ACE, vzero, 1, 2, 3, 4);
351 …vo${M}p${22 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${22 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M}x…
354 …vo${M}p${23 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${23 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+1…
357 …vo${M}p${24 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${24 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+2…
360 …vo${M}p${25 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${25 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+3…
363 …vo${M}p${26 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${26 % ACCUMULATORS}, wasm_f32x4_mul(vi${2*M+4…
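The matches from source line 261 onward appear to be the row tail: vmask_even and vmask_odd zero the input lanes that lie past the end of the row, and the xACEG shuffle at source line 348 shifts in vzero rather than a further input block. A minimal sketch of that masking idiom with a hypothetical mask builder (the listing's vmask_even/vmask_odd are presumably precomputed outside the matched lines, e.g. from the remaining row width):

    #include <stddef.h>
    #include <wasm_simd128.h>

    /* Keep the first n (0 <= n <= 4) float lanes of v and zero the rest, the way
       the listing applies vmask_even/vmask_odd before the final set of taps. */
    static inline v128_t keep_first_lanes(v128_t v, size_t n) {
      const v128_t vmask = wasm_i32x4_make(n > 0 ? -1 : 0, n > 1 ? -1 : 0,
                                           n > 2 ? -1 : 0, n > 3 ? -1 : 0);
      return wasm_v128_and(vmask, v);  /* cleared lanes read as +0.0f */
    }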