1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "elk_eu.h"
29 #include "elk_fs.h"
30 #include "elk_fs_live_variables.h"
31 #include "elk_vec4.h"
32 #include "elk_cfg.h"
33 #include "elk_shader.h"
34 #include <new>
35
36 using namespace elk;
37
38 /** @file elk_fs_schedule_instructions.cpp
39 *
40 * List scheduling of FS instructions.
41 *
42 * The basic model of the list scheduler is to take a basic block,
43 * compute a DAG of the dependencies (RAW ordering with latency, WAW
44 * ordering with latency, WAR ordering), and make a list of the DAG heads.
45 * Heuristically pick a DAG head, then put all the children that are
46 * now DAG heads into the list of things to schedule.
47 *
48 * The heuristic is the important part. We're trying to be cheap,
49 * since actually computing the optimal scheduling is NP-complete.
50 * What we do is track a "current clock". When we schedule a node, we
51 * update the earliest-unblocked clock time of its children, and
52 * increment the clock. Then, when trying to schedule, we just pick
53 * the earliest-unblocked instruction to schedule.
54 *
55 * Note that often there will be many things which could execute
56 * immediately, and there are a range of heuristic options to choose
57 * from in picking among those.
58 */
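
/* A minimal sketch of the greedy loop described above, using hypothetical
 * helper names (the real schedulers below also track issue times, register
 * pressure and exit nodes):
 *
 *    int clock = 0;
 *    while (!dag_heads_empty()) {
 *       node *n = pick_earliest_unblocked(clock);      // the heuristic
 *       emit(n->inst);
 *       clock += n->issue_time;
 *       for (child c : children(n)) {
 *          c->unblocked_time = MAX2(c->unblocked_time,
 *                                   clock + edge_latency(n, c));
 *          if (--c->parent_count == 0)
 *             add_dag_head(c);                         // now schedulable
 *       }
 *    }
 */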
59
60 static bool debug = false;
61
62 class elk_instruction_scheduler;
63 struct elk_schedule_node_child;
64
65 class elk_schedule_node : public exec_node
66 {
67 public:
68 void set_latency_gfx4();
69 void set_latency_gfx7(const struct elk_isa_info *isa);
70
71 elk_backend_instruction *inst;
72 elk_schedule_node_child *children;
73 int children_count;
74 int children_cap;
75 int initial_parent_count;
76 int initial_unblocked_time;
77 int latency;
78
79 /**
80 * This is the sum of the instruction's latency plus the maximum delay of
81 * its children, or just the issue_time if it's a leaf node.
82 */
83 int delay;
84
85 /**
86 * Preferred exit node among the (direct or indirect) successors of this
87 * node. Among the scheduler nodes blocked by this node, this will be the
88 * one that may cause earliest program termination, or NULL if none of the
89 * successors is an exit node.
90 */
91 elk_schedule_node *exit;
92
93 /**
94 * How many cycles this instruction takes to issue.
95 *
96 * Instructions in gen hardware are handled one SIMD4 vector at a time,
97 * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
98 * cycles to dispatch and SIMD16 (compressed) instructions take 4.
99 */
100 int issue_time;
101
102 /* Temporary data used during the scheduling process. */
103 struct {
104 int parent_count;
105 int unblocked_time;
106
107 /**
108 * Which iteration of pushing groups of children onto the candidates list
109 * this node was a part of.
110 */
111 unsigned cand_generation;
112 } tmp;
113 };
114
115 struct elk_schedule_node_child {
116 elk_schedule_node *n;
117 int effective_latency;
118 };
119
120 static inline void
121 reset_node_tmp(elk_schedule_node *n)
122 {
123 n->tmp.parent_count = n->initial_parent_count;
124 n->tmp.unblocked_time = n->initial_unblocked_time;
125 n->tmp.cand_generation = 0;
126 }
127
128 /**
129 * Lower bound of the scheduling time after which one of the instructions
130 * blocked by this node may lead to program termination.
131 *
132 * exit_unblocked_time() determines a strict partial ordering relation '«' on
133 * the set of scheduler nodes as follows:
134 *
135 * n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
136 *
137 * which can be used to heuristically order nodes according to how early they
138 * can unblock an exit node and lead to program termination.
139 */
140 static inline int
141 exit_tmp_unblocked_time(const elk_schedule_node *n)
142 {
143 return n->exit ? n->exit->tmp.unblocked_time : INT_MAX;
144 }
145
146 static inline int
147 exit_initial_unblocked_time(const elk_schedule_node *n)
148 {
149 return n->exit ? n->exit->initial_unblocked_time : INT_MAX;
150 }
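
/* For example, a heuristic tie-break between two candidate nodes a and b
 * could (as a sketch) read:
 *
 *    if (exit_tmp_unblocked_time(a) < exit_tmp_unblocked_time(b))
 *       prefer(a);   // a may unblock program termination earlier
 *
 * which is essentially the comparison choose_instruction_to_schedule()
 * performs below.
 */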
151
152 void
153 elk_schedule_node::set_latency_gfx4()
154 {
155 int chans = 8;
156 int math_latency = 22;
157
158 switch (inst->opcode) {
159 case ELK_SHADER_OPCODE_RCP:
160 this->latency = 1 * chans * math_latency;
161 break;
162 case ELK_SHADER_OPCODE_RSQ:
163 this->latency = 2 * chans * math_latency;
164 break;
165 case ELK_SHADER_OPCODE_INT_QUOTIENT:
166 case ELK_SHADER_OPCODE_SQRT:
167 case ELK_SHADER_OPCODE_LOG2:
168 /* full precision log. partial is 2. */
169 this->latency = 3 * chans * math_latency;
170 break;
171 case ELK_SHADER_OPCODE_INT_REMAINDER:
172 case ELK_SHADER_OPCODE_EXP2:
173 /* full precision. partial is 3, same throughput. */
174 this->latency = 4 * chans * math_latency;
175 break;
176 case ELK_SHADER_OPCODE_POW:
177 this->latency = 8 * chans * math_latency;
178 break;
179 case ELK_SHADER_OPCODE_SIN:
180 case ELK_SHADER_OPCODE_COS:
181 /* minimum latency, max is 12 rounds. */
182 this->latency = 5 * chans * math_latency;
183 break;
184 default:
185 this->latency = 2;
186 break;
187 }
188 }
189
190 void
191 elk_schedule_node::set_latency_gfx7(const struct elk_isa_info *isa)
192 {
193 const bool is_haswell = isa->devinfo->verx10 == 75;
194
195 switch (inst->opcode) {
196 case ELK_OPCODE_MAD:
197 /* 2 cycles
198 * (since the last two src operands are in different register banks):
199 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
200 *
201 * 3 cycles on IVB, 4 on HSW
202 * (since the last two src operands are in the same register bank):
203 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
204 *
205 * 18 cycles on IVB, 16 on HSW
206 * (since the last two src operands are in different register banks):
207 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
208 * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
209 *
210 * 20 cycles on IVB, 18 on HSW
211 * (since the last two src operands are in the same register bank):
212 * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
213 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
214 */
215
216 /* Our register allocator doesn't know about register banks, so use the
217 * higher latency.
218 */
219 latency = is_haswell ? 16 : 18;
220 break;
221
222 case ELK_OPCODE_LRP:
223 /* 2 cycles
224 * (since the last two src operands are in different register banks):
225 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
226 *
227 * 3 cycles on IVB, 4 on HSW
228 * (since the last two src operands are in the same register bank):
229 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
230 *
231 * 16 cycles on IVB, 14 on HSW
232 * (since the last two src operands are in different register banks):
233 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
234 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
235 *
236 * 16 cycles
237 * (since the last two src operands are in the same register bank):
238 * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
239 * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
240 */
241
242 /* Our register allocator doesn't know about register banks, so use the
243 * higher latency.
244 */
245 latency = 14;
246 break;
247
248 case ELK_SHADER_OPCODE_RCP:
249 case ELK_SHADER_OPCODE_RSQ:
250 case ELK_SHADER_OPCODE_SQRT:
251 case ELK_SHADER_OPCODE_LOG2:
252 case ELK_SHADER_OPCODE_EXP2:
253 case ELK_SHADER_OPCODE_SIN:
254 case ELK_SHADER_OPCODE_COS:
255 /* 2 cycles:
256 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
257 *
258 * 18 cycles:
259 * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
260 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
261 *
262 * Same for exp2, log2, rsq, sqrt, sin, cos.
263 */
264 latency = is_haswell ? 14 : 16;
265 break;
266
267 case ELK_SHADER_OPCODE_POW:
268 /* 2 cycles:
269 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
270 *
271 * 26 cycles:
272 * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
273 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
274 */
275 latency = is_haswell ? 22 : 24;
276 break;
277
278 case ELK_SHADER_OPCODE_TEX:
279 case ELK_SHADER_OPCODE_TXD:
280 case ELK_SHADER_OPCODE_TXF:
281 case ELK_SHADER_OPCODE_TXF_LZ:
282 case ELK_SHADER_OPCODE_TXL:
283 case ELK_SHADER_OPCODE_TXL_LZ:
284 /* 18 cycles:
285 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
286 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
287 * send(8) g4<1>UW g114<8,8,1>F
288 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
289 *
290 * 697 +/-49 cycles (min 610, n=26):
291 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
292 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
293 * send(8) g4<1>UW g114<8,8,1>F
294 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
295 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
296 *
297 * So the first texture load of the batchbuffer takes ~700 cycles of
298 * latency, since the caches are cold at that point.
299 *
300 * 840 +/- 92 cycles (min 720, n=25):
301 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
302 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
303 * send(8) g4<1>UW g114<8,8,1>F
304 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
305 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
306 * send(8) g4<1>UW g114<8,8,1>F
307 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
308 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
309 *
310 * On the second load, it takes just an extra ~140 cycles, and after
311 * accounting for the 14 cycles of the MOV's latency, that makes ~130.
312 *
313 * 683 +/- 49 cycles (min = 602, n=47):
314 * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
315 * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
316 * send(8) g4<1>UW g114<8,8,1>F
317 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
318 * send(8) g50<1>UW g114<8,8,1>F
319 * sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
320 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
321 *
322 * The unit appears to be pipelined, since this matches up with the
323 * cache-cold case, despite there being two loads here. If you replace
324 * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
325 *
326 * So, take some number between the cache-hot 140 cycles and the
327 * cache-cold 700 cycles. No particular tuning was done on this.
328 *
329 * I haven't done significant testing of the non-TEX opcodes. TXL at
330 * least looked about the same as TEX.
331 */
332 latency = 200;
333 break;
334
335 case ELK_SHADER_OPCODE_TXS:
336 /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
337 * cycles (n=15):
338 * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
339 * send(8) g6<1>UW g114<8,8,1>F
340 * sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
341 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
342 *
343 *
344 * Two loads was 535 +/- 30 cycles (n=19):
345 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
346 * send(16) g6<1>UW g114<8,8,1>F
347 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
348 * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
349 * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
350 * send(16) g8<1>UW g114<8,8,1>F
351 * sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
352 * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
353 * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
354 *
355 * Since the only caches that should matter are just the
356 * instruction/state cache containing the surface state, assume that we
357 * always have hot caches.
358 */
359 latency = 100;
360 break;
361
362 case ELK_FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GFX4:
363 case ELK_FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
364 case ELK_VS_OPCODE_PULL_CONSTANT_LOAD:
365 /* testing using varying-index pull constants:
366 *
367 * 16 cycles:
368 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
369 * send(8) g4<1>F g4<8,8,1>D
370 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
371 *
372 * ~480 cycles:
373 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
374 * send(8) g4<1>F g4<8,8,1>D
375 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
376 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
377 *
378 * ~620 cycles:
379 * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
380 * send(8) g4<1>F g4<8,8,1>D
381 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
382 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
383 * send(8) g4<1>F g4<8,8,1>D
384 * data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
385 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
386 *
387 * So, if it's cache-hot, it's about 140. If it's cache cold, it's
388 * about 460. We expect to mostly be cache hot, so pick something more
389 * in that direction.
390 */
391 latency = 200;
392 break;
393
394 case ELK_SHADER_OPCODE_GFX7_SCRATCH_READ:
395 /* Testing a load from offset 0, that had been previously written:
396 *
397 * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
398 * mov(8) null g114<8,8,1>F { align1 WE_normal 1Q };
399 *
400 * The cycles spent seemed to be grouped around 40-50 (as low as 38),
401 * then around 140. Presumably this is cache hit vs miss.
402 */
403 latency = 50;
404 break;
405
406 case ELK_VEC4_OPCODE_UNTYPED_ATOMIC:
407 /* See GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
408 latency = 14000;
409 break;
410
411 case ELK_VEC4_OPCODE_UNTYPED_SURFACE_READ:
412 case ELK_VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
413 /* See also GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ */
414 latency = is_haswell ? 300 : 600;
415 break;
416
417 case ELK_SHADER_OPCODE_SEND:
418 switch (inst->sfid) {
419 case ELK_SFID_SAMPLER: {
420 unsigned msg_type = (inst->desc >> 12) & 0x1f;
421 switch (msg_type) {
422 case GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO:
423 case GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO:
424 /* See also ELK_SHADER_OPCODE_TXS */
425 latency = 100;
426 break;
427
428 default:
429 /* See also ELK_SHADER_OPCODE_TEX */
430 latency = 200;
431 break;
432 }
433 break;
434 }
435
436 case GFX6_SFID_DATAPORT_CONSTANT_CACHE:
437 /* See ELK_FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD */
438 latency = 200;
439 break;
440
441 case GFX6_SFID_DATAPORT_RENDER_CACHE:
442 switch (elk_fb_desc_msg_type(isa->devinfo, inst->desc)) {
443 case GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE:
444 case GFX7_DATAPORT_RC_TYPED_SURFACE_READ:
445 /* See also ELK_SHADER_OPCODE_TYPED_SURFACE_READ */
446 assert(!is_haswell);
447 latency = 600;
448 break;
449
450 case GFX7_DATAPORT_RC_TYPED_ATOMIC_OP:
451 /* See also ELK_SHADER_OPCODE_TYPED_ATOMIC */
452 assert(!is_haswell);
453 latency = 14000;
454 break;
455
456 case GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE:
457 /* completely fabricated number */
458 latency = 600;
459 break;
460
461 default:
462 unreachable("Unknown render cache message");
463 }
464 break;
465
466 case GFX7_SFID_DATAPORT_DATA_CACHE:
467 switch ((inst->desc >> 14) & 0x1f) {
468 case ELK_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ:
469 case GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ:
470 case GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE:
471 /* We have no data for this but assume it's a little faster than
472 * untyped surface read/write.
473 */
474 latency = 200;
475 break;
476
477 case GFX7_DATAPORT_DC_DWORD_SCATTERED_READ:
478 case GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE:
479 case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ:
480 case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE:
481 /* We have no data for this but assume it's roughly the same as
482 * untyped surface read/write.
483 */
484 latency = 300;
485 break;
486
487 case GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ:
488 case GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE:
489 /* Test code:
490 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
491 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
492 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
493 * send(8) g4<1>UD g112<8,8,1>UD
494 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
495 * .
496 * . [repeats 8 times]
497 * .
498 * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
499 * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
500 * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
501 * send(8) g4<1>UD g112<8,8,1>UD
502 * data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
503 *
504 * Running it 100 times as fragment shader on a 128x128 quad
505 * gives an average latency of 583 cycles per surface read,
506 * standard deviation 0.9%.
507 */
508 assert(!is_haswell);
509 latency = 600;
510 break;
511
512 case GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP:
513 /* Test code:
514 * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
515 * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
516 * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
517 * send(8) g4<1>ud g112<8,8,1>ud
518 * data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
519 *
520 * Running it 100 times as fragment shader on a 128x128 quad
521 * gives an average latency of 13867 cycles per atomic op,
522 * standard deviation 3%. Note that this is a rather
523 * pessimistic estimate, the actual latency in cases with few
524 * collisions between threads and favorable pipelining has been
525 * seen to be reduced by a factor of 100.
526 */
527 assert(!is_haswell);
528 latency = 14000;
529 break;
530
531 default:
532 unreachable("Unknown data cache message");
533 }
534 break;
535
536 case HSW_SFID_DATAPORT_DATA_CACHE_1:
537 switch (elk_dp_desc_msg_type(isa->devinfo, inst->desc)) {
538 case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ:
539 case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE:
540 case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ:
541 case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE:
542 case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE:
543 case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ:
544 case GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE:
545 case GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ:
546 case GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ:
547 case GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE:
548 /* See also GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ */
549 latency = 300;
550 break;
551
552 case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP:
553 case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2:
554 case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2:
555 case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP:
556 case GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP:
557 case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP:
558 case GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP:
559 case GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP:
560 case GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP:
561 /* See also GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
562 latency = 14000;
563 break;
564
565 default:
566 unreachable("Unknown data cache message");
567 }
568 break;
569
570 case GFX7_SFID_PIXEL_INTERPOLATOR:
571 latency = 50; /* TODO */
572 break;
573
574 case GFX12_SFID_UGM:
575 case GFX12_SFID_TGM:
576 case GFX12_SFID_SLM:
577 switch (lsc_msg_desc_opcode(isa->devinfo, inst->desc)) {
578 case LSC_OP_LOAD:
579 case LSC_OP_STORE:
580 case LSC_OP_LOAD_CMASK:
581 case LSC_OP_STORE_CMASK:
582 latency = 300;
583 break;
584 case LSC_OP_FENCE:
585 case LSC_OP_ATOMIC_INC:
586 case LSC_OP_ATOMIC_DEC:
587 case LSC_OP_ATOMIC_LOAD:
588 case LSC_OP_ATOMIC_STORE:
589 case LSC_OP_ATOMIC_ADD:
590 case LSC_OP_ATOMIC_SUB:
591 case LSC_OP_ATOMIC_MIN:
592 case LSC_OP_ATOMIC_MAX:
593 case LSC_OP_ATOMIC_UMIN:
594 case LSC_OP_ATOMIC_UMAX:
595 case LSC_OP_ATOMIC_CMPXCHG:
596 case LSC_OP_ATOMIC_FADD:
597 case LSC_OP_ATOMIC_FSUB:
598 case LSC_OP_ATOMIC_FMIN:
599 case LSC_OP_ATOMIC_FMAX:
600 case LSC_OP_ATOMIC_FCMPXCHG:
601 case LSC_OP_ATOMIC_AND:
602 case LSC_OP_ATOMIC_OR:
603 case LSC_OP_ATOMIC_XOR:
604 latency = 1400;
605 break;
606 default:
607 unreachable("unsupported new data port message instruction");
608 }
609 break;
610
611 case ELK_SFID_URB:
612 latency = 200;
613 break;
614
615 default:
616 unreachable("Unknown SFID");
617 }
618 break;
619
620 case ELK_OPCODE_DPAS:
621 switch (inst->rcount) {
622 case 1:
623 latency = 21;
624 break;
625 case 2:
626 latency = 22;
627 break;
628 case 8:
629 default:
630 latency = 32;
631 break;
632 }
633 break;
634
635 default:
636 /* 2 cycles:
637 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
638 *
639 * 16 cycles:
640 * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
641 * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
642 */
643 latency = 14;
644 break;
645 }
646 }
647
648 class elk_instruction_scheduler {
649 public:
650 elk_instruction_scheduler(void *mem_ctx, const elk_backend_shader *s, int grf_count,
651 int grf_write_scale, bool post_reg_alloc):
652 bs(s)
653 {
654 this->mem_ctx = mem_ctx;
655 this->lin_ctx = linear_context(this->mem_ctx);
656 this->grf_count = grf_count;
657 this->post_reg_alloc = post_reg_alloc;
658
659 this->last_grf_write = linear_zalloc_array(lin_ctx, elk_schedule_node *, grf_count * grf_write_scale);
660
661 this->nodes_len = s->cfg->last_block()->end_ip + 1;
662 this->nodes = linear_zalloc_array(lin_ctx, elk_schedule_node, this->nodes_len);
663
664 const struct intel_device_info *devinfo = bs->devinfo;
665 const struct elk_isa_info *isa = &bs->compiler->isa;
666
667 elk_schedule_node *n = nodes;
668 foreach_block_and_inst(block, elk_backend_instruction, inst, s->cfg) {
669 n->inst = inst;
670
671 /* We can't measure Gfx6 timings directly but expect them to be much
672 * closer to Gfx7 than Gfx4.
673 */
674 if (!post_reg_alloc)
675 n->latency = 1;
676 else if (devinfo->ver >= 6)
677 n->set_latency_gfx7(isa);
678 else
679 n->set_latency_gfx4();
680
681 n++;
682 }
683 assert(n == nodes + nodes_len);
684
685 current.block = NULL;
686 current.start = NULL;
687 current.end = NULL;
688 current.len = 0;
689 current.time = 0;
690 current.cand_generation = 0;
691 current.available.make_empty();
692 }
693
694 void add_barrier_deps(elk_schedule_node *n);
695 void add_cross_lane_deps(elk_schedule_node *n);
696 void add_dep(elk_schedule_node *before, elk_schedule_node *after, int latency);
697 void add_dep(elk_schedule_node *before, elk_schedule_node *after);
698
699 void set_current_block(elk_bblock_t *block);
700 void compute_delays();
701 void compute_exits();
702
703 void schedule(elk_schedule_node *chosen);
704 void update_children(elk_schedule_node *chosen);
705
706 void *mem_ctx;
707 linear_ctx *lin_ctx;
708
709 elk_schedule_node *nodes;
710 int nodes_len;
711
712 /* Current block being processed. */
713 struct {
714 elk_bblock_t *block;
715
716 /* Range of nodes in the block. End will point to first node
717 * address after the block, i.e. the range is [start, end).
718 */
719 elk_schedule_node *start;
720 elk_schedule_node *end;
721 int len;
722
723 int scheduled;
724
725 unsigned cand_generation;
726 int time;
727 exec_list available;
728 } current;
729
730 bool post_reg_alloc;
731 int grf_count;
732 const elk_backend_shader *bs;
733
734 /**
735 * Last instruction to have written the grf (or a channel in the grf, for the
736 * scalar backend)
737 */
738 elk_schedule_node **last_grf_write;
739 };
740
741 class elk_fs_instruction_scheduler : public elk_instruction_scheduler
742 {
743 public:
744 elk_fs_instruction_scheduler(void *mem_ctx, const elk_fs_visitor *v, int grf_count, int hw_reg_count,
745 int block_count, bool post_reg_alloc);
746 void calculate_deps();
747 bool is_compressed(const elk_fs_inst *inst);
748 elk_schedule_node *choose_instruction_to_schedule();
749 int calculate_issue_time(elk_backend_instruction *inst);
750
751 void count_reads_remaining(elk_backend_instruction *inst);
752 void setup_liveness(elk_cfg_t *cfg);
753 void update_register_pressure(elk_backend_instruction *inst);
754 int get_register_pressure_benefit(elk_backend_instruction *inst);
755 void clear_last_grf_write();
756
757 void schedule_instructions();
758 void run(instruction_scheduler_mode mode);
759
760 const elk_fs_visitor *v;
761 unsigned hw_reg_count;
762 int reg_pressure;
763 instruction_scheduler_mode mode;
764
765 /*
766 * The register pressure at the beginning of each basic block.
767 */
768
769 int *reg_pressure_in;
770
771 /*
772 * The virtual GRFs whose range overlaps the beginning of each basic block.
773 */
774
775 BITSET_WORD **livein;
776
777 /*
778 * The virtual GRFs whose range overlaps the end of each basic block.
779 */
780
781 BITSET_WORD **liveout;
782
783 /*
784 * The hardware GRFs whose range overlaps the end of each basic block.
785 */
786
787 BITSET_WORD **hw_liveout;
788
789 /*
790 * Whether we've scheduled a write for this virtual GRF yet.
791 */
792
793 bool *written;
794
795 /*
796 * How many reads we haven't scheduled for this virtual GRF yet.
797 */
798
799 int *reads_remaining;
800
801 /*
802 * How many reads we haven't scheduled for this hardware GRF yet.
803 */
804
805 int *hw_reads_remaining;
806
807 };
808
809 elk_fs_instruction_scheduler::elk_fs_instruction_scheduler(void *mem_ctx, const elk_fs_visitor *v,
810 int grf_count, int hw_reg_count,
811 int block_count, bool post_reg_alloc)
812 : elk_instruction_scheduler(mem_ctx, v, grf_count, /* grf_write_scale */ 16,
813 post_reg_alloc),
814 v(v)
815 {
816 this->hw_reg_count = hw_reg_count;
817 this->mode = SCHEDULE_NONE;
818 this->reg_pressure = 0;
819
820 if (!post_reg_alloc) {
821 this->reg_pressure_in = linear_zalloc_array(lin_ctx, int, block_count);
822
823 this->livein = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
824 for (int i = 0; i < block_count; i++)
825 this->livein[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
826 BITSET_WORDS(grf_count));
827
828 this->liveout = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
829 for (int i = 0; i < block_count; i++)
830 this->liveout[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
831 BITSET_WORDS(grf_count));
832
833 this->hw_liveout = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
834 for (int i = 0; i < block_count; i++)
835 this->hw_liveout[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
836 BITSET_WORDS(hw_reg_count));
837
838 setup_liveness(v->cfg);
839
840 this->written = linear_alloc_array(lin_ctx, bool, grf_count);
841
842 this->reads_remaining = linear_alloc_array(lin_ctx, int, grf_count);
843
844 this->hw_reads_remaining = linear_alloc_array(lin_ctx, int, hw_reg_count);
845 } else {
846 this->reg_pressure_in = NULL;
847 this->livein = NULL;
848 this->liveout = NULL;
849 this->hw_liveout = NULL;
850 this->written = NULL;
851 this->reads_remaining = NULL;
852 this->hw_reads_remaining = NULL;
853 }
854
855 foreach_block(block, v->cfg) {
856 set_current_block(block);
857
858 for (elk_schedule_node *n = current.start; n < current.end; n++)
859 n->issue_time = calculate_issue_time(n->inst);
860
861 calculate_deps();
862 compute_delays();
863 compute_exits();
864 }
865 }
866
867 static bool
868 is_src_duplicate(elk_fs_inst *inst, int src)
869 {
870 for (int i = 0; i < src; i++)
871 if (inst->src[i].equals(inst->src[src]))
872 return true;
873
874 return false;
875 }
876
877 void
878 elk_fs_instruction_scheduler::count_reads_remaining(elk_backend_instruction *be)
879 {
880 assert(reads_remaining);
881
882 elk_fs_inst *inst = (elk_fs_inst *)be;
883
884 for (int i = 0; i < inst->sources; i++) {
885 if (is_src_duplicate(inst, i))
886 continue;
887
888 if (inst->src[i].file == VGRF) {
889 reads_remaining[inst->src[i].nr]++;
890 } else if (inst->src[i].file == FIXED_GRF) {
891 if (inst->src[i].nr >= hw_reg_count)
892 continue;
893
894 for (unsigned j = 0; j < regs_read(inst, i); j++)
895 hw_reads_remaining[inst->src[i].nr + j]++;
896 }
897 }
898 }
899
900 void
901 elk_fs_instruction_scheduler::setup_liveness(elk_cfg_t *cfg)
902 {
903 const fs_live_variables &live = v->live_analysis.require();
904
905 /* First, compute liveness on a per-GRF level using the in/out sets from
906 * liveness calculation.
907 */
908 for (int block = 0; block < cfg->num_blocks; block++) {
909 for (int i = 0; i < live.num_vars; i++) {
910 if (BITSET_TEST(live.block_data[block].livein, i)) {
911 int vgrf = live.vgrf_from_var[i];
912 if (!BITSET_TEST(livein[block], vgrf)) {
913 reg_pressure_in[block] += v->alloc.sizes[vgrf];
914 BITSET_SET(livein[block], vgrf);
915 }
916 }
917
918 if (BITSET_TEST(live.block_data[block].liveout, i))
919 BITSET_SET(liveout[block], live.vgrf_from_var[i]);
920 }
921 }
922
923 /* Now, extend the live in/live out sets for when a range crosses a block
924 * boundary, which matches what our register allocator/interference code
925 * does to account for force_writemask_all and incompatible exec_masks.
926 */
927 for (int block = 0; block < cfg->num_blocks - 1; block++) {
928 for (int i = 0; i < grf_count; i++) {
929 if (live.vgrf_start[i] <= cfg->blocks[block]->end_ip &&
930 live.vgrf_end[i] >= cfg->blocks[block + 1]->start_ip) {
931 if (!BITSET_TEST(livein[block + 1], i)) {
932 reg_pressure_in[block + 1] += v->alloc.sizes[i];
933 BITSET_SET(livein[block + 1], i);
934 }
935
936 BITSET_SET(liveout[block], i);
937 }
938 }
939 }
940
941 int payload_last_use_ip[hw_reg_count];
942 v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
943
944 for (unsigned i = 0; i < hw_reg_count; i++) {
945 if (payload_last_use_ip[i] == -1)
946 continue;
947
948 for (int block = 0; block < cfg->num_blocks; block++) {
949 if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
950 reg_pressure_in[block]++;
951
952 if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
953 BITSET_SET(hw_liveout[block], i);
954 }
955 }
956 }
957
958 void
959 elk_fs_instruction_scheduler::update_register_pressure(elk_backend_instruction *be)
960 {
961 assert(reads_remaining);
962
963 elk_fs_inst *inst = (elk_fs_inst *)be;
964
965 if (inst->dst.file == VGRF) {
966 written[inst->dst.nr] = true;
967 }
968
969 for (int i = 0; i < inst->sources; i++) {
970 if (is_src_duplicate(inst, i))
971 continue;
972
973 if (inst->src[i].file == VGRF) {
974 reads_remaining[inst->src[i].nr]--;
975 } else if (inst->src[i].file == FIXED_GRF &&
976 inst->src[i].nr < hw_reg_count) {
977 for (unsigned off = 0; off < regs_read(inst, i); off++)
978 hw_reads_remaining[inst->src[i].nr + off]--;
979 }
980 }
981 }
982
983 int
984 elk_fs_instruction_scheduler::get_register_pressure_benefit(elk_backend_instruction *be)
985 {
986 elk_fs_inst *inst = (elk_fs_inst *)be;
987 int benefit = 0;
988 const int block_idx = current.block->num;
989
990 if (inst->dst.file == VGRF) {
991 if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
992 !written[inst->dst.nr])
993 benefit -= v->alloc.sizes[inst->dst.nr];
994 }
995
996 for (int i = 0; i < inst->sources; i++) {
997 if (is_src_duplicate(inst, i))
998 continue;
999
1000 if (inst->src[i].file == VGRF &&
1001 !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
1002 reads_remaining[inst->src[i].nr] == 1)
1003 benefit += v->alloc.sizes[inst->src[i].nr];
1004
1005 if (inst->src[i].file == FIXED_GRF &&
1006 inst->src[i].nr < hw_reg_count) {
1007 for (unsigned off = 0; off < regs_read(inst, i); off++) {
1008 int reg = inst->src[i].nr + off;
1009 if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
1010 hw_reads_remaining[reg] == 1) {
1011 benefit++;
1012 }
1013 }
1014 }
1015 }
1016
1017 return benefit;
1018 }
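
/* A worked example of the benefit computation above, with hypothetical
 * register counts: an instruction whose destination is a fresh 2-reg VGRF
 * (not live-in, not yet written) and whose only VGRF source is a 2-reg VGRF
 * on its last remaining read and not live-out gets a benefit of -2 + 2 = 0;
 * had the destination already been written earlier in the block, the
 * benefit would be +2 registers freed.
 */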
1019
1020 class elk_vec4_instruction_scheduler : public elk_instruction_scheduler
1021 {
1022 public:
1023 elk_vec4_instruction_scheduler(void *mem_ctx, const vec4_visitor *v, int grf_count);
1024 void calculate_deps();
1025 elk_schedule_node *choose_instruction_to_schedule();
1026 const vec4_visitor *v;
1027
1028 void run();
1029 };
1030
1031 elk_vec4_instruction_scheduler::elk_vec4_instruction_scheduler(void *mem_ctx, const vec4_visitor *v,
1032 int grf_count)
1033 : elk_instruction_scheduler(mem_ctx, v, grf_count, /* grf_write_scale */ 1,
1034 /* post_reg_alloc */ true),
1035 v(v)
1036 {
1037 }
1038
1039 void
1040 elk_instruction_scheduler::set_current_block(elk_bblock_t *block)
1041 {
1042 current.block = block;
1043 current.start = nodes + block->start_ip;
1044 current.len = block->end_ip - block->start_ip + 1;
1045 current.end = current.start + current.len;
1046 current.time = 0;
1047 current.scheduled = 0;
1048 current.cand_generation = 1;
1049 }
1050
1051 /** Computation of the delay member of each node. */
1052 void
1053 elk_instruction_scheduler::compute_delays()
1054 {
1055 for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
1056 if (!n->children_count) {
1057 n->delay = n->issue_time;
1058 } else {
1059 for (int i = 0; i < n->children_count; i++) {
1060 assert(n->children[i].n->delay);
1061 n->delay = MAX2(n->delay, n->latency + n->children[i].n->delay);
1062 }
1063 }
1064 }
1065 }
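
/* For example, in a hypothetical three-node chain a -> b -> c with latencies
 * of 14 and issue times of 2, the leaf c gets delay = 2 (its issue_time),
 * then b gets delay = 14 + 2 = 16 and a gets delay = 14 + 16 = 30, i.e. each
 * node's critical path to the end of the block.
 */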
1066
1067 void
1068 elk_instruction_scheduler::compute_exits()
1069 {
1070 /* Calculate a lower bound of the scheduling time of each node in the
1071 * graph. This is analogous to the node's critical path but calculated
1072 * from the top instead of from the bottom of the block.
1073 */
1074 for (elk_schedule_node *n = current.start; n < current.end; n++) {
1075 for (int i = 0; i < n->children_count; i++) {
1076 elk_schedule_node_child *child = &n->children[i];
1077 child->n->initial_unblocked_time =
1078 MAX2(child->n->initial_unblocked_time,
1079 n->initial_unblocked_time + n->issue_time + child->effective_latency);
1080 }
1081 }
1082
1083 /* Calculate the exit of each node by induction based on the exit nodes of
1084 * its children. The preferred exit of a node is the one among the exit
1085 * nodes of its children which can be unblocked first according to the
1086 * optimistic unblocked time estimate calculated above.
1087 */
1088 for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
1089 n->exit = (n->inst->opcode == ELK_OPCODE_HALT ? n : NULL);
1090
1091 for (int i = 0; i < n->children_count; i++) {
1092 if (exit_initial_unblocked_time(n->children[i].n) < exit_initial_unblocked_time(n))
1093 n->exit = n->children[i].n->exit;
1094 }
1095 }
1096 }
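
/* Continuing the hypothetical chain a -> b -> c from the compute_delays()
 * example: if c is an ELK_OPCODE_HALT, the backward pass sets c->exit = c
 * and then propagates that choice to b and a, since c's exit has a finite
 * unblocked time while "no exit" counts as INT_MAX. Nodes that are not a
 * HALT and cannot reach one keep exit == NULL.
 */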
1097
1098 /**
1099 * Add a dependency between two instruction nodes.
1100 *
1101 * The @after node will be scheduled after @before. We will try to
1102 * schedule it @latency cycles after @before, but no guarantees there.
1103 */
1104 void
1105 elk_instruction_scheduler::add_dep(elk_schedule_node *before, elk_schedule_node *after,
1106 int latency)
1107 {
1108 if (!before || !after)
1109 return;
1110
1111 assert(before != after);
1112
1113 for (int i = 0; i < before->children_count; i++) {
1114 elk_schedule_node_child *child = &before->children[i];
1115 if (child->n == after) {
1116 child->effective_latency = MAX2(child->effective_latency, latency);
1117 return;
1118 }
1119 }
1120
1121 if (before->children_cap <= before->children_count) {
1122 if (before->children_cap < 16)
1123 before->children_cap = 16;
1124 else
1125 before->children_cap *= 2;
1126
1127 before->children = reralloc(mem_ctx, before->children,
1128 elk_schedule_node_child,
1129 before->children_cap);
1130 }
1131
1132 elk_schedule_node_child *child = &before->children[before->children_count];
1133 child->n = after;
1134 child->effective_latency = latency;
1135 before->children_count++;
1136 after->initial_parent_count++;
1137 }
1138
1139 void
1140 elk_instruction_scheduler::add_dep(elk_schedule_node *before, elk_schedule_node *after)
1141 {
1142 if (!before)
1143 return;
1144
1145 add_dep(before, after, before->latency);
1146 }
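
/* Usage note: the two-argument overload above simply defaults the edge
 * latency to @before's own latency, which is what the RAW paths in
 * calculate_deps() rely on; callers use the three-argument form when they
 * only need ordering (latency 0) or a small fixed latency such as the 2
 * cycles used for MRF write-after-read below.
 */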
1147
1148 static bool
1149 is_scheduling_barrier(const elk_backend_instruction *inst)
1150 {
1151 return inst->opcode == ELK_SHADER_OPCODE_HALT_TARGET ||
1152 inst->is_control_flow() ||
1153 inst->has_side_effects();
1154 }
1155
1156 static bool
1157 has_cross_lane_access(const elk_fs_inst *inst)
1158 {
1159 /* FINISHME:
1160 *
1161 * This function is likely incomplete in terms of identifying cross-lane
1162 * accesses.
1163 */
1164 if (inst->opcode == ELK_SHADER_OPCODE_BROADCAST ||
1165 inst->opcode == ELK_SHADER_OPCODE_READ_SR_REG ||
1166 inst->opcode == ELK_SHADER_OPCODE_CLUSTER_BROADCAST ||
1167 inst->opcode == ELK_SHADER_OPCODE_SHUFFLE ||
1168 inst->opcode == ELK_FS_OPCODE_LOAD_LIVE_CHANNELS ||
1169 inst->opcode == ELK_SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL ||
1170 inst->opcode == ELK_SHADER_OPCODE_FIND_LIVE_CHANNEL)
1171 return true;
1172
1173 for (unsigned s = 0; s < inst->sources; s++) {
1174 if (inst->src[s].file == VGRF) {
1175 if (inst->src[s].stride == 0)
1176 return true;
1177 }
1178 }
1179
1180 return false;
1181 }
1182
1183 /**
1184 * Sometimes we really want this node to execute after everything that
1185 * was before it and before everything that followed it. This adds
1186 * the deps to do so.
1187 */
1188 void
1189 elk_instruction_scheduler::add_barrier_deps(elk_schedule_node *n)
1190 {
1191 for (elk_schedule_node *prev = n - 1; prev >= current.start; prev--) {
1192 add_dep(prev, n, 0);
1193 if (is_scheduling_barrier(prev->inst))
1194 break;
1195 }
1196
1197 for (elk_schedule_node *next = n + 1; next < current.end; next++) {
1198 add_dep(n, next, 0);
1199 if (is_scheduling_barrier(next->inst))
1200 break;
1201 }
1202 }
1203
1204 /**
1205 * Because some instructions like HALT can disable lanes, scheduling them
1206 * prior to a cross-lane access should not be allowed; otherwise we could end
1207 * up with later instructions accessing uninitialized data.
1208 */
1209 void
1210 elk_instruction_scheduler::add_cross_lane_deps(elk_schedule_node *n)
1211 {
1212 for (elk_schedule_node *prev = n - 1; prev >= current.start; prev--) {
1213 if (has_cross_lane_access((elk_fs_inst*)prev->inst))
1214 add_dep(prev, n, 0);
1215 }
1216 }
1217
1218 /* Instruction scheduling needs to be aware of when an MRF write
1219 * actually writes 2 MRFs.
1220 */
1221 bool
1222 elk_fs_instruction_scheduler::is_compressed(const elk_fs_inst *inst)
1223 {
1224 return inst->exec_size == 16;
1225 }
1226
1227 /* Clears last_grf_write to be ready to start calculating deps for a block
1228 * again.
1229 *
1230 * Since pre-RA grf_count scales with instructions, and instructions scale with
1231 * BBs, we don't want to memset all of last_grf_write per block, or we'd end up
1232 * O(n^2) in the number of blocks. For shaders using softfp64, we get a *lot* of
1233 * blocks.
1234 *
1235 * We don't bother being careful for post-ra, since then grf_count doesn't scale
1236 * with instructions.
1237 */
1238 void
1239 elk_fs_instruction_scheduler::clear_last_grf_write()
1240 {
1241 if (!post_reg_alloc) {
1242 for (elk_schedule_node *n = current.start; n < current.end; n++) {
1243 elk_fs_inst *inst = (elk_fs_inst *)n->inst;
1244
1245 if (inst->dst.file == VGRF) {
1246 /* Don't bother being careful with regs_written(), quicker to just clear 2 cachelines. */
1247 memset(&last_grf_write[inst->dst.nr * 16], 0, sizeof(*last_grf_write) * 16);
1248 }
1249 }
1250 } else {
1251 memset(last_grf_write, 0, sizeof(*last_grf_write) * grf_count * 16);
1252 }
1253 }
1254
1255 void
1256 elk_fs_instruction_scheduler::calculate_deps()
1257 {
1258 /* Pre-register-allocation, this tracks the last write per VGRF offset.
1259 * After register allocation, reg_offsets are gone and we track individual
1260 * GRF registers.
1261 */
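/* For example (assuming the usual 32-byte REG_SIZE of this hardware), a
 * pre-RA write to VGRF 5 at byte offset 64 is tracked in slot
 * 5 * 16 + 64 / REG_SIZE == 82 of last_grf_write, matching the
 * grf_write_scale of 16 passed in by the FS scheduler constructor.
 */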
1262 elk_schedule_node *last_mrf_write[ELK_MAX_MRF(v->devinfo->ver)];
1263 elk_schedule_node *last_conditional_mod[8] = {};
1264 elk_schedule_node *last_accumulator_write = NULL;
1265 /* Fixed HW registers are assumed to be separate from the virtual
1266 * GRFs, so they can be tracked separately. We don't really write
1267 * to fixed GRFs much, so don't bother tracking them on a more
1268 * granular level.
1269 */
1270 elk_schedule_node *last_fixed_grf_write = NULL;
1271
1272 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1273
1274 /* top-to-bottom dependencies: RAW and WAW. */
1275 for (elk_schedule_node *n = current.start; n < current.end; n++) {
1276 elk_fs_inst *inst = (elk_fs_inst *)n->inst;
1277
1278 if (is_scheduling_barrier(inst))
1279 add_barrier_deps(n);
1280
1281 if (inst->opcode == ELK_OPCODE_HALT ||
1282 inst->opcode == ELK_SHADER_OPCODE_HALT_TARGET)
1283 add_cross_lane_deps(n);
1284
1285 /* read-after-write deps. */
1286 for (int i = 0; i < inst->sources; i++) {
1287 if (inst->src[i].file == VGRF) {
1288 if (post_reg_alloc) {
1289 for (unsigned r = 0; r < regs_read(inst, i); r++)
1290 add_dep(last_grf_write[inst->src[i].nr + r], n);
1291 } else {
1292 for (unsigned r = 0; r < regs_read(inst, i); r++) {
1293 add_dep(last_grf_write[inst->src[i].nr * 16 +
1294 inst->src[i].offset / REG_SIZE + r], n);
1295 }
1296 }
1297 } else if (inst->src[i].file == FIXED_GRF) {
1298 if (post_reg_alloc) {
1299 for (unsigned r = 0; r < regs_read(inst, i); r++)
1300 add_dep(last_grf_write[inst->src[i].nr + r], n);
1301 } else {
1302 add_dep(last_fixed_grf_write, n);
1303 }
1304 } else if (inst->src[i].is_accumulator()) {
1305 add_dep(last_accumulator_write, n);
1306 } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
1307 add_barrier_deps(n);
1308 }
1309 }
1310
1311 if (inst->base_mrf != -1) {
1312 for (int i = 0; i < inst->mlen; i++) {
1313 /* It looks like the MRF regs are released in the send
1314 * instruction once it's sent, not when the result comes
1315 * back.
1316 */
1317 add_dep(last_mrf_write[inst->base_mrf + i], n);
1318 }
1319 }
1320
1321 if (const unsigned mask = inst->flags_read(v->devinfo)) {
1322 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1323
1324 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1325 if (mask & (1 << i))
1326 add_dep(last_conditional_mod[i], n);
1327 }
1328 }
1329
1330 if (inst->reads_accumulator_implicitly()) {
1331 add_dep(last_accumulator_write, n);
1332 }
1333
1334 /* write-after-write deps. */
1335 if (inst->dst.file == VGRF) {
1336 if (post_reg_alloc) {
1337 for (unsigned r = 0; r < regs_written(inst); r++) {
1338 add_dep(last_grf_write[inst->dst.nr + r], n);
1339 last_grf_write[inst->dst.nr + r] = n;
1340 }
1341 } else {
1342 for (unsigned r = 0; r < regs_written(inst); r++) {
1343 add_dep(last_grf_write[inst->dst.nr * 16 +
1344 inst->dst.offset / REG_SIZE + r], n);
1345 last_grf_write[inst->dst.nr * 16 +
1346 inst->dst.offset / REG_SIZE + r] = n;
1347 }
1348 }
1349 } else if (inst->dst.file == MRF) {
1350 int reg = inst->dst.nr & ~ELK_MRF_COMPR4;
1351
1352 add_dep(last_mrf_write[reg], n);
1353 last_mrf_write[reg] = n;
1354 if (is_compressed(inst)) {
1355 if (inst->dst.nr & ELK_MRF_COMPR4)
1356 reg += 4;
1357 else
1358 reg++;
1359 add_dep(last_mrf_write[reg], n);
1360 last_mrf_write[reg] = n;
1361 }
1362 } else if (inst->dst.file == FIXED_GRF) {
1363 if (post_reg_alloc) {
1364 for (unsigned r = 0; r < regs_written(inst); r++) {
1365 add_dep(last_grf_write[inst->dst.nr + r], n);
1366 last_grf_write[inst->dst.nr + r] = n;
1367 }
1368 } else {
1369 add_dep(last_fixed_grf_write, n);
1370 last_fixed_grf_write = n;
1371 }
1372 } else if (inst->dst.is_accumulator()) {
1373 add_dep(last_accumulator_write, n);
1374 last_accumulator_write = n;
1375 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1376 add_barrier_deps(n);
1377 }
1378
1379 if (inst->mlen > 0 && inst->base_mrf != -1) {
1380 for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
1381 add_dep(last_mrf_write[inst->base_mrf + i], n);
1382 last_mrf_write[inst->base_mrf + i] = n;
1383 }
1384 }
1385
1386 if (const unsigned mask = inst->flags_written(v->devinfo)) {
1387 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1388
1389 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1390 if (mask & (1 << i)) {
1391 add_dep(last_conditional_mod[i], n, 0);
1392 last_conditional_mod[i] = n;
1393 }
1394 }
1395 }
1396
1397 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1398 !inst->dst.is_accumulator()) {
1399 add_dep(last_accumulator_write, n);
1400 last_accumulator_write = n;
1401 }
1402 }
1403
1404 clear_last_grf_write();
1405
1406 /* bottom-to-top dependencies: WAR */
1407 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1408 memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
1409 last_accumulator_write = NULL;
1410 last_fixed_grf_write = NULL;
1411
1412 for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
1413 elk_fs_inst *inst = (elk_fs_inst *)n->inst;
1414
1415 /* write-after-read deps. */
1416 for (int i = 0; i < inst->sources; i++) {
1417 if (inst->src[i].file == VGRF) {
1418 if (post_reg_alloc) {
1419 for (unsigned r = 0; r < regs_read(inst, i); r++)
1420 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1421 } else {
1422 for (unsigned r = 0; r < regs_read(inst, i); r++) {
1423 add_dep(n, last_grf_write[inst->src[i].nr * 16 +
1424 inst->src[i].offset / REG_SIZE + r], 0);
1425 }
1426 }
1427 } else if (inst->src[i].file == FIXED_GRF) {
1428 if (post_reg_alloc) {
1429 for (unsigned r = 0; r < regs_read(inst, i); r++)
1430 add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
1431 } else {
1432 add_dep(n, last_fixed_grf_write, 0);
1433 }
1434 } else if (inst->src[i].is_accumulator()) {
1435 add_dep(n, last_accumulator_write, 0);
1436 } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
1437 add_barrier_deps(n);
1438 }
1439 }
1440
1441 if (inst->base_mrf != -1) {
1442 for (int i = 0; i < inst->mlen; i++) {
1443 /* It looks like the MRF regs are released in the send
1444 * instruction once it's sent, not when the result comes
1445 * back.
1446 */
1447 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1448 }
1449 }
1450
1451 if (const unsigned mask = inst->flags_read(v->devinfo)) {
1452 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1453
1454 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1455 if (mask & (1 << i))
1456 add_dep(n, last_conditional_mod[i]);
1457 }
1458 }
1459
1460 if (inst->reads_accumulator_implicitly()) {
1461 add_dep(n, last_accumulator_write);
1462 }
1463
1464 /* Update the things this instruction wrote, so earlier reads
1465 * can mark this as a WAR dependency.
1466 */
1467 if (inst->dst.file == VGRF) {
1468 if (post_reg_alloc) {
1469 for (unsigned r = 0; r < regs_written(inst); r++)
1470 last_grf_write[inst->dst.nr + r] = n;
1471 } else {
1472 for (unsigned r = 0; r < regs_written(inst); r++) {
1473 last_grf_write[inst->dst.nr * 16 +
1474 inst->dst.offset / REG_SIZE + r] = n;
1475 }
1476 }
1477 } else if (inst->dst.file == MRF) {
1478 int reg = inst->dst.nr & ~ELK_MRF_COMPR4;
1479
1480 last_mrf_write[reg] = n;
1481
1482 if (is_compressed(inst)) {
1483 if (inst->dst.nr & ELK_MRF_COMPR4)
1484 reg += 4;
1485 else
1486 reg++;
1487
1488 last_mrf_write[reg] = n;
1489 }
1490 } else if (inst->dst.file == FIXED_GRF) {
1491 if (post_reg_alloc) {
1492 for (unsigned r = 0; r < regs_written(inst); r++)
1493 last_grf_write[inst->dst.nr + r] = n;
1494 } else {
1495 last_fixed_grf_write = n;
1496 }
1497 } else if (inst->dst.is_accumulator()) {
1498 last_accumulator_write = n;
1499 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1500 add_barrier_deps(n);
1501 }
1502
1503 if (inst->mlen > 0 && inst->base_mrf != -1) {
1504 for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
1505 last_mrf_write[inst->base_mrf + i] = n;
1506 }
1507 }
1508
1509 if (const unsigned mask = inst->flags_written(v->devinfo)) {
1510 assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
1511
1512 for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
1513 if (mask & (1 << i))
1514 last_conditional_mod[i] = n;
1515 }
1516 }
1517
1518 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1519 last_accumulator_write = n;
1520 }
1521 }
1522
1523 clear_last_grf_write();
1524 }
1525
1526 void
1527 elk_vec4_instruction_scheduler::calculate_deps()
1528 {
1529 elk_schedule_node *last_mrf_write[ELK_MAX_MRF(v->devinfo->ver)];
1530 elk_schedule_node *last_conditional_mod = NULL;
1531 elk_schedule_node *last_accumulator_write = NULL;
1532 /* Fixed HW registers are assumed to be separate from the virtual
1533 * GRFs, so they can be tracked separately. We don't really write
1534 * to fixed GRFs much, so don't bother tracking them on a more
1535 * granular level.
1536 */
1537 elk_schedule_node *last_fixed_grf_write = NULL;
1538
1539 memset(last_grf_write, 0, grf_count * sizeof(*last_grf_write));
1540 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1541
1542 /* top-to-bottom dependencies: RAW and WAW. */
1543 for (elk_schedule_node *n = current.start; n < current.end; n++) {
1544 vec4_instruction *inst = (vec4_instruction *)n->inst;
1545
1546 if (is_scheduling_barrier(inst))
1547 add_barrier_deps(n);
1548
1549 /* read-after-write deps. */
1550 for (int i = 0; i < 3; i++) {
1551 if (inst->src[i].file == VGRF) {
1552 for (unsigned j = 0; j < regs_read(inst, i); ++j)
1553 add_dep(last_grf_write[inst->src[i].nr + j], n);
1554 } else if (inst->src[i].file == FIXED_GRF) {
1555 add_dep(last_fixed_grf_write, n);
1556 } else if (inst->src[i].is_accumulator()) {
1557 assert(last_accumulator_write);
1558 add_dep(last_accumulator_write, n);
1559 } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
1560 add_barrier_deps(n);
1561 }
1562 }
1563
1564 if (inst->reads_g0_implicitly())
1565 add_dep(last_fixed_grf_write, n);
1566
1567 if (!inst->is_send_from_grf()) {
1568 for (int i = 0; i < inst->mlen; i++) {
1569 /* It looks like the MRF regs are released in the send
1570 * instruction once it's sent, not when the result comes
1571 * back.
1572 */
1573 add_dep(last_mrf_write[inst->base_mrf + i], n);
1574 }
1575 }
1576
1577 if (inst->reads_flag()) {
1578 assert(last_conditional_mod);
1579 add_dep(last_conditional_mod, n);
1580 }
1581
1582 if (inst->reads_accumulator_implicitly()) {
1583 assert(last_accumulator_write);
1584 add_dep(last_accumulator_write, n);
1585 }
1586
1587 /* write-after-write deps. */
1588 if (inst->dst.file == VGRF) {
1589 for (unsigned j = 0; j < regs_written(inst); ++j) {
1590 add_dep(last_grf_write[inst->dst.nr + j], n);
1591 last_grf_write[inst->dst.nr + j] = n;
1592 }
1593 } else if (inst->dst.file == MRF) {
1594 add_dep(last_mrf_write[inst->dst.nr], n);
1595 last_mrf_write[inst->dst.nr] = n;
1596 } else if (inst->dst.file == FIXED_GRF) {
1597 add_dep(last_fixed_grf_write, n);
1598 last_fixed_grf_write = n;
1599 } else if (inst->dst.is_accumulator()) {
1600 add_dep(last_accumulator_write, n);
1601 last_accumulator_write = n;
1602 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1603 add_barrier_deps(n);
1604 }
1605
1606 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1607 for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
1608 add_dep(last_mrf_write[inst->base_mrf + i], n);
1609 last_mrf_write[inst->base_mrf + i] = n;
1610 }
1611 }
1612
1613 if (inst->writes_flag(v->devinfo)) {
1614 add_dep(last_conditional_mod, n, 0);
1615 last_conditional_mod = n;
1616 }
1617
1618 if (inst->writes_accumulator_implicitly(v->devinfo) &&
1619 !inst->dst.is_accumulator()) {
1620 add_dep(last_accumulator_write, n);
1621 last_accumulator_write = n;
1622 }
1623 }
1624
1625 /* bottom-to-top dependencies: WAR */
1626 memset(last_grf_write, 0, grf_count * sizeof(*last_grf_write));
1627 memset(last_mrf_write, 0, sizeof(last_mrf_write));
1628 last_conditional_mod = NULL;
1629 last_accumulator_write = NULL;
1630 last_fixed_grf_write = NULL;
1631
1632 for (elk_schedule_node *n = current.end - 1; n >= current.start; n--) {
1633 vec4_instruction *inst = (vec4_instruction *)n->inst;
1634
1635 /* write-after-read deps. */
1636 for (int i = 0; i < 3; i++) {
1637 if (inst->src[i].file == VGRF) {
1638 for (unsigned j = 0; j < regs_read(inst, i); ++j)
1639 add_dep(n, last_grf_write[inst->src[i].nr + j]);
1640 } else if (inst->src[i].file == FIXED_GRF) {
1641 add_dep(n, last_fixed_grf_write);
1642 } else if (inst->src[i].is_accumulator()) {
1643 add_dep(n, last_accumulator_write);
1644 } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
1645 add_barrier_deps(n);
1646 }
1647 }
1648
1649 if (!inst->is_send_from_grf()) {
1650 for (int i = 0; i < inst->mlen; i++) {
1651 /* It looks like the MRF regs are released in the send
1652 * instruction once it's sent, not when the result comes
1653 * back.
1654 */
1655 add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1656 }
1657 }
1658
1659 if (inst->reads_flag()) {
1660 add_dep(n, last_conditional_mod);
1661 }
1662
1663 if (inst->reads_accumulator_implicitly()) {
1664 add_dep(n, last_accumulator_write);
1665 }
1666
1667 /* Update the things this instruction wrote, so earlier reads
1668 * can mark this as a WAR dependency.
1669 */
1670 if (inst->dst.file == VGRF) {
1671 for (unsigned j = 0; j < regs_written(inst); ++j)
1672 last_grf_write[inst->dst.nr + j] = n;
1673 } else if (inst->dst.file == MRF) {
1674 last_mrf_write[inst->dst.nr] = n;
1675 } else if (inst->dst.file == FIXED_GRF) {
1676 last_fixed_grf_write = n;
1677 } else if (inst->dst.is_accumulator()) {
1678 last_accumulator_write = n;
1679 } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1680 add_barrier_deps(n);
1681 }
1682
1683 if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1684 for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
1685 last_mrf_write[inst->base_mrf + i] = n;
1686 }
1687 }
1688
1689 if (inst->writes_flag(v->devinfo)) {
1690 last_conditional_mod = n;
1691 }
1692
1693 if (inst->writes_accumulator_implicitly(v->devinfo)) {
1694 last_accumulator_write = n;
1695 }
1696 }
1697 }
1698
1699 elk_schedule_node *
1700 elk_fs_instruction_scheduler::choose_instruction_to_schedule()
1701 {
1702 elk_schedule_node *chosen = NULL;
1703
1704 if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1705 int chosen_time = 0;
1706
1707 /* Of the instructions ready to execute or the closest to being ready,
1708 * choose the one most likely to unblock an early program exit, or
1709 * otherwise the oldest one.
1710 */
1711 foreach_in_list(elk_schedule_node, n, ¤t.available) {
1712 if (!chosen ||
1713 exit_tmp_unblocked_time(n) < exit_tmp_unblocked_time(chosen) ||
1714 (exit_tmp_unblocked_time(n) == exit_tmp_unblocked_time(chosen) &&
1715 n->tmp.unblocked_time < chosen_time)) {
1716 chosen = n;
1717 chosen_time = n->tmp.unblocked_time;
1718 }
1719 }
1720 } else {
1721 int chosen_register_pressure_benefit = 0;
1722
1723 /* Before register allocation, we don't care about the latencies of
1724 * instructions. All we care about is reducing live intervals of
1725 * variables so that we can avoid register spilling, or get SIMD16
1726 * shaders which naturally do a better job of hiding instruction
1727 * latency.
1728 */
1729 foreach_in_list(elk_schedule_node, n, ¤t.available) {
1730 elk_fs_inst *inst = (elk_fs_inst *)n->inst;
1731
1732 if (!chosen) {
1733 chosen = n;
1734 chosen_register_pressure_benefit =
1735 get_register_pressure_benefit(chosen->inst);
1736 continue;
1737 }
1738
1739 /* Most important: If we can definitely reduce register pressure, do
1740 * so immediately.
1741 */
1742 int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1743
1744 if (register_pressure_benefit > 0 &&
1745 register_pressure_benefit > chosen_register_pressure_benefit) {
1746 chosen = n;
1747 chosen_register_pressure_benefit = register_pressure_benefit;
1748 continue;
1749 } else if (chosen_register_pressure_benefit > 0 &&
1750 (register_pressure_benefit <
1751 chosen_register_pressure_benefit)) {
1752 continue;
1753 }
1754
1755 if (mode == SCHEDULE_PRE_LIFO) {
1756 /* Prefer instructions that recently became available for
1757 * scheduling. These are the things that are most likely to
1758 * (eventually) make a variable dead and reduce register pressure.
1759 * Typical register pressure estimates don't work for us because
1760 * most of our pressure comes from texturing, where no single
1761 * instruction to schedule will make a vec4 value dead.
1762 */
1763 if (n->tmp.cand_generation > chosen->tmp.cand_generation) {
1764 chosen = n;
1765 chosen_register_pressure_benefit = register_pressure_benefit;
1766 continue;
1767 } else if (n->tmp.cand_generation < chosen->tmp.cand_generation) {
1768 continue;
1769 }
1770
1771 /* On MRF-using chips, prefer non-SEND instructions. If we don't
1772 * do this, then because we prefer instructions that just became
1773 * candidates, we'll end up in a pattern of scheduling a SEND,
1774 * then the MRFs for the next SEND, then the next SEND, then the
1775 * MRFs, etc., without ever consuming the results of a send.
1776 */
1777 if (v->devinfo->ver < 7) {
1778 elk_fs_inst *chosen_inst = (elk_fs_inst *)chosen->inst;
1779
1780 /* We use size_written > 4 * exec_size as our test for the kind
1781 * of send instruction to avoid -- only sends generate many
1782 * regs, and a single-result send is probably actually reducing
1783 * register pressure.
1784 */
1785 if (inst->size_written <= 4 * inst->exec_size &&
1786 chosen_inst->size_written > 4 * chosen_inst->exec_size) {
1787 chosen = n;
1788 chosen_register_pressure_benefit = register_pressure_benefit;
1789 continue;
1790 } else if (inst->size_written > chosen_inst->size_written) {
1791 continue;
1792 }
1793 }
1794 }
1795
1796 /* For instructions pushed on the cands list at the same time, prefer
1797 * the one with the highest delay to the end of the program. This is
1798 * most likely to have its values able to be consumed first (such as
1799 * for a large tree of lowered ubo loads, which appear reversed in
1800 * the instruction stream with respect to when they can be consumed).
1801 */
1802 if (n->delay > chosen->delay) {
1803 chosen = n;
1804 chosen_register_pressure_benefit = register_pressure_benefit;
1805 continue;
1806 } else if (n->delay < chosen->delay) {
1807 continue;
1808 }
1809
1810 /* Prefer the node most likely to unblock an early program exit.
1811 */
1812 if (exit_tmp_unblocked_time(n) < exit_tmp_unblocked_time(chosen)) {
1813 chosen = n;
1814 chosen_register_pressure_benefit = register_pressure_benefit;
1815 continue;
1816 } else if (exit_tmp_unblocked_time(n) > exit_tmp_unblocked_time(chosen)) {
1817 continue;
1818 }
1819
1820 /* If all other metrics are equal, we prefer the first instruction in
1821 * the list (program execution).
1822 */
1823 }
1824 }
1825
1826 return chosen;
1827 }
1828
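/**
 * The vec4 scheduler has a single heuristic: of the available nodes, pick
 * the one with the earliest unblocked time.  It does not use the
 * register-pressure or early-exit heuristics of the FS path.
 */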
1829 elk_schedule_node *
1830 elk_vec4_instruction_scheduler::choose_instruction_to_schedule()
1831 {
1832 elk_schedule_node *chosen = NULL;
1833 int chosen_time = 0;
1834
1835 /* Of the instructions ready to execute or the closest to being ready,
1836 * choose the oldest one.
1837 */
1838 foreach_in_list(elk_schedule_node, n, &current.available) {
1839 if (!chosen || n->tmp.unblocked_time < chosen_time) {
1840 chosen = n;
1841 chosen_time = n->tmp.unblocked_time;
1842 }
1843 }
1844
1845 return chosen;
1846 }
1847
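/**
 * Issue cost of an FS instruction in scheduler clock ticks: compressed
 * (two-register-wide) instructions cost 4, everything else costs 2, plus
 * one extra tick per destination register when the destination is
 * predicted to hit a GRF bank conflict.
 */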
1848 int
1849 elk_fs_instruction_scheduler::calculate_issue_time(elk_backend_instruction *inst0)
1850 {
1851 const struct elk_isa_info *isa = &v->compiler->isa;
1852 const elk_fs_inst *inst = static_cast<elk_fs_inst *>(inst0);
1853 const unsigned overhead = v->grf_used && elk_has_bank_conflict(isa, inst) ?
1854 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE) : 0;
1855 if (is_compressed(inst))
1856 return 4 + overhead;
1857 else
1858 return 2 + overhead;
1859 }
1860
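/**
 * Commit a chosen node: move its instruction back into the basic block,
 * advance the clock to the point where the instruction can actually start,
 * and then add its issue time so later picks know when the pipe is free
 * again.
 */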
1861 void
1862 elk_instruction_scheduler::schedule(elk_schedule_node *chosen)
1863 {
1864 assert(current.scheduled < current.len);
1865 current.scheduled++;
1866
1867 assert(chosen);
1868 chosen->remove();
1869 current.block->instructions.push_tail(chosen->inst);
1870
1871 /* If we expected a delay for scheduling, then bump the clock to reflect
1872 * that. In reality, the hardware will switch to another hyperthread
1873 * and may not return to dispatching our thread for a while even after
1874 * we're unblocked. After this, we have the time when the chosen
1875 * instruction will start executing.
1876 */
1877 current.time = MAX2(current.time, chosen->tmp.unblocked_time);
1878
1879 /* Update the clock for how soon an instruction could start after the
1880 * chosen one.
1881 */
1882 current.time += chosen->issue_time;
1883
1884 if (debug) {
1885 fprintf(stderr, "clock %4d, scheduled: ", current.time);
1886 bs->dump_instruction(chosen->inst);
1887 }
1888 }
1889
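/**
 * Propagate the effects of scheduling a node: push its latency out along
 * each DAG edge, decrement the children's outstanding parent counts, and
 * move any child whose last parent was just scheduled onto the available
 * list.  On pre-gfx6 hardware this also delays other math instructions to
 * model the more limited shared mathbox.
 */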
1890 void
1891 elk_instruction_scheduler::update_children(elk_schedule_node *chosen)
1892 {
1893 /* Now that we've scheduled a new instruction, some of its
1894 * children can be promoted to the list of instructions ready to
1895 * be scheduled. Update the children's unblocked time for this
1896 * DAG edge as we do so.
1897 */
1898 for (int i = chosen->children_count - 1; i >= 0; i--) {
1899 elk_schedule_node_child *child = &chosen->children[i];
1900
1901 child->n->tmp.unblocked_time = MAX2(child->n->tmp.unblocked_time,
1902 current.time + child->effective_latency);
1903
1904 if (debug) {
1905 fprintf(stderr, "\tchild %d, %d parents: ", i, child->n->tmp.parent_count);
1906 bs->dump_instruction(child->n->inst);
1907 }
1908
1909 child->n->tmp.cand_generation = current.cand_generation;
1910 child->n->tmp.parent_count--;
1911 if (child->n->tmp.parent_count == 0) {
1912 if (debug) {
1913 fprintf(stderr, "\t\tnow available\n");
1914 }
1915 current.available.push_head(child->n);
1916 }
1917 }
1918 current.cand_generation++;
1919
1920 /* Shared resource: the mathbox. There's one mathbox per EU on Gfx6+
1921 * but it's more limited pre-gfx6, so if we send something off to it then
1922 * the next math instruction isn't going to make progress until the first
1923 * is done.
1924 */
1925 if (bs->devinfo->ver < 6 && chosen->inst->is_math()) {
1926 foreach_in_list(elk_schedule_node, n, &current.available) {
1927 if (n->inst->is_math())
1928 n->tmp.unblocked_time = MAX2(n->tmp.unblocked_time,
1929 current.time + chosen->latency);
1930 }
1931 }
1932 }
1933
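/**
 * Schedule one basic block: seed the available list with the DAG heads,
 * then repeatedly pick a node, emit it, update the register-pressure
 * estimate (pre-RA only), and unblock its children until nothing remains.
 */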
1934 void
1935 elk_fs_instruction_scheduler::schedule_instructions()
1936 {
1937 if (!post_reg_alloc)
1938 reg_pressure = reg_pressure_in[current.block->num];
1939
1940 assert(current.available.is_empty());
1941 for (elk_schedule_node *n = current.start; n < current.end; n++) {
1942 reset_node_tmp(n);
1943
1944 /* Add DAG heads to the list of available instructions. */
1945 if (n->tmp.parent_count == 0)
1946 current.available.push_tail(n);
1947 }
1948
1949 current.block->instructions.make_empty();
1950
1951 while (!current.available.is_empty()) {
1952 elk_schedule_node *chosen = choose_instruction_to_schedule();
1953 schedule(chosen);
1954
1955 if (!post_reg_alloc) {
1956 reg_pressure -= get_register_pressure_benefit(chosen->inst);
1957 update_register_pressure(chosen->inst);
1958 if (debug)
1959 fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1960 }
1961
1962 update_children(chosen);
1963 }
1964 }
1965
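/**
 * Top-level FS scheduling loop.  When running before register allocation
 * this clears the read/write bookkeeping used for pressure estimation and
 * recounts the remaining reads per block; either way it then schedules
 * each basic block in turn.
 */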
1966 void
1967 elk_fs_instruction_scheduler::run(instruction_scheduler_mode mode)
1968 {
1969 this->mode = mode;
1970
1971 if (debug && !post_reg_alloc) {
1972 fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1973 post_reg_alloc);
1974 bs->dump_instructions();
1975 }
1976
1977 if (!post_reg_alloc) {
1978 memset(reads_remaining, 0, grf_count * sizeof(*reads_remaining));
1979 memset(hw_reads_remaining, 0, hw_reg_count * sizeof(*hw_reads_remaining));
1980 memset(written, 0, grf_count * sizeof(*written));
1981 }
1982
1983 foreach_block(block, v->cfg) {
1984 set_current_block(block);
1985
1986 if (!post_reg_alloc) {
1987 for (elk_schedule_node *n = current.start; n < current.end; n++)
1988 count_reads_remaining(n->inst);
1989 }
1990
1991 schedule_instructions();
1992 }
1993
1994 if (debug && !post_reg_alloc) {
1995 fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1996 post_reg_alloc);
1997 bs->dump_instructions();
1998 }
1999 }
2000
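/**
 * Top-level vec4 scheduling loop.  Everything is built per block on the
 * fly: fixed two-cycle issue times, dependence calculation, delay/exit
 * computation, and then the same pick/schedule/update loop as the FS path.
 */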
2001 void
2002 elk_vec4_instruction_scheduler::run()
2003 {
2004 foreach_block(block, v->cfg) {
2005 set_current_block(block);
2006
2007 for (elk_schedule_node *n = current.start; n < current.end; n++) {
2008 /* We always execute as two vec4s in parallel. */
2009 n->issue_time = 2;
2010 }
2011
2012 calculate_deps();
2013
2014 compute_delays();
2015 compute_exits();
2016
2017 assert(current.available.is_empty());
2018 for (elk_schedule_node *n = current.start; n < current.end; n++) {
2019 reset_node_tmp(n);
2020
2021 /* Add DAG heads to the list of available instructions. */
2022 if (n->tmp.parent_count == 0)
2023 current.available.push_tail(n);
2024 }
2025
2026 current.block->instructions.make_empty();
2027
2028 while (!current.available.is_empty()) {
2029 elk_schedule_node *chosen = choose_instruction_to_schedule();
2030 schedule(chosen);
2031 update_children(chosen);
2032 }
2033 }
2034 }
2035
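/**
 * Build a pre-RA FS scheduler that can be re-run with different heuristic
 * modes.
 *
 * The callers live in the register-allocation code rather than in this
 * file; a hypothetical pre-RA caller might look roughly like the sketch
 * below (assign_regs() and the retry logic are illustrative assumptions,
 * not code from this file):
 *
 *    void *mem_ctx = ralloc_context(NULL);
 *    elk_fs_instruction_scheduler *sched = prepare_scheduler(mem_ctx);
 *    schedule_instructions_pre_ra(sched, SCHEDULE_PRE);
 *    if (!assign_regs(...))
 *       schedule_instructions_pre_ra(sched, SCHEDULE_PRE_LIFO);
 *    ralloc_free(mem_ctx);
 */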
2036 elk_fs_instruction_scheduler *
2037 elk_fs_visitor::prepare_scheduler(void *mem_ctx)
2038 {
2039 const int grf_count = alloc.count;
2040
2041 elk_fs_instruction_scheduler *empty = rzalloc(mem_ctx, elk_fs_instruction_scheduler);
2042 return new (empty) elk_fs_instruction_scheduler(mem_ctx, this, grf_count, first_non_payload_grf,
2043 cfg->num_blocks, /* post_reg_alloc */ false);
2044 }
2045
2046 void
2047 elk_fs_visitor::schedule_instructions_pre_ra(elk_fs_instruction_scheduler *sched,
2048 instruction_scheduler_mode mode)
2049 {
2050 if (mode == SCHEDULE_NONE)
2051 return;
2052
2053 sched->run(mode);
2054
2055 invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
2056 }
2057
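/**
 * Post-RA scheduling entry point: a single SCHEDULE_POST pass over the
 * allocated hardware registers, where hiding latency (rather than
 * reducing register pressure) is the only concern.
 */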
2058 void
2059 elk_fs_visitor::schedule_instructions_post_ra()
2060 {
2061 const bool post_reg_alloc = true;
2062 const int grf_count = reg_unit(devinfo) * grf_used;
2063
2064 void *mem_ctx = ralloc_context(NULL);
2065
2066 elk_fs_instruction_scheduler sched(mem_ctx, this, grf_count, first_non_payload_grf,
2067 cfg->num_blocks, post_reg_alloc);
2068 sched.run(SCHEDULE_POST);
2069
2070 ralloc_free(mem_ctx);
2071
2072 invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
2073 }
2074
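/**
 * vec4 scheduling entry point.  Unlike the FS path there is a single pass
 * with no mode selection and no register-pressure tracking.
 */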
2075 void
2076 vec4_visitor::opt_schedule_instructions()
2077 {
2078 void *mem_ctx = ralloc_context(NULL);
2079
2080 elk_vec4_instruction_scheduler sched(mem_ctx, this, prog_data->total_grf);
2081 sched.run();
2082
2083 ralloc_free(mem_ctx);
2084
2085 invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
2086 }
2087