/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * everything that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers can be assigned a register regardless of what its
 * neighbors choose, so that node is pushed on a stack and removed
 * (with its edges) from the graph.  That likely causes other nodes to
 * become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the registers to be popped later.
 *
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "for this node of class B, the sum of q(B,C)
 * for each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
 */
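
/*
 * An illustrative worked example of the pq test (the register layout here
 * is an assumption, not tied to any particular GPU): suppose a file of 8
 * base registers r0..r7 plus 4 aligned pairs P0={r0,r1} .. P3={r6,r7}.
 * Then p(base) = 8 and p(pair) = 4.  A single pair overlaps two base
 * registers, so q(base,pair) = 2; any base register overlaps exactly one
 * pair, so q(pair,base) = 1; and q(base,base) = q(pair,pair) = 1 since
 * registers within each class are disjoint.  A base-class node whose
 * neighbors have classes {pair, pair, base} is trivially colorable
 * because 2 + 2 + 1 = 5 < 8 = p(base).
 */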

#include <stdbool.h>
#include <stdlib.h>

#include "blob.h"
#include "ralloc.h"
#include "main/macros.h"
#include "util/bitset.h"
#include "util/u_dynarray.h"
#include "u_math.h"
#include "register_allocate.h"
#include "register_allocate_internal.h"

/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count, bool need_conflict_lists)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, BITSET_WORD,
                                              BITSET_WORDS(count));
      BITSET_SET(regs->regs[i].conflicts, i);

      util_dynarray_init(&regs->regs[i].conflict_list,
                         need_conflict_lists ? regs->regs : NULL);
      if (need_conflict_lists)
         util_dynarray_append(&regs->regs[i].conflict_list, unsigned int, i);
   }

   return regs;
}

/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}

static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list.mem_ctx) {
      util_dynarray_append(&reg1->conflict_list, unsigned int, r2);
   }
   BITSET_SET(reg1->conflicts, r2);
}

void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!BITSET_TEST(regs->regs[r1].conflicts, r2)) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}

/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   ra_add_reg_conflict(regs, reg, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int,
                         r2p) {
      ra_add_reg_conflict(regs, reg, *r2p);
   }
}

/**
 * Sets up conflicts between base_reg and its two half registers reg0 and
 * reg1, but takes care not to add conflicts between reg0 and reg1.
 *
 * This is useful for architectures where full-size registers are aliased by
 * two half-size registers (e.g. 32-bit float and 16-bit float registers).
 */
void
ra_add_transitive_reg_pair_conflict(struct ra_regs *regs,
                                    unsigned int base_reg,
                                    unsigned int reg0, unsigned int reg1)
{
   ra_add_reg_conflict(regs, reg0, base_reg);
   ra_add_reg_conflict(regs, reg1, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int, i) {
      unsigned int conflict = *i;
      if (conflict != reg1)
         ra_add_reg_conflict(regs, reg0, conflict);
      if (conflict != reg0)
         ra_add_reg_conflict(regs, reg1, conflict);
   }
}

/**
 * Makes every conflict on the given register transitive.  In other words,
 * every register that conflicts with r will now conflict with every other
 * register conflicting with r.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_make_reg_conflicts_transitive(struct ra_regs *regs, unsigned int r)
{
   struct ra_reg *reg = &regs->regs[r];
   int c;

   BITSET_FOREACH_SET(c, reg->conflicts, regs->count) {
      struct ra_reg *other = &regs->regs[c];
      unsigned i;
      for (i = 0; i < BITSET_WORDS(regs->count); i++)
         other->conflicts[i] |= reg->conflicts[i];
   }
}

struct ra_class *
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   class->regset = regs;

   /* Users may rely on the class index being allocated in order starting from 0. */
   class->index = regs->class_count++;
   regs->classes[class->index] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return class;
}

/**
 * Creates a register class for contiguous register groups of a base register
 * set.
 *
 * A reg set using this type of register class must use only this type of
 * register class.
 */
struct ra_class *
ra_alloc_contig_reg_class(struct ra_regs *regs, int contig_len)
{
   struct ra_class *c = ra_alloc_reg_class(regs);

   assert(contig_len != 0);
   c->contig_len = contig_len;

   return c;
}

struct ra_class *
ra_get_class_from_index(struct ra_regs *regs, unsigned int class)
{
   return regs->classes[class];
}

unsigned int
ra_class_index(struct ra_class *c)
{
   return c->index;
}

void
ra_class_add_reg(struct ra_class *class, unsigned int r)
{
   assert(r < class->regset->count);
   assert(r + class->contig_len <= class->regset->count);

   BITSET_SET(class->regs, r);
   class->p++;
}
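
/*
 * Illustrative sketch (hypothetical 8-register file, not from any real
 * backend): a contiguous class for 2-register vectors only lists the base
 * register of each candidate placement, and an allocation at base r then
 * implicitly occupies r and r+1.
 *
 *    struct ra_class *vec2 = ra_alloc_contig_reg_class(regs, 2);
 *    for (unsigned r = 0; r + 2 <= 8; r += 2)
 *       ra_class_add_reg(vec2, r);
 */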

/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}

/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
   } else {
      /* Compute, for each class B and C, how many regs of B an
       * allocation to C could conflict with.
       */
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            struct ra_class *class_b = regs->classes[b];
            struct ra_class *class_c = regs->classes[c];

            if (class_b->contig_len && class_c->contig_len) {
               if (class_b->contig_len == 1 && class_c->contig_len == 1) {
                  /* If both classes are single registers, then they only
                   * conflict if there are any regs shared between them.  This
                   * is a cheap test for a common case.
                   */
                  class_b->q[c] = 0;
                  for (int i = 0; i < BITSET_WORDS(regs->count); i++) {
                     if (class_b->regs[i] & class_c->regs[i]) {
                        class_b->q[c] = 1;
                        break;
                     }
                  }
               } else {
                  int max_possible_conflicts = class_b->contig_len + class_c->contig_len - 1;

                  unsigned int max_conflicts = 0;
                  unsigned int rc;
                  BITSET_FOREACH_SET(rc, regs->classes[c]->regs, regs->count) {
                     int start = MAX2(0, (int)rc - class_b->contig_len + 1);
                     int end = MIN2(regs->count, rc + class_c->contig_len);
                     unsigned int conflicts = 0;
                     for (int i = start; i < end; i++) {
                        if (BITSET_TEST(class_b->regs, i))
                           conflicts++;
                     }
                     max_conflicts = MAX2(max_conflicts, conflicts);
                     /* Unless a class has some restriction (e.g. all the
                      * register bases are aligned), we should quickly hit
                      * this limit and exit the loop.
                      */
                     if (max_conflicts == max_possible_conflicts)
                        break;
                  }
                  class_b->q[c] = max_conflicts;
               }
            } else {
               /* If you're doing contiguous classes, you have to be all in
                * because I don't want to deal with it.
                */
               assert(!class_b->contig_len && !class_c->contig_len);

               unsigned int rc;
               int max_conflicts = 0;

               BITSET_FOREACH_SET(rc, regs->classes[c]->regs, regs->count) {
                  int conflicts = 0;

                  util_dynarray_foreach(&regs->regs[rc].conflict_list,
                                        unsigned int, rbp) {
                     unsigned int rb = *rbp;
                     if (reg_belongs_to_class(rb, regs->classes[b]))
                        conflicts++;
                  }
                  max_conflicts = MAX2(max_conflicts, conflicts);
               }
               regs->classes[b]->q[c] = max_conflicts;
            }
         }
      }
   }

   for (b = 0; b < regs->count; b++) {
      util_dynarray_fini(&regs->regs[b].conflict_list);
   }

   bool all_contig = true;
   for (int c = 0; c < regs->class_count; c++)
      all_contig &= regs->classes[c]->contig_len != 0;
   if (all_contig) {
      /* In this case, we never need the conflicts lists (and it would probably
       * be a mistake to look at conflicts when doing contiguous classes!), so
       * free them.  TODO: Avoid the allocation in the first place.
       */
      for (int i = 0; i < regs->count; i++) {
         ralloc_free(regs->regs[i].conflicts);
         regs->regs[i].conflicts = NULL;
      }
   }
}
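
/*
 * A minimal setup sketch (illustrative only; the register count and class
 * layout are assumptions, not derived from any particular backend).  It
 * builds the base-register/register-pair example from the file header and
 * finalizes the set so the q values get computed:
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 12, true);
 *    struct ra_class *base = ra_alloc_reg_class(regs);
 *    struct ra_class *pair = ra_alloc_reg_class(regs);
 *
 *    for (unsigned r = 0; r < 8; r++)
 *       ra_class_add_reg(base, r);
 *    for (unsigned p = 0; p < 4; p++) {
 *       unsigned preg = 8 + p;
 *       ra_class_add_reg(pair, preg);
 *       ra_add_transitive_reg_conflict(regs, 2 * p, preg);
 *       ra_add_transitive_reg_conflict(regs, 2 * p + 1, preg);
 *    }
 *
 *    ra_set_finalize(regs, NULL);
 */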

void
ra_set_serialize(const struct ra_regs *regs, struct blob *blob)
{
   blob_write_uint32(blob, regs->count);
   blob_write_uint32(blob, regs->class_count);

   bool is_contig = regs->classes[0]->contig_len != 0;
   blob_write_uint8(blob, is_contig);

   if (!is_contig) {
      for (unsigned int r = 0; r < regs->count; r++) {
         struct ra_reg *reg = &regs->regs[r];
         blob_write_bytes(blob, reg->conflicts, BITSET_WORDS(regs->count) *
                                                sizeof(BITSET_WORD));
         assert(util_dynarray_num_elements(&reg->conflict_list, unsigned int) == 0);
      }
   }

   for (unsigned int c = 0; c < regs->class_count; c++) {
      struct ra_class *class = regs->classes[c];
      blob_write_bytes(blob, class->regs, BITSET_WORDS(regs->count) *
                                          sizeof(BITSET_WORD));
      blob_write_uint32(blob, class->contig_len);
      blob_write_uint32(blob, class->p);
      blob_write_bytes(blob, class->q, regs->class_count * sizeof(*class->q));
   }

   blob_write_uint32(blob, regs->round_robin);
}

struct ra_regs *
ra_set_deserialize(void *mem_ctx, struct blob_reader *blob)
{
   unsigned int reg_count = blob_read_uint32(blob);
   unsigned int class_count = blob_read_uint32(blob);
   bool is_contig = blob_read_uint8(blob);

   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, reg_count, false);
   assert(regs->count == reg_count);

   if (is_contig) {
      for (int i = 0; i < regs->count; i++) {
         ralloc_free(regs->regs[i].conflicts);
         regs->regs[i].conflicts = NULL;
      }
   } else {
      for (unsigned int r = 0; r < reg_count; r++) {
         struct ra_reg *reg = &regs->regs[r];
         blob_copy_bytes(blob, reg->conflicts, BITSET_WORDS(reg_count) *
                                               sizeof(BITSET_WORD));
      }
   }

   assert(regs->classes == NULL);
   regs->classes = ralloc_array(regs->regs, struct ra_class *, class_count);
   regs->class_count = class_count;

   for (unsigned int c = 0; c < class_count; c++) {
      struct ra_class *class = rzalloc(regs, struct ra_class);
      regs->classes[c] = class;
      class->regset = regs;
      class->index = c;

      class->regs = ralloc_array(class, BITSET_WORD, BITSET_WORDS(reg_count));
      blob_copy_bytes(blob, class->regs, BITSET_WORDS(reg_count) *
                                         sizeof(BITSET_WORD));

      class->contig_len = blob_read_uint32(blob);
      class->p = blob_read_uint32(blob);

      class->q = ralloc_array(regs->classes[c], unsigned int, class_count);
      blob_copy_bytes(blob, class->q, class_count * sizeof(*class->q));
   }

   regs->round_robin = blob_read_uint32(blob);

   return regs;
}
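
/*
 * Round-trip sketch, e.g. for caching a finalized reg set on disk.  This is
 * hedged: it assumes the usual util/blob.h helpers (blob_init(),
 * blob_reader_init(), blob_finish()) with their stock signatures.
 *
 *    struct blob blob;
 *    blob_init(&blob);
 *    ra_set_serialize(regs, &blob);
 *
 *    struct blob_reader reader;
 *    blob_reader_init(&reader, blob.data, blob.size);
 *    struct ra_regs *copy = ra_set_deserialize(mem_ctx, &reader);
 *
 *    blob_finish(&blob);
 */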

static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total += g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_append(&g->nodes[n1].adjacency_list, unsigned int, n2);
}

static void
ra_node_remove_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_CLEAR(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total -= g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_delete_unordered(&g->nodes[n1].adjacency_list, unsigned int,
                                  n2);
}

static void
ra_realloc_interference_graph(struct ra_graph *g, unsigned int alloc)
{
   if (alloc <= g->alloc)
      return;

   /* If we always have a whole number of BITSET_WORDs, it makes it much
    * easier to memset the top of the growing bitsets.
    */
   assert(g->alloc % BITSET_WORDBITS == 0);
   alloc = align64(alloc, BITSET_WORDBITS);

   g->nodes = reralloc(g, g->nodes, struct ra_node, alloc);

   unsigned g_bitset_count = BITSET_WORDS(g->alloc);
   unsigned bitset_count = BITSET_WORDS(alloc);
   /* For nodes already in the graph, we just have to grow the adjacency set */
   for (unsigned i = 0; i < g->alloc; i++) {
      assert(g->nodes[i].adjacency != NULL);
      g->nodes[i].adjacency = rerzalloc(g, g->nodes[i].adjacency, BITSET_WORD,
                                        g_bitset_count, bitset_count);
   }

   /* For new nodes, we have to fully initialize them */
   for (unsigned i = g->alloc; i < alloc; i++) {
      memset(&g->nodes[i], 0, sizeof(g->nodes[i]));
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);
      util_dynarray_init(&g->nodes[i].adjacency_list, g);
      g->nodes[i].q_total = 0;

      g->nodes[i].forced_reg = NO_REG;
      g->nodes[i].reg = NO_REG;
   }

   /* These are scratch values and don't need to be zeroed.  We'll clear them
    * as part of ra_select() setup.
    */
   g->tmp.stack = reralloc(g, g->tmp.stack, unsigned int, alloc);
   g->tmp.in_stack = reralloc(g, g->tmp.in_stack, BITSET_WORD, bitset_count);

   g->tmp.reg_assigned = reralloc(g, g->tmp.reg_assigned, BITSET_WORD,
                                  bitset_count);
   g->tmp.pq_test = reralloc(g, g->tmp.pq_test, BITSET_WORD, bitset_count);
   g->tmp.min_q_total = reralloc(g, g->tmp.min_q_total, unsigned int,
                                 bitset_count);
   g->tmp.min_q_node = reralloc(g, g->tmp.min_q_node, unsigned int,
                                bitset_count);

   g->alloc = alloc;
}

struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;

   g = rzalloc(NULL, struct ra_graph);
   g->regs = regs;
   g->count = count;
   ra_realloc_interference_graph(g, count);

   return g;
}

void
ra_resize_interference_graph(struct ra_graph *g, unsigned int count)
{
   g->count = count;
   if (count > g->alloc)
      ra_realloc_interference_graph(g, g->alloc * 2);
}

void ra_set_select_reg_callback(struct ra_graph *g,
                                ra_select_reg_callback callback,
                                void *data)
{
   g->select_reg_callback = callback;
   g->select_reg_callback_data = data;
}

void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, struct ra_class *class)
{
   g->nodes[n].class = class->index;
}

struct ra_class *
ra_get_node_class(struct ra_graph *g,
                  unsigned int n)
{
   return g->regs->classes[g->nodes[n].class];
}

unsigned int
ra_add_node(struct ra_graph *g, struct ra_class *class)
{
   unsigned int n = g->count;
   ra_resize_interference_graph(g, g->count + 1);

   ra_set_node_class(g, n, class);

   return n;
}

void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   assert(n1 < g->count && n2 < g->count);
   if (n1 != n2 && !BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}

void
ra_reset_node_interference(struct ra_graph *g, unsigned int n)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      ra_node_remove_adjacency(g, *n2p, n);
   }

   memset(g->nodes[n].adjacency, 0,
          BITSET_WORDS(g->count) * sizeof(BITSET_WORD));
   util_dynarray_clear(&g->nodes[n].adjacency_list);
}

static void
update_pq_info(struct ra_graph *g, unsigned int n)
{
   int i = n / BITSET_WORDBITS;
   int n_class = g->nodes[n].class;
   if (g->nodes[n].tmp.q_total < g->regs->classes[n_class]->p) {
      BITSET_SET(g->tmp.pq_test, n);
   } else if (g->tmp.min_q_total[i] != UINT_MAX) {
      /* Only update min_q_total and min_q_node if min_q_total != UINT_MAX so
       * that we don't update while we have stale data and accidentally mark
       * it as non-stale.  Also, in order to remain consistent with the old
       * naive implementation of the algorithm, we do a lexicographical sort
       * to ensure that we always choose the node with the highest node index.
       */
      if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i] ||
          (g->nodes[n].tmp.q_total == g->tmp.min_q_total[i] &&
           n > g->tmp.min_q_node[i])) {
         g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
         g->tmp.min_q_node[i] = n;
      }
   }
}

static void
add_node_to_stack(struct ra_graph *g, unsigned int n)
{
   int n_class = g->nodes[n].class;

   assert(!BITSET_TEST(g->tmp.in_stack, n));

   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          !BITSET_TEST(g->tmp.reg_assigned, n2)) {
         assert(g->nodes[n2].tmp.q_total >= g->regs->classes[n2_class]->q[n_class]);
         g->nodes[n2].tmp.q_total -= g->regs->classes[n2_class]->q[n_class];
         update_pq_info(g, n2);
      }
   }

   g->tmp.stack[g->tmp.stack_count] = n;
   g->tmp.stack_count++;
   BITSET_SET(g->tmp.in_stack, n);

   /* Flag the min_q_total for n's block as dirty so it gets recalculated */
   g->tmp.min_q_total[n / BITSET_WORDBITS] = UINT_MAX;
}

/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes onto a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * If we encounter a case where we can't push any nodes on the stack, then
 * we optimistically choose a node and push it on the stack.  We heuristically
 * push the node with the lowest total q value, since it has the fewest
 * neighbors and therefore is most likely to be allocated.
 */
static void
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   unsigned int stack_optimistic_start = UINT_MAX;

   /* Figure out the high bit and bit mask for the first iteration of a loop
    * over BITSET_WORDs.
    */
   const unsigned int top_word_high_bit = (g->count - 1) % BITSET_WORDBITS;

   /* Do a quick pre-pass to set things up */
   g->tmp.stack_count = 0;
   for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
        i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
      g->tmp.in_stack[i] = 0;
      g->tmp.reg_assigned[i] = 0;
      g->tmp.pq_test[i] = 0;
      g->tmp.min_q_total[i] = UINT_MAX;
      g->tmp.min_q_node[i] = UINT_MAX;
      for (int j = high_bit; j >= 0; j--) {
         unsigned int n = i * BITSET_WORDBITS + j;
         g->nodes[n].reg = g->nodes[n].forced_reg;
         g->nodes[n].tmp.q_total = g->nodes[n].q_total;
         if (g->nodes[n].reg != NO_REG)
            g->tmp.reg_assigned[i] |= BITSET_BIT(j);
         update_pq_info(g, n);
      }
   }

   while (progress) {
      unsigned int min_q_total = UINT_MAX;
      unsigned int min_q_node = UINT_MAX;

      progress = false;

      for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
           i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
         BITSET_WORD mask = ~(BITSET_WORD)0 >> (31 - high_bit);

         BITSET_WORD skip = g->tmp.in_stack[i] | g->tmp.reg_assigned[i];
         if (skip == mask)
            continue;

         BITSET_WORD pq = g->tmp.pq_test[i] & ~skip;
         if (pq) {
            /* In this case, we have stuff we can immediately push onto the
             * stack.  This also means that we're guaranteed to make progress
             * and we don't need to bother updating min_q_total because we
             * know we're going to loop again before attempting to do anything
             * optimistic.
             */
            for (int j = high_bit; j >= 0; j--) {
               if (pq & BITSET_BIT(j)) {
                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  add_node_to_stack(g, n);
                  /* add_node_to_stack() may update pq_test for this word so
                   * we need to update our local copy.
                   */
                  pq = g->tmp.pq_test[i] & ~skip;
                  progress = true;
               }
            }
         } else if (!progress) {
            if (g->tmp.min_q_total[i] == UINT_MAX) {
               /* The min_q_total and min_q_node are dirty because we added
                * one of these nodes to the stack.  They need to be
                * recalculated.
                */
               for (int j = high_bit; j >= 0; j--) {
                  if (skip & BITSET_BIT(j))
                     continue;

                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i]) {
                     g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
                     g->tmp.min_q_node[i] = n;
                  }
               }
            }
            if (g->tmp.min_q_total[i] < min_q_total) {
               min_q_node = g->tmp.min_q_node[i];
               min_q_total = g->tmp.min_q_total[i];
            }
         }
      }

      if (!progress && min_q_total != UINT_MAX) {
         if (stack_optimistic_start == UINT_MAX)
            stack_optimistic_start = g->tmp.stack_count;

         add_node_to_stack(g, min_q_node);
         progress = true;
      }
   }

   g->tmp.stack_optimistic_start = stack_optimistic_start;
}

bool
ra_class_allocations_conflict(struct ra_class *c1, unsigned int r1,
                              struct ra_class *c2, unsigned int r2)
{
   if (c1->contig_len) {
      assert(c2->contig_len);

      int r1_end = r1 + c1->contig_len;
      int r2_end = r2 + c2->contig_len;
      return !(r2 >= r1_end || r1 >= r2_end);
   } else {
      return BITSET_TEST(c1->regset->regs[r1].conflicts, r2);
   }
}

static struct ra_node *
ra_find_conflicting_neighbor(struct ra_graph *g, unsigned int n, unsigned int r)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;

      /* If our adjacent node is in the stack, it's not allocated yet. */
      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          ra_class_allocations_conflict(g->regs->classes[g->nodes[n].class], r,
                                        g->regs->classes[g->nodes[n2].class], g->nodes[n2].reg)) {
         return &g->nodes[n2];
      }
   }

   return NULL;
}

/* Computes a bitfield of what regs are available for a given register
 * selection.
 *
 * This lets drivers implement a more complicated policy than our simple
 * first-fit or round-robin policies (which don't require knowing the whole
 * bitset).
 */
static bool
ra_compute_available_regs(struct ra_graph *g, unsigned int n, BITSET_WORD *regs)
{
   struct ra_class *c = g->regs->classes[g->nodes[n].class];

   /* Populate with the set of regs that are in the node's class. */
   memcpy(regs, c->regs, BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   /* Remove any regs that conflict with nodes that we're adjacent to and have
    * already colored.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      struct ra_node *n2 = &g->nodes[*n2p];
      struct ra_class *n2c = g->regs->classes[n2->class];

      if (!BITSET_TEST(g->tmp.in_stack, *n2p)) {
         if (c->contig_len) {
            int start = MAX2(0, (int)n2->reg - c->contig_len + 1);
            int end = MIN2(g->regs->count, n2->reg + n2c->contig_len);
            for (unsigned i = start; i < end; i++)
               BITSET_CLEAR(regs, i);
         } else {
            for (int j = 0; j < BITSET_WORDS(g->regs->count); j++)
               regs[j] &= ~g->regs->regs[n2->reg].conflicts[j];
         }
      }
   }

   for (int i = 0; i < BITSET_WORDS(g->regs->count); i++) {
      if (regs[i])
         return true;
   }

   return false;
}

/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false.
 */
static bool
ra_select(struct ra_graph *g)
{
   int start_search_reg = 0;
   BITSET_WORD *select_regs = NULL;

   if (g->select_reg_callback)
      select_regs = malloc(BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   while (g->tmp.stack_count != 0) {
      unsigned int ri;
      unsigned int r = -1;
      int n = g->tmp.stack[g->tmp.stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* Clear the in_stack bit even if we return here so that
       * ra_get_best_spill_node() considers this node later.
       */
      BITSET_CLEAR(g->tmp.in_stack, n);

      if (g->select_reg_callback) {
         if (!ra_compute_available_regs(g, n, select_regs)) {
            free(select_regs);
            return false;
         }

         r = g->select_reg_callback(n, select_regs, g->select_reg_callback_data);
         assert(r < g->regs->count);
      } else {
         /* Find the lowest-numbered reg which is not used by a member
          * of the graph adjacent to us.
          */
         for (ri = 0; ri < g->regs->count; ri++) {
            r = (start_search_reg + ri) % g->regs->count;
            if (!reg_belongs_to_class(r, c))
               continue;

            struct ra_node *conflicting = ra_find_conflicting_neighbor(g, n, r);
            if (!conflicting) {
               /* Found a reg! */
               break;
            }
            if (g->regs->classes[conflicting->class]->contig_len) {
               /* Skip to point at the last base reg of the conflicting reg
                * allocation -- the loop will increment us to check the next
                * reg after the conflicting allocation.
                */
               unsigned conflicting_end = (conflicting->reg +
                                           g->regs->classes[conflicting->class]->contig_len - 1);
               assert(conflicting_end >= r);
               ri += conflicting_end - r;
            }
         }

         if (ri >= g->regs->count)
            return false;
      }

      g->nodes[n].reg = r;
      g->tmp.stack_count--;

      /* Rotate the starting point except for any nodes above the lowest
       * optimistically colorable node.  The likelihood that we will succeed
       * at allocating optimistically colorable nodes is highly dependent on
       * the way that the previous nodes popped off the stack are laid out.
       * The round-robin strategy increases the fragmentation of the register
       * file and decreases the number of nearby nodes assigned to the same
       * color, which increases the likelihood of spilling with respect to the
       * dense packing strategy.
       */
      if (g->regs->round_robin &&
          g->tmp.stack_count - 1 <= g->tmp.stack_optimistic_start)
         start_search_reg = r + 1;
   }

   free(select_regs);

   return true;
}

bool
ra_allocate(struct ra_graph *g)
{
   ra_simplify(g);
   return ra_select(g);
}
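
/*
 * Typical per-shader flow, as a hedged sketch: class_for_node(), cost_of()
 * and the interference walk are placeholders for whatever the backend
 * actually does, not names defined by this allocator.
 *
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, node_count);
 *
 *    for (unsigned i = 0; i < node_count; i++) {
 *       ra_set_node_class(g, i, class_for_node(i));
 *       ra_set_node_spill_cost(g, i, cost_of(i));
 *    }
 *    ...call ra_add_node_interference(g, a, b) for every pair of values
 *    that are simultaneously live...
 *
 *    while (!ra_allocate(g)) {
 *       int spill = ra_get_best_spill_node(g);
 *       if (spill == -1)
 *          break;   // no spillable node; give up or fall back
 *       ...rewrite the program to spill node "spill", then rebuild the
 *       graph and its interferences...
 *    }
 *
 *    unsigned hw_reg = ra_get_node_reg(g, some_node);
 *    ralloc_free(g);
 */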

unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   if (g->nodes[n].forced_reg != NO_REG)
      return g->nodes[n].forced_reg;
   else
      return g->nodes[n].reg;
}

/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known to not conflict
 * with other forced register assignments (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].forced_reg = reg;
}

static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C).  This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;
      benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                  g->regs->classes[n_class]->p);
   }

   return benefit;
}

/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   /* Consider any nodes that we colored successfully or the node we failed to
    * color for spilling.  When we failed to color a node in ra_select(), we
    * only considered these nodes, so spilling any other ones would not result
    * in us making progress.
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0f)
         continue;

      if (BITSET_TEST(g->tmp.in_stack, n))
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}

/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}