/*
 * Copyright 2023 Alyssa Rosenzweig
 * SPDX-License-Identifier: MIT
 */

#include "util/macros.h"
#include "agx_builder.h"
#include "agx_compile.h"
#include "agx_compiler.h"

/* Lower moves involving memory registers (created when spilling) to concrete
 * spills and fills.
 */

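/* Lower one (at most vec4-sized) slice of a memory move. component_offset_el
 * is the first component of the original vector handled by this call and
 * channels is the number of components to spill or fill. For example, a
 * 32-bit vec6 spill is lowered as a vec4 slice at component 0 followed by a
 * vec2 slice at component 4, the latter starting 4 * 4 = 16 bytes into the
 * spilled vector.
 */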
static void
spill_fill(agx_builder *b, agx_instr *I, enum agx_size size, unsigned channels,
           unsigned component_offset_el)
{
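   /* 16-bit components are spilled with a 16-bit format; 32-bit and 64-bit
    * components use a 32-bit format.
    */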
   unsigned size_B = agx_size_align_16(size) * 2;
   enum agx_format format = size_B == 2 ? AGX_FORMAT_I16 : AGX_FORMAT_I32;
   unsigned format_size_B = size_B == 2 ? 2 : 4;

   unsigned offset_B = component_offset_el * size_B;
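   /* 64-bit components are stored as pairs of 32-bit channels, so double the
    * channel count (and hence the width of the store/load mask).
    */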
   unsigned effective_chans = size == AGX_SIZE_64 ? (channels * 2) : channels;
   unsigned mask = BITFIELD_MASK(effective_chans);

   assert(effective_chans <= 4);

   /* Pick off the memory and register parts of the move */
   agx_index mem = I->dest[0].memory ? I->dest[0] : I->src[0];
   agx_index reg = I->dest[0].memory ? I->src[0] : I->dest[0];

   assert(mem.type == AGX_INDEX_REGISTER && mem.memory);
   assert(reg.type == AGX_INDEX_REGISTER && !reg.memory);

   /* Slice the register according to the part of the spill we're handling */
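   /* Registers are numbered in 16-bit units, so advancing by one component
    * steps the index by the component's 16-bit alignment.
    */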
   if (component_offset_el > 0 || channels != agx_channels(reg)) {
      reg.value += component_offset_el * agx_size_align_16(reg.size);
      reg.channels_m1 = channels - 1;
   }

   /* Calculate stack offset in bytes. IR registers are 2 bytes each. */
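   /* The offset is the base of the spill area, plus the memory register's
    * byte offset, plus the byte offset of the slice handled by this call.
    */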
   unsigned stack_offs_B = b->shader->spill_base_B + (mem.value * 2) + offset_B;
   unsigned stack_offs_end_B = stack_offs_B + (effective_chans * format_size_B);

   assert(stack_offs_end_B <= b->shader->scratch_size_B &&
          "RA allocates enough scratch");

   /* Emit the spill/fill */
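   /* A move into a memory register is a spill (stack_store); a move out of
    * one is a fill (stack_load).
    */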
   if (I->dest[0].memory) {
      agx_stack_store(b, reg, agx_immediate(stack_offs_B), format, mask);
   } else {
      agx_stack_load_to(b, reg, agx_immediate(stack_offs_B), format, mask);
   }
}

void
agx_lower_spill(agx_context *ctx)
{
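   /* Find every move that reads or writes a memory register and replace it
    * with concrete stack accesses.
    */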
   agx_foreach_instr_global_safe(ctx, I) {
      if (I->op != AGX_OPCODE_MOV || (!I->dest[0].memory && !I->src[0].memory))
         continue;

      enum agx_size size = I->dest[0].size;
      unsigned channels = agx_channels(I->dest[0]);

      assert(size == I->src[0].size);
      assert(channels == agx_channels(I->src[0]));

      /* Texture gradient sources can be vec6, and if such a vector is spilled,
       * we need to be able to spill/fill a vec6. Since stack_store/stack_load
       * only work up to vec4, we break the vector into (at most) vec4 chunks.
       */
      agx_builder b = agx_init_builder(ctx, agx_before_instr(I));

      for (unsigned c = 0; c < channels; c += 4) {
         spill_fill(&b, I, size, MIN2(channels - c, 4), c);
      }

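      /* The memory move has been fully replaced, so drop it. */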
      agx_remove_instruction(I);
   }
}