/*
 * Copyright 2023 Alyssa Rosenzweig
 * SPDX-License-Identifier: MIT
 */

#include "agx_builder.h"
#include "agx_compile.h"
#include "agx_compiler.h"
#include "agx_test.h"

#include "util/macros.h"
#include <gtest/gtest.h>

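/*
 * Assemble a shader with "instr", lower it with agx_lower_spill, and check
 * that the result matches a shader assembled with "expected". Also check the
 * spill/fill statistics recorded on the lowered shader.
 */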
#define CASE(expected_spills, expected_fills, instr, expected)                 \
   do {                                                                        \
      agx_builder *A = agx_test_builder(mem_ctx);                              \
      agx_builder *B = agx_test_builder(mem_ctx);                              \
      {                                                                        \
         agx_builder *b = A;                                                   \
         instr;                                                                \
      }                                                                        \
      {                                                                        \
         agx_builder *b = B;                                                   \
         expected;                                                             \
      }                                                                        \
      agx_lower_spill(A->shader);                                              \
      ASSERT_SHADER_EQUAL(A->shader, B->shader);                               \
      ASSERT_EQ(A->shader->spills, expected_spills);                           \
      ASSERT_EQ(A->shader->fills, expected_fills);                             \
   } while (0)

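/*
 * Fixture providing the operands shared by the cases below: scalar GPRs,
 * vec4 views of those GPRs, and vec4 memory registers to spill to.
 */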
class LowerSpill : public testing::Test {
 protected:
   LowerSpill()
   {
      mem_ctx = ralloc_context(NULL);

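      /* One 32-bit and one 16-bit scalar GPR. Indices are in 16-bit units. */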
      wx = agx_register(0, AGX_SIZE_32);
      hy = agx_register(2, AGX_SIZE_16);

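      /* vec4 memory registers, plus vec4 views of the scalar GPRs above. */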
      mw4 = agx_memory_register(0, AGX_SIZE_32);
      mh4 = agx_memory_register(0, AGX_SIZE_16);
      mw4.channels_m1 = 4 - 1;
      mh4.channels_m1 = 4 - 1;

      wx4 = wx;
      wx4.channels_m1 = 4 - 1;

      hy4 = hy;
      hy4.channels_m1 = 4 - 1;
   }

   ~LowerSpill()
   {
      ralloc_free(mem_ctx);
   }

   void *mem_ctx;
   agx_index wx, hy, wx4, hy4;
   agx_index mw4, mh4;

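   /* Channel masks for the lowered stack_store/stack_load instructions. */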
   unsigned scalar = BITFIELD_MASK(1);
   unsigned vec4 = BITFIELD_MASK(4);

   enum agx_format i16 = AGX_FORMAT_I16;
   enum agx_format i32 = AGX_FORMAT_I32;
};

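/*
 * A scalar mov into a memory register lowers to a single stack_store: one
 * spill, no fills. Memory register indices are in 16-bit units, so memory
 * register 11 is stored at byte offset 22 and register 18 at offset 36.
 */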
TEST_F(LowerSpill, ScalarSpills)
{
   CASE(1, 0, agx_mov_to(b, agx_memory_register(11, AGX_SIZE_16), hy),
        agx_stack_store(b, hy, agx_immediate(22), i16, scalar));

   CASE(1, 0, agx_mov_to(b, agx_memory_register(18, AGX_SIZE_32), wx),
        agx_stack_store(b, wx, agx_immediate(36), i32, scalar));
}

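/*
 * The reverse direction: a scalar mov out of a memory register lowers to a
 * single stack_load at the same byte offsets.
 */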
TEST_F(LowerSpill, ScalarFills)
{
   CASE(0, 1, agx_mov_to(b, hy, agx_memory_register(11, AGX_SIZE_16)),
        agx_stack_load_to(b, hy, agx_immediate(22), i16, scalar));

   CASE(0, 1, agx_mov_to(b, wx, agx_memory_register(18, AGX_SIZE_32)),
        agx_stack_load_to(b, wx, agx_immediate(36), i32, scalar));
}

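/*
 * A vec4 mov into a memory register spills with one vectorized stack_store
 * rather than four scalar stores.
 */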
TEST_F(LowerSpill, VectorSpills)
{
   CASE(1, 0, agx_mov_to(b, mh4, hy4),
        agx_stack_store(b, hy4, agx_immediate(0), i16, vec4));

   CASE(1, 0, agx_mov_to(b, mw4, wx4),
        agx_stack_store(b, wx4, agx_immediate(0), i32, vec4));
}

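/* Likewise, a vec4 fill lowers to a single vectorized stack_load. */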
TEST_F(LowerSpill, VectorFills)
{
   CASE(0, 1, agx_mov_to(b, hy4, mh4),
        agx_stack_load_to(b, hy4, agx_immediate(0), i16, vec4));

   CASE(0, 1, agx_mov_to(b, wx4, mw4),
        agx_stack_load_to(b, wx4, agx_immediate(0), i32, vec4));
}

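/*
 * A 64-bit spill is expressed as a 32-bit stack_store covering two channels.
 */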
TEST_F(LowerSpill, ScalarSpill64)
{
   CASE(1, 0,
        agx_mov_to(b, agx_memory_register(16, AGX_SIZE_64),
                   agx_register(8, AGX_SIZE_64)),
        agx_stack_store(b, agx_register(8, AGX_SIZE_64), agx_immediate(32), i32,
                        BITFIELD_MASK(2)));
}

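/* Similarly, a 64-bit fill becomes a two-channel 32-bit stack_load. */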
TEST_F(LowerSpill, ScalarFill64)
{
   CASE(0, 1,
        agx_mov_to(b, agx_register(16, AGX_SIZE_64),
                   agx_memory_register(8, AGX_SIZE_64)),
        agx_stack_load_to(b, agx_register(16, AGX_SIZE_64), agx_immediate(16),
                          i32, BITFIELD_MASK(2)));
}

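/*
 * A vec6 copy is too wide for one stack op: the spill is split into a vec4
 * stack_store plus a vec2 stack_store 16 bytes (four 32-bit channels)
 * further into the stack slot.
 */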
TEST_F(LowerSpill, Vec6Spill)
{
   CASE(
      2, 0,
      {
         agx_index mvec6 = agx_memory_register(16, AGX_SIZE_32);
         agx_index vec6 = agx_register(8, AGX_SIZE_32);
         vec6.channels_m1 = 6 - 1;
         mvec6.channels_m1 = 6 - 1;

         agx_mov_to(b, mvec6, vec6);
      },
      {
         agx_index vec4 = agx_register(8, AGX_SIZE_32);
         agx_index vec2 = agx_register(8 + (4 * 2), AGX_SIZE_32);
         vec4.channels_m1 = 4 - 1;
         vec2.channels_m1 = 2 - 1;

         agx_stack_store(b, vec4, agx_immediate(32), i32, BITFIELD_MASK(4));
         agx_stack_store(b, vec2, agx_immediate(32 + 4 * 4), i32,
                         BITFIELD_MASK(2));
      });
}

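/*
 * The matching vec6 fill splits the same way: a vec4 stack_load plus a vec2
 * stack_load.
 */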
TEST_F(LowerSpill, Vec6Fill)
{
   CASE(
      0, 2,
      {
         agx_index mvec6 = agx_memory_register(16, AGX_SIZE_32);
         agx_index vec6 = agx_register(8, AGX_SIZE_32);
         vec6.channels_m1 = 6 - 1;
         mvec6.channels_m1 = 6 - 1;

         agx_mov_to(b, vec6, mvec6);
      },
      {
         agx_index vec4 = agx_register(8, AGX_SIZE_32);
         agx_index vec2 = agx_register(8 + (4 * 2), AGX_SIZE_32);
         vec4.channels_m1 = 4 - 1;
         vec2.channels_m1 = 2 - 1;

         agx_stack_load_to(b, vec4, agx_immediate(32), i32, BITFIELD_MASK(4));
         agx_stack_load_to(b, vec2, agx_immediate(32 + 4 * 4), i32,
                           BITFIELD_MASK(2));
      });
}