/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "elk_private.h"
#include "compiler/shader_info.h"
#include "intel/dev/intel_debug.h"
#include "intel/dev/intel_device_info.h"
#include "util/ralloc.h"

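/* Returns the dispatch width required by the shader's declared subgroup
 * size, or 0 if any width may be used.
 */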
unsigned
elk_required_dispatch_width(const struct shader_info *info)
{
   if ((int)info->subgroup_size >= (int)SUBGROUP_SIZE_REQUIRE_8) {
      assert(gl_shader_stage_uses_workgroup(info->stage));
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
       */
      return (unsigned)info->subgroup_size;
   } else {
      return 0;
   }
}

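/* Convenience helper to test a single bit in a SIMD mask. */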
static inline bool
test_bit(unsigned mask, unsigned bit) {
   return mask & (1u << bit);
}

namespace {

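/* Returns the compute-stage prog_data held by the selection state, or
 * nullptr if the state holds something else.
 */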
struct elk_cs_prog_data *
get_cs_prog_data(elk_simd_selection_state &state)
{
   if (std::holds_alternative<struct elk_cs_prog_data *>(state.prog_data))
      return std::get<struct elk_cs_prog_data *>(state.prog_data);
   else
      return nullptr;
}

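/* Returns the base prog_data when the selection state holds compute
 * prog_data, or nullptr otherwise.
 */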
struct elk_stage_prog_data *
get_prog_data(elk_simd_selection_state &state)
{
   if (std::holds_alternative<struct elk_cs_prog_data *>(state.prog_data))
      return &std::get<struct elk_cs_prog_data *>(state.prog_data)->base;
   else
      return nullptr;
}

}

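/* Decides whether the SIMD variant `simd` (SIMD8/16/32 for 0/1/2) should be
 * compiled, given the device, the shader and the variants compiled so far.
 * When a variant is rejected, the reason is recorded in state.error[simd].
 */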
bool
elk_simd_should_compile(elk_simd_selection_state &state, unsigned simd)
{
   assert(simd < SIMD_COUNT);
   assert(!state.compiled[simd]);

   const auto cs_prog_data = get_cs_prog_data(state);
   const auto prog_data = get_prog_data(state);
   const unsigned width = 8u << simd;

   /* For shaders with a variable workgroup size, in most cases we can
    * compile all the variants (the exceptions are bindless dispatch and ray
    * queries), since the choice will happen only at dispatch time.
    */
   const bool workgroup_size_variable = cs_prog_data && cs_prog_data->local_size[0] == 0;

   if (!workgroup_size_variable) {
      if (state.spilled[simd]) {
         state.error[simd] = "Would spill";
         return false;
      }

      if (state.required_width && state.required_width != width) {
         state.error[simd] = "Different than required dispatch width";
         return false;
      }

      if (cs_prog_data) {
         const unsigned workgroup_size = cs_prog_data->local_size[0] *
                                         cs_prog_data->local_size[1] *
                                         cs_prog_data->local_size[2];

         unsigned max_threads = state.devinfo->max_cs_workgroup_threads;

         const unsigned min_simd = state.devinfo->ver >= 20 ? 1 : 0;
         if (simd > min_simd && state.compiled[simd - 1] &&
             workgroup_size <= (width / 2)) {
            state.error[simd] = "Workgroup size already fits in smaller SIMD";
            return false;
         }

         if (DIV_ROUND_UP(workgroup_size, width) > max_threads) {
            state.error[simd] = "Would need more than max_threads to fit all invocations";
            return false;
         }
      }

      /* SIMD32 is only enabled when it is needed, unless forced.
       *
       * TODO: Use performance_analysis and drop this rule.
       */
      if (width == 32 && state.devinfo->ver < 20) {
         if (!INTEL_DEBUG(DEBUG_DO32) && (state.compiled[0] || state.compiled[1])) {
            state.error[simd] = "SIMD32 not required (use INTEL_DEBUG=do32 to force)";
            return false;
         }
      }
   }

   if (width == 8 && state.devinfo->ver >= 20) {
      state.error[simd] = "SIMD8 not supported on Xe2+";
      return false;
   }

   if (width == 32 && cs_prog_data && cs_prog_data->base.ray_queries > 0) {
      state.error[simd] = "Ray queries not supported";
      return false;
   }

   if (width == 32 && cs_prog_data && cs_prog_data->uses_btd_stack_ids) {
      state.error[simd] = "Bindless shader calls not supported";
      return false;
   }

   uint64_t start;
   switch (prog_data->stage) {
   case MESA_SHADER_COMPUTE:
      start = DEBUG_CS_SIMD8;
      break;
   default:
      unreachable("unknown shader stage in elk_simd_should_compile");
   }

   const bool env_skip[] = {
      (intel_simd & (start << 0)) == 0,
      (intel_simd & (start << 1)) == 0,
      (intel_simd & (start << 2)) == 0,
   };

   static_assert(ARRAY_SIZE(env_skip) == SIMD_COUNT);

   if (unlikely(env_skip[simd])) {
      state.error[simd] = "Disabled by INTEL_DEBUG environment variable";
      return false;
   }

   return true;
}

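/* Records that the SIMD variant `simd` was compiled and whether it spilled.
 * A spill in a narrower variant also marks all wider variants as spilled.
 */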
void
elk_simd_mark_compiled(elk_simd_selection_state &state, unsigned simd, bool spilled)
{
   assert(simd < SIMD_COUNT);
   assert(!state.compiled[simd]);

   auto cs_prog_data = get_cs_prog_data(state);

   state.compiled[simd] = true;
   if (cs_prog_data)
      cs_prog_data->prog_mask |= 1u << simd;

   /* If a SIMD spilled, all the larger ones would spill too. */
   if (spilled) {
      for (unsigned i = simd; i < SIMD_COUNT; i++) {
         state.spilled[i] = true;
         if (cs_prog_data)
            cs_prog_data->prog_spilled |= 1u << i;
      }
   }
}

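/* Picks the widest compiled variant that did not spill, falling back to the
 * widest compiled variant, or -1 if nothing was compiled.
 */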
int
elk_simd_select(const struct elk_simd_selection_state &state)
{
   for (int i = SIMD_COUNT - 1; i >= 0; i--) {
      if (state.compiled[i] && !state.spilled[i])
         return i;
   }
   for (int i = SIMD_COUNT - 1; i >= 0; i--) {
      if (state.compiled[i])
         return i;
   }
   return -1;
}

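/* Selects a SIMD variant for the workgroup size given in `sizes`.  When the
 * sizes match what prog_data was compiled for (or no sizes are given), this
 * reduces to elk_simd_select(); otherwise the selection is re-run against a
 * clone of prog_data using the new workgroup size.
 */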
int
elk_simd_select_for_workgroup_size(const struct intel_device_info *devinfo,
                                   const struct elk_cs_prog_data *prog_data,
                                   const unsigned *sizes)
{
   if (!sizes || (prog_data->local_size[0] == sizes[0] &&
                  prog_data->local_size[1] == sizes[1] &&
                  prog_data->local_size[2] == sizes[2])) {
      elk_simd_selection_state simd_state{
         .prog_data = const_cast<struct elk_cs_prog_data *>(prog_data),
      };

      /* Propagate the prog_data information back to the simd_state,
       * so we can use select() directly.
       */
      for (int i = 0; i < SIMD_COUNT; i++) {
         simd_state.compiled[i] = test_bit(prog_data->prog_mask, i);
         simd_state.spilled[i] = test_bit(prog_data->prog_spilled, i);
      }

      return elk_simd_select(simd_state);
   }

   struct elk_cs_prog_data cloned = *prog_data;
   for (unsigned i = 0; i < 3; i++)
      cloned.local_size[i] = sizes[i];

   cloned.prog_mask = 0;
   cloned.prog_spilled = 0;

   elk_simd_selection_state simd_state{
      .devinfo = devinfo,
      .prog_data = &cloned,
   };

   for (unsigned simd = 0; simd < SIMD_COUNT; simd++) {
      /* We are not recompiling, so reuse the original prog_mask and
       * prog_spilled results, since they already contain all possible
       * compilations.
       */
      if (elk_simd_should_compile(simd_state, simd) &&
          test_bit(prog_data->prog_mask, simd)) {
         elk_simd_mark_compiled(simd_state, simd, test_bit(prog_data->prog_spilled, simd));
      }
   }

   return elk_simd_select(simd_state);
}