/*
 * Copyright © 2021 Collabora Ltd.
 *
 * Derived from tu_shader.c which is:
 * Copyright © 2019 Google LLC
 *
 * Also derived from anv_pipeline.c which is
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "panvk_private.h"

#include "nir.h"
#include "nir_builder.h"

struct apply_descriptors_ctx {
   const struct panvk_pipeline_layout *layout;
   bool add_bounds_checks;
   bool has_img_access;
   nir_address_format desc_addr_format;
   nir_address_format ubo_addr_format;
   nir_address_format ssbo_addr_format;
};

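/* Map a buffer-like descriptor type to the NIR address format its memory
 * accesses will use. Only UBO and SSBO descriptor types (and their dynamic
 * variants) are expected here.
 */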
static nir_address_format
addr_format_for_desc_type(VkDescriptorType desc_type,
                          const struct apply_descriptors_ctx *ctx)
{
   switch (desc_type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ctx->ubo_addr_format;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ctx->ssbo_addr_format;

   default:
      unreachable("Unsupported descriptor type");
   }
}

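/* Return the panvk descriptor set layout for a given set index. */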
static const struct panvk_descriptor_set_layout *
get_set_layout(uint32_t set, const struct apply_descriptors_ctx *ctx)
{
   return vk_to_panvk_descriptor_set_layout(ctx->layout->vk.set_layouts[set]);
}

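/* Return the layout of a single binding within a descriptor set. */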
static const struct panvk_descriptor_set_binding_layout *
get_binding_layout(uint32_t set, uint32_t binding,
                   const struct apply_descriptors_ctx *ctx)
{
   return &get_set_layout(set, ctx)->bindings[binding];
}

/** Build a Vulkan resource index
 *
 * A "resource index" is the term used by our SPIR-V parser and the relevant
 * NIR intrinsics for a reference into a descriptor set. It acts much like a
 * deref in NIR except that it accesses opaque descriptors instead of memory.
 *
 * Coming out of SPIR-V, both the resource indices (in the form of
 * vulkan_resource_[re]index intrinsics) and the memory derefs (in the form
 * of nir_deref_instr) use the same vector component/bit size. The meaning
 * of those values for memory derefs (nir_deref_instr) is given by the
 * nir_address_format associated with the descriptor type. For resource
 * indices, it's an encoding entirely internal to panvk which describes, in
 * some sense, the address of the descriptor. Thanks to the NIR/SPIR-V rules,
 * it must be packed into the same size SSA values as a memory address. For
 * this reason, the actual encoding may depend both on the address format for
 * memory derefs and the descriptor address format.
 *
 * The load_vulkan_descriptor intrinsic exists to provide a transition point
 * between these two forms of derefs: descriptor and memory.
 */
static nir_def *
build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
                nir_def *array_index, nir_address_format addr_format,
                const struct apply_descriptors_ctx *ctx)
{
   const struct panvk_descriptor_set_layout *set_layout =
      get_set_layout(set, ctx);
   const struct panvk_descriptor_set_binding_layout *bind_layout =
      &set_layout->bindings[binding];

   uint32_t array_size = bind_layout->array_size;

   switch (bind_layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
      assert(addr_format == nir_address_format_32bit_index_offset);

      const unsigned ubo_idx =
         panvk_pipeline_layout_ubo_index(ctx->layout, set, binding, 0);

      const uint32_t packed = (array_size - 1) << 16 | ubo_idx;

      return nir_vec2(b, nir_imm_int(b, packed), array_index);
   }

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
      assert(addr_format == nir_address_format_64bit_bounded_global ||
             addr_format == nir_address_format_64bit_global_32bit_offset);

      const unsigned set_ubo_idx =
         panvk_pipeline_layout_ubo_start(ctx->layout, set, false) +
         set_layout->desc_ubo_index;

      const uint32_t packed =
         (bind_layout->desc_ubo_stride << 16) | set_ubo_idx;

      return nir_vec4(b, nir_imm_int(b, packed),
                      nir_imm_int(b, bind_layout->desc_ubo_offset),
                      nir_imm_int(b, array_size - 1), array_index);
   }

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
      assert(addr_format == nir_address_format_64bit_bounded_global ||
             addr_format == nir_address_format_64bit_global_32bit_offset);

      const unsigned dyn_ssbo_idx =
         ctx->layout->sets[set].dyn_ssbo_offset + bind_layout->dyn_ssbo_idx;

      const unsigned ubo_idx = PANVK_SYSVAL_UBO_INDEX;
      const unsigned desc_stride = sizeof(struct panvk_ssbo_addr);
      const uint32_t ubo_offset =
         offsetof(struct panvk_sysvals, dyn_ssbos) + dyn_ssbo_idx * desc_stride;

      const uint32_t packed = (desc_stride << 16) | ubo_idx;

      return nir_vec4(b, nir_imm_int(b, packed), nir_imm_int(b, ubo_offset),
                      nir_imm_int(b, array_size - 1), array_index);
   }

   default:
      unreachable("Unsupported descriptor type");
   }
}

/** Adjust a Vulkan resource index
 *
 * This is the equivalent of nir_deref_type_ptr_as_array for resource indices.
 * For array descriptors, it allows us to adjust the array index. Thanks to
 * variable pointers, we cannot always fold this re-index operation into the
 * vulkan_resource_index intrinsic and we have to do it based on nothing but
 * the address format.
 */
static nir_def *
build_res_reindex(nir_builder *b, nir_def *orig, nir_def *delta,
                  nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_index_offset:
      return nir_vec2(b, nir_channel(b, orig, 0),
                      nir_iadd(b, nir_channel(b, orig, 1), delta));

   case nir_address_format_64bit_bounded_global:
   case nir_address_format_64bit_global_32bit_offset:
      return nir_vec4(b, nir_channel(b, orig, 0), nir_channel(b, orig, 1),
                      nir_channel(b, orig, 2),
                      nir_iadd(b, nir_channel(b, orig, 3), delta));

   default:
      unreachable("Unhandled address format");
   }
}

/** Convert a Vulkan resource index into a buffer address
 *
 * In some cases, this does a memory load from the descriptor set and, in
 * others, it simply converts from one form to another.
 *
 * See build_res_index for details about each resource index format.
 */
static nir_def *
build_buffer_addr_for_res_index(nir_builder *b, nir_def *res_index,
                                nir_address_format addr_format,
                                const struct apply_descriptors_ctx *ctx)
{
   switch (addr_format) {
   case nir_address_format_32bit_index_offset: {
      nir_def *packed = nir_channel(b, res_index, 0);
      nir_def *array_index = nir_channel(b, res_index, 1);
      nir_def *surface_index = nir_extract_u16(b, packed, nir_imm_int(b, 0));
      nir_def *array_max = nir_extract_u16(b, packed, nir_imm_int(b, 1));

      if (ctx->add_bounds_checks)
         array_index = nir_umin(b, array_index, array_max);

      return nir_vec2(b, nir_iadd(b, surface_index, array_index),
                      nir_imm_int(b, 0));
   }

   case nir_address_format_64bit_bounded_global:
   case nir_address_format_64bit_global_32bit_offset: {
      nir_def *packed = nir_channel(b, res_index, 0);
      nir_def *desc_ubo_offset = nir_channel(b, res_index, 1);
      nir_def *array_max = nir_channel(b, res_index, 2);
      nir_def *array_index = nir_channel(b, res_index, 3);

      nir_def *desc_ubo_idx = nir_extract_u16(b, packed, nir_imm_int(b, 0));
      nir_def *desc_ubo_stride = nir_extract_u16(b, packed, nir_imm_int(b, 1));

      if (ctx->add_bounds_checks)
         array_index = nir_umin(b, array_index, array_max);

      desc_ubo_offset = nir_iadd(b, desc_ubo_offset,
                                 nir_imul(b, array_index, desc_ubo_stride));

      nir_def *desc = nir_load_ubo(b, 4, 32, desc_ubo_idx, desc_ubo_offset,
                                   .align_mul = 16, .range = ~0);

      /* The offset in the descriptor is guaranteed to be zero when it's
       * written into the descriptor set. This lets us avoid some unnecessary
       * adds.
       */
      return nir_vec4(b, nir_channel(b, desc, 0), nir_channel(b, desc, 1),
                      nir_channel(b, desc, 2), nir_imm_int(b, 0));
   }

   default:
      unreachable("Unhandled address format");
   }
}

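/* Lower the three buffer-descriptor intrinsics (vulkan_resource_index,
 * vulkan_resource_reindex and load_vulkan_descriptor) to the panvk-specific
 * encodings built above, based on the address format of the descriptor type.
 */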
static bool
lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
                    const struct apply_descriptors_ctx *ctx)
{
   b->cursor = nir_before_instr(&intrin->instr);

   const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
   nir_address_format addr_format = addr_format_for_desc_type(desc_type, ctx);

   nir_def *res;
   switch (intrin->intrinsic) {
   case nir_intrinsic_vulkan_resource_index:
      res = build_res_index(b, nir_intrinsic_desc_set(intrin),
                            nir_intrinsic_binding(intrin), intrin->src[0].ssa,
                            addr_format, ctx);
      break;

   case nir_intrinsic_vulkan_resource_reindex:
      res = build_res_reindex(b, intrin->src[0].ssa, intrin->src[1].ssa,
                              addr_format);
      break;

   case nir_intrinsic_load_vulkan_descriptor:
      res = build_buffer_addr_for_res_index(b, intrin->src[0].ssa, addr_format,
                                            ctx);
      break;

   default:
      unreachable("Unhandled resource intrinsic");
   }

   assert(intrin->def.bit_size == res->bit_size);
   assert(intrin->def.num_components == res->num_components);
   nir_def_rewrite_uses(&intrin->def, res);
   nir_instr_remove(&intrin->instr);

   return true;
}

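/* Walk a texture/image/sampler deref back to the underlying variable and
 * return its set/binding along with either an immediate or an SSA array
 * index.
 */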
static void
get_resource_deref_binding(nir_deref_instr *deref, uint32_t *set,
                           uint32_t *binding, uint32_t *index_imm,
                           nir_def **index_ssa)
{
   *index_imm = 0;
   *index_ssa = NULL;

   if (deref->deref_type == nir_deref_type_array) {
      if (index_imm != NULL && nir_src_is_const(deref->arr.index))
         *index_imm = nir_src_as_uint(deref->arr.index);
      else
         *index_ssa = deref->arr.index.ssa;

      deref = nir_deref_instr_parent(deref);
   }

   assert(deref->deref_type == nir_deref_type_var);
   nir_variable *var = deref->var;

   *set = var->data.descriptor_set;
   *binding = var->data.binding;
}

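/* Load (part of) the descriptor backing a resource deref from the per-set
 * descriptor UBO, at the given byte offset within the descriptor.
 */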
static nir_def *
load_resource_deref_desc(nir_builder *b, nir_deref_instr *deref,
                         unsigned desc_offset, unsigned num_components,
                         unsigned bit_size,
                         const struct apply_descriptors_ctx *ctx)
{
   uint32_t set, binding, index_imm;
   nir_def *index_ssa;
   get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);

   const struct panvk_descriptor_set_layout *set_layout =
      get_set_layout(set, ctx);
   const struct panvk_descriptor_set_binding_layout *bind_layout =
      &set_layout->bindings[binding];

   assert(index_ssa == NULL || index_imm == 0);
   if (index_ssa == NULL)
      index_ssa = nir_imm_int(b, index_imm);

   const unsigned set_ubo_idx =
      panvk_pipeline_layout_ubo_start(ctx->layout, set, false) +
      set_layout->desc_ubo_index;

   nir_def *desc_ubo_offset =
      nir_iadd_imm(b, nir_imul_imm(b, index_ssa, bind_layout->desc_ubo_stride),
                   bind_layout->desc_ubo_offset + desc_offset);

   assert(bind_layout->desc_ubo_stride > 0);
   unsigned desc_align = (1 << (ffs(bind_layout->desc_ubo_stride) - 1));
   desc_align = MIN2(desc_align, 16);

   return nir_load_ubo(b, num_components, bit_size, nir_imm_int(b, set_ubo_idx),
                       desc_ubo_offset, .align_mul = desc_align,
                       .align_offset = (desc_offset % desc_align), .range = ~0);
}

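/* Load the size of a texture or image from its descriptor. Buffer views
 * store a single 32-bit size; other dimensions store 16-bit size-minus-one
 * values that are widened and incremented here.
 */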
static nir_def *
load_tex_img_size(nir_builder *b, nir_deref_instr *deref,
                  enum glsl_sampler_dim dim,
                  const struct apply_descriptors_ctx *ctx)
{
   if (dim == GLSL_SAMPLER_DIM_BUF) {
      return load_resource_deref_desc(b, deref, 0, 1, 32, ctx);
   } else {
      nir_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);

      /* The sizes are provided as 16-bit values with 1 subtracted so
       * convert to 32-bit and add 1.
       */
      return nir_iadd_imm(b, nir_u2u32(b, desc), 1);
   }
}

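/* Load the mip level count, stored in the low byte of the last 16-bit
 * component of the size descriptor.
 */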
static nir_def *
load_tex_img_levels(nir_builder *b, nir_deref_instr *deref,
                    enum glsl_sampler_dim dim,
                    const struct apply_descriptors_ctx *ctx)
{
   assert(dim != GLSL_SAMPLER_DIM_BUF);
   nir_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
   return nir_u2u32(b, nir_iand_imm(b, nir_channel(b, desc, 3), 0xff));
}

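/* Load the sample count, stored in the high byte of the last 16-bit
 * component of the size descriptor.
 */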
static nir_def *
load_tex_img_samples(nir_builder *b, nir_deref_instr *deref,
                     enum glsl_sampler_dim dim,
                     const struct apply_descriptors_ctx *ctx)
{
   assert(dim != GLSL_SAMPLER_DIM_BUF);
   nir_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
   return nir_u2u32(b, nir_ushr_imm(b, nir_channel(b, desc, 3), 8));
}

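/* Lower texture instructions: txs/query_levels/texture_samples are answered
 * from the descriptor UBO, and texture/sampler derefs are turned into flat
 * indices (plus an optional dynamic offset) based on the pipeline layout.
 */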
static bool
lower_tex(nir_builder *b, nir_tex_instr *tex,
          const struct apply_descriptors_ctx *ctx)
{
   bool progress = false;

   b->cursor = nir_before_instr(&tex->instr);

   if (tex->op == nir_texop_txs || tex->op == nir_texop_query_levels ||
       tex->op == nir_texop_texture_samples) {
      int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
      assert(tex_src_idx >= 0);
      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);

      const enum glsl_sampler_dim dim = tex->sampler_dim;

      nir_def *res;
      switch (tex->op) {
      case nir_texop_txs:
         res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
                            nir_component_mask(tex->def.num_components));
         break;
      case nir_texop_query_levels:
         assert(tex->def.num_components == 1);
         res = load_tex_img_levels(b, deref, dim, ctx);
         break;
      case nir_texop_texture_samples:
         assert(tex->def.num_components == 1);
         res = load_tex_img_samples(b, deref, dim, ctx);
         break;
      default:
         unreachable("Unsupported texture query op");
      }

      nir_def_rewrite_uses(&tex->def, res);
      nir_instr_remove(&tex->instr);
      return true;
   }

   int sampler_src_idx =
      nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
   if (sampler_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
      nir_tex_instr_remove_src(tex, sampler_src_idx);

      uint32_t set, binding, index_imm;
      nir_def *index_ssa;
      get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);

      const struct panvk_descriptor_set_binding_layout *bind_layout =
         get_binding_layout(set, binding, ctx);

      tex->sampler_index = ctx->layout->sets[set].sampler_offset +
                           bind_layout->sampler_idx + index_imm;

      if (index_ssa != NULL) {
         nir_tex_instr_add_src(tex, nir_tex_src_sampler_offset, index_ssa);
      }
      progress = true;
   }

   int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   if (tex_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
      nir_tex_instr_remove_src(tex, tex_src_idx);

      uint32_t set, binding, index_imm;
      nir_def *index_ssa;
      get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);

      const struct panvk_descriptor_set_binding_layout *bind_layout =
         get_binding_layout(set, binding, ctx);

      tex->texture_index =
         ctx->layout->sets[set].tex_offset + bind_layout->tex_idx + index_imm;

      if (index_ssa != NULL) {
         nir_tex_instr_add_src(tex, nir_tex_src_texture_offset, index_ssa);
      }
      progress = true;
   }

   return progress;
}

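/* Compute the flat image table index for a storage image or texel buffer
 * deref, as an immediate when possible and as an SSA value otherwise.
 */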
static nir_def *
get_img_index(nir_builder *b, nir_deref_instr *deref,
              const struct apply_descriptors_ctx *ctx)
{
   uint32_t set, binding, index_imm;
   nir_def *index_ssa;
   get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);

   const struct panvk_descriptor_set_binding_layout *bind_layout =
      get_binding_layout(set, binding, ctx);
   assert(bind_layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
          bind_layout->type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ||
          bind_layout->type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);

   unsigned img_offset =
      ctx->layout->sets[set].img_offset + bind_layout->img_idx;

   if (index_ssa == NULL) {
      return nir_imm_int(b, img_offset + index_imm);
   } else {
      assert(index_imm == 0);
      return nir_iadd_imm(b, index_ssa, img_offset);
   }
}

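/* Lower image intrinsics: size/samples queries are answered from the
 * descriptor UBO, while loads, stores and atomics are rewritten to use a
 * flat image index and has_img_access is recorded for the caller.
 */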
static bool
lower_img_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
                    struct apply_descriptors_ctx *ctx)
{
   b->cursor = nir_before_instr(&intr->instr);
   nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);

   if (intr->intrinsic == nir_intrinsic_image_deref_size ||
       intr->intrinsic == nir_intrinsic_image_deref_samples) {
      const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(intr);

      nir_def *res;
      switch (intr->intrinsic) {
      case nir_intrinsic_image_deref_size:
         res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
                            nir_component_mask(intr->def.num_components));
         break;
      case nir_intrinsic_image_deref_samples:
         res = load_tex_img_samples(b, deref, dim, ctx);
         break;
      default:
         unreachable("Unsupported image query op");
      }

      nir_def_rewrite_uses(&intr->def, res);
      nir_instr_remove(&intr->instr);
   } else {
      nir_rewrite_image_intrinsic(intr, get_img_index(b, deref, ctx), false);
      ctx->has_img_access = true;
   }

   return true;
}

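/* Dispatch descriptor-related intrinsics to the buffer or image lowering
 * helpers; everything else is left untouched.
 */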
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
                struct apply_descriptors_ctx *ctx)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_vulkan_resource_index:
   case nir_intrinsic_vulkan_resource_reindex:
   case nir_intrinsic_load_vulkan_descriptor:
      return lower_res_intrinsic(b, intr, ctx);
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
      return lower_img_intrinsic(b, intr, ctx);
   default:
      return false;
   }
}

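/* Per-instruction callback for nir_shader_instructions_pass(): only texture
 * and intrinsic instructions need lowering.
 */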
static bool
lower_descriptors_instr(nir_builder *b, nir_instr *instr, void *data)
{
   struct apply_descriptors_ctx *ctx = data;

   switch (instr->type) {
   case nir_instr_type_tex:
      return lower_tex(b, nir_instr_as_tex(instr), ctx);
   case nir_instr_type_intrinsic:
      return lower_intrinsic(b, nir_instr_as_intrinsic(instr), ctx);
   default:
      return false;
   }
}

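/* Entry point of the pass: lower all descriptor accesses in the shader
 * according to the pipeline layout, and report whether any image access
 * was encountered.
 */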
bool
panvk_per_arch(nir_lower_descriptors)(nir_shader *nir, struct panvk_device *dev,
                                      const struct panvk_pipeline_layout *layout,
                                      bool *has_img_access_out)
{
   struct apply_descriptors_ctx ctx = {
      .layout = layout,
      .desc_addr_format = nir_address_format_32bit_index_offset,
      .ubo_addr_format = nir_address_format_32bit_index_offset,
      .ssbo_addr_format = dev->vk.enabled_features.robustBufferAccess
                             ? nir_address_format_64bit_bounded_global
                             : nir_address_format_64bit_global_32bit_offset,
   };

   bool progress = nir_shader_instructions_pass(
      nir, lower_descriptors_instr,
      nir_metadata_block_index | nir_metadata_dominance, (void *)&ctx);
   if (has_img_access_out)
      *has_img_access_out = ctx.has_img_access;

   return progress;
}