/*
 * Copyright © 2021 Collabora Ltd.
 *
 * Derived from tu_shader.c which is:
 * Copyright © 2019 Google LLC
 *
 * Also derived from anv_pipeline.c which is
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "panvk_private.h"

#include "nir.h"
#include "nir_builder.h"

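/* Per-shader lowering state: the pipeline layout being compiled against, the
 * address formats used for descriptor/UBO/SSBO access, and a couple of flags:
 * add_bounds_checks clamps dynamic array indices to the binding's array size,
 * and has_img_access is set as a side effect when an image access is lowered.
 */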
struct apply_descriptors_ctx {
   const struct panvk_pipeline_layout *layout;
   bool add_bounds_checks;
   bool has_img_access;
   nir_address_format desc_addr_format;
   nir_address_format ubo_addr_format;
   nir_address_format ssbo_addr_format;
};

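/* Map a descriptor type to the nir_address_format used for memory derefs of
 * that type.  Only buffer-like descriptors are expected here.
 */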
static nir_address_format
addr_format_for_desc_type(VkDescriptorType desc_type,
                          const struct apply_descriptors_ctx *ctx)
{
   switch (desc_type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ctx->ubo_addr_format;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ctx->ssbo_addr_format;

   default:
      unreachable("Unsupported descriptor type");
   }
}

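/* Convenience lookups for the panvk set/binding layouts referenced by a
 * (set, binding) pair in the pipeline layout.
 */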
static const struct panvk_descriptor_set_layout *
get_set_layout(uint32_t set, const struct apply_descriptors_ctx *ctx)
{
   return vk_to_panvk_descriptor_set_layout(ctx->layout->vk.set_layouts[set]);
}

static const struct panvk_descriptor_set_binding_layout *
get_binding_layout(uint32_t set, uint32_t binding,
                   const struct apply_descriptors_ctx *ctx)
{
   return &get_set_layout(set, ctx)->bindings[binding];
}

/** Build a Vulkan resource index
 *
 * A "resource index" is the term used by our SPIR-V parser and the relevant
 * NIR intrinsics for a reference into a descriptor set.  It acts much like a
 * deref in NIR except that it accesses opaque descriptors instead of memory.
 *
 * Coming out of SPIR-V, both the resource indices (in the form of
 * vulkan_resource_[re]index intrinsics) and the memory derefs (in the form
 * of nir_deref_instr) use the same vector component/bit size.  The meaning
 * of those values for memory derefs (nir_deref_instr) is given by the
 * nir_address_format associated with the descriptor type.  For resource
 * indices, it's an encoding entirely internal to panvk which describes, in
 * some sense, the address of the descriptor.  Thanks to the NIR/SPIR-V rules,
 * it must be packed into the same size SSA values as a memory address.  For
 * this reason, the actual encoding may depend both on the address format for
 * memory derefs and the descriptor address format.
 *
 * The load_vulkan_descriptor intrinsic exists to provide a transition point
 * between these two forms of derefs: descriptor and memory.
 */
static nir_ssa_def *
build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
                nir_ssa_def *array_index, nir_address_format addr_format,
                const struct apply_descriptors_ctx *ctx)
{
   const struct panvk_descriptor_set_layout *set_layout =
      get_set_layout(set, ctx);
   const struct panvk_descriptor_set_binding_layout *bind_layout =
      &set_layout->bindings[binding];

   uint32_t array_size = bind_layout->array_size;

   switch (bind_layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
      assert(addr_format == nir_address_format_32bit_index_offset);

      const unsigned ubo_idx =
         panvk_pipeline_layout_ubo_index(ctx->layout, set, binding, 0);

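      /* vec2 encoding: .x packs the maximum array index (top 16 bits) and
       * the first UBO index for this binding (bottom 16 bits), .y carries
       * the dynamic array index.
       */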
      const uint32_t packed = (array_size - 1) << 16 | ubo_idx;

      return nir_vec2(b, nir_imm_int(b, packed), array_index);
   }

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
      assert(addr_format == nir_address_format_64bit_bounded_global ||
             addr_format == nir_address_format_64bit_global_32bit_offset);

      const unsigned set_ubo_idx =
         panvk_pipeline_layout_ubo_start(ctx->layout, set, false) +
         set_layout->desc_ubo_index;

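      /* vec4 encoding: .x packs the descriptor record stride (top 16 bits)
       * and the descriptor UBO index (bottom 16 bits), .y is the offset of
       * this binding's records in that UBO, .z is the maximum array index,
       * and .w the dynamic array index.  The SSBO address itself is only
       * read from the descriptor UBO when this index is turned into a
       * buffer address.
       */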
      const uint32_t packed = (bind_layout->desc_ubo_stride << 16) |
                              set_ubo_idx;

      return nir_vec4(b, nir_imm_int(b, packed),
                         nir_imm_int(b, bind_layout->desc_ubo_offset),
                         nir_imm_int(b, array_size - 1),
                         array_index);
   }

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
      assert(addr_format == nir_address_format_64bit_bounded_global ||
             addr_format == nir_address_format_64bit_global_32bit_offset);

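      /* Dynamic SSBO descriptors are copied into the dyn_ssbos array of the
       * sysval UBO, so reuse the vec4 encoding above with that UBO as the
       * descriptor source.
       */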
      const unsigned dyn_ssbo_idx = ctx->layout->sets[set].dyn_ssbo_offset +
                                    bind_layout->dyn_ssbo_idx;

      const unsigned ubo_idx = PANVK_SYSVAL_UBO_INDEX;
      const unsigned desc_stride = sizeof(struct panvk_ssbo_addr);
      const uint32_t ubo_offset = offsetof(struct panvk_sysvals, dyn_ssbos) +
                                  dyn_ssbo_idx * desc_stride;

      const uint32_t packed = (desc_stride << 16) | ubo_idx;

      return nir_vec4(b, nir_imm_int(b, packed),
                         nir_imm_int(b, ubo_offset),
                         nir_imm_int(b, array_size - 1),
                         array_index);
   }

   default:
      unreachable("Unsupported descriptor type");
   }
}

/** Adjust a Vulkan resource index
 *
 * This is the equivalent of nir_deref_type_ptr_as_array for resource indices.
 * For array descriptors, it allows us to adjust the array index.  Thanks to
 * variable pointers, we cannot always fold this re-index operation into the
 * vulkan_resource_index intrinsic and we have to do it based on nothing but
 * the address format.
 */
static nir_ssa_def *
build_res_reindex(nir_builder *b, nir_ssa_def *orig, nir_ssa_def *delta,
                  nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_index_offset:
      return nir_vec2(b, nir_channel(b, orig, 0),
                         nir_iadd(b, nir_channel(b, orig, 1), delta));

   case nir_address_format_64bit_bounded_global:
   case nir_address_format_64bit_global_32bit_offset:
      return nir_vec4(b, nir_channel(b, orig, 0),
                         nir_channel(b, orig, 1),
                         nir_channel(b, orig, 2),
                         nir_iadd(b, nir_channel(b, orig, 3), delta));

   default:
      unreachable("Unhandled address format");
   }
}

/** Convert a Vulkan resource index into a buffer address
 *
 * In some cases, this does a memory load from the descriptor set and, in
 * others, it simply converts from one form to another.
 *
 * See build_res_index for details about each resource index format.
 */
static nir_ssa_def *
build_buffer_addr_for_res_index(nir_builder *b,
                                nir_ssa_def *res_index,
                                nir_address_format addr_format,
                                const struct apply_descriptors_ctx *ctx)
{
   switch (addr_format) {
   case nir_address_format_32bit_index_offset: {
      nir_ssa_def *packed = nir_channel(b, res_index, 0);
      nir_ssa_def *array_index = nir_channel(b, res_index, 1);
      nir_ssa_def *surface_index = nir_extract_u16(b, packed, nir_imm_int(b, 0));
      nir_ssa_def *array_max = nir_extract_u16(b, packed, nir_imm_int(b, 1));

      if (ctx->add_bounds_checks)
         array_index = nir_umin(b, array_index, array_max);

      return nir_vec2(b, nir_iadd(b, surface_index, array_index),
                         nir_imm_int(b, 0));
   }

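   /* For SSBOs the actual (64-bit address, size) record lives in the set's
    * descriptor UBO; load it and append a zero offset to form a bounded
    * global address.
    */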
   case nir_address_format_64bit_bounded_global:
   case nir_address_format_64bit_global_32bit_offset: {
      nir_ssa_def *packed = nir_channel(b, res_index, 0);
      nir_ssa_def *desc_ubo_offset = nir_channel(b, res_index, 1);
      nir_ssa_def *array_max = nir_channel(b, res_index, 2);
      nir_ssa_def *array_index = nir_channel(b, res_index, 3);

      nir_ssa_def *desc_ubo_idx = nir_extract_u16(b, packed, nir_imm_int(b, 0));
      nir_ssa_def *desc_ubo_stride = nir_extract_u16(b, packed, nir_imm_int(b, 1));

      if (ctx->add_bounds_checks)
         array_index = nir_umin(b, array_index, array_max);

      desc_ubo_offset = nir_iadd(b, desc_ubo_offset,
                                    nir_imul(b, array_index, desc_ubo_stride));

      nir_ssa_def *desc = nir_load_ubo(b, 4, 32, desc_ubo_idx,
                                       desc_ubo_offset,
                                       .align_mul=16, .range=~0);

      /* The offset in the descriptor is guaranteed to be zero when it's
       * written into the descriptor set.  This lets us avoid some unnecessary
       * adds.
       */
      return nir_vec4(b, nir_channel(b, desc, 0),
                         nir_channel(b, desc, 1),
                         nir_channel(b, desc, 2),
                         nir_imm_int(b, 0));
   }

   default:
      unreachable("Unhandled address format");
   }
}

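/* Lower the three resource intrinsics (vulkan_resource_index,
 * vulkan_resource_reindex, load_vulkan_descriptor) to the encodings built
 * above.
 */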
static bool
lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
                    const struct apply_descriptors_ctx *ctx)
{
   b->cursor = nir_before_instr(&intrin->instr);

   const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
   nir_address_format addr_format = addr_format_for_desc_type(desc_type, ctx);

   nir_ssa_def *res;
   switch (intrin->intrinsic) {
   case nir_intrinsic_vulkan_resource_index:
      assert(intrin->src[0].is_ssa);
      res = build_res_index(b, nir_intrinsic_desc_set(intrin),
                               nir_intrinsic_binding(intrin),
                               intrin->src[0].ssa,
                               addr_format, ctx);
      break;

   case nir_intrinsic_vulkan_resource_reindex:
      assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
      res = build_res_reindex(b, intrin->src[0].ssa,
                                 intrin->src[1].ssa,
                                 addr_format);
      break;

   case nir_intrinsic_load_vulkan_descriptor:
      assert(intrin->src[0].is_ssa);
      res = build_buffer_addr_for_res_index(b, intrin->src[0].ssa,
                                               addr_format, ctx);
      break;

   default:
      unreachable("Unhandled resource intrinsic");
   }

   assert(intrin->dest.is_ssa);
   assert(intrin->dest.ssa.bit_size == res->bit_size);
   assert(intrin->dest.ssa.num_components == res->num_components);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
   nir_instr_remove(&intrin->instr);

   return true;
}

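/* get_ssbo_size is answered from the bounded-global address vector, whose
 * third component is the buffer size loaded from the descriptor UBO.
 */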
static bool
lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin,
                    const struct apply_descriptors_ctx *ctx)
{
   b->cursor = nir_before_instr(&intrin->instr);

   nir_address_format addr_format =
      addr_format_for_desc_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, ctx);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *desc = build_buffer_addr_for_res_index(b, intrin->src[0].ssa,
                                                          addr_format, ctx);

   switch (addr_format) {
   case nir_address_format_64bit_bounded_global:
   case nir_address_format_64bit_global_32bit_offset: {
      nir_ssa_def *size = nir_channel(b, desc, 2);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, size);
      nir_instr_remove(&intrin->instr);
      break;
   }

   default:
      unreachable("Unsupported address format");
   }

   return true;
}

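/* Walk a (possibly array-indexed) resource variable deref back to its
 * variable and return the descriptor set, binding, and either a constant or
 * an SSA array index.
 */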
static void
get_resource_deref_binding(nir_deref_instr *deref,
                           uint32_t *set, uint32_t *binding,
                           uint32_t *index_imm, nir_ssa_def **index_ssa)
{
   *index_imm = 0;
   *index_ssa = NULL;

   if (deref->deref_type == nir_deref_type_array) {
      assert(deref->arr.index.is_ssa);
      if (index_imm != NULL && nir_src_is_const(deref->arr.index))
         *index_imm = nir_src_as_uint(deref->arr.index);
      else
         *index_ssa = deref->arr.index.ssa;

      deref = nir_deref_instr_parent(deref);
   }

   assert(deref->deref_type == nir_deref_type_var);
   nir_variable *var = deref->var;

   *set = var->data.descriptor_set;
   *binding = var->data.binding;
}

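/* Load num_components x bit_size worth of the descriptor record backing the
 * given deref from the set's descriptor UBO, starting desc_offset bytes into
 * the record.
 */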
static nir_ssa_def *
load_resource_deref_desc(nir_builder *b, nir_deref_instr *deref,
                         unsigned desc_offset,
                         unsigned num_components, unsigned bit_size,
                         const struct apply_descriptors_ctx *ctx)
{
   uint32_t set, binding, index_imm;
   nir_ssa_def *index_ssa;
   get_resource_deref_binding(deref, &set, &binding,
                              &index_imm, &index_ssa);

   const struct panvk_descriptor_set_layout *set_layout =
      get_set_layout(set, ctx);
   const struct panvk_descriptor_set_binding_layout *bind_layout =
      &set_layout->bindings[binding];

   assert(index_ssa == NULL || index_imm == 0);
   if (index_ssa == NULL)
      index_ssa = nir_imm_int(b, index_imm);

   const unsigned set_ubo_idx =
      panvk_pipeline_layout_ubo_start(ctx->layout, set, false) +
      set_layout->desc_ubo_index;

   nir_ssa_def *desc_ubo_offset =
      nir_iadd_imm(b, nir_imul_imm(b, index_ssa,
                                      bind_layout->desc_ubo_stride),
                      bind_layout->desc_ubo_offset + desc_offset);

   assert(bind_layout->desc_ubo_stride > 0);
   unsigned desc_align = (1 << (ffs(bind_layout->desc_ubo_stride) - 1));
   desc_align = MIN2(desc_align, 16);

   return nir_load_ubo(b, num_components, bit_size,
                       nir_imm_int(b, set_ubo_idx),
                       desc_ubo_offset,
                       .align_mul=desc_align,
                       .align_offset=(desc_offset % desc_align),
                       .range=~0);
}

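/* Texture/image query helpers.  For buffer views the descriptor record starts
 * with a single 32-bit size.  For all other dimensions it starts with four
 * 16-bit words: the first three are the image sizes minus one, and the fourth
 * packs (sample count << 8) | level count.
 */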
static nir_ssa_def *
load_tex_img_size(nir_builder *b, nir_deref_instr *deref,
                  enum glsl_sampler_dim dim,
                  const struct apply_descriptors_ctx *ctx)
{
   if (dim == GLSL_SAMPLER_DIM_BUF) {
      return load_resource_deref_desc(b, deref, 0, 1, 32, ctx);
   } else {
      nir_ssa_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);

      /* The sizes are provided as 16-bit values with 1 subtracted so
       * convert to 32-bit and add 1.
       */
      return nir_iadd_imm(b, nir_u2u32(b, desc), 1);
   }
}

static nir_ssa_def *
load_tex_img_levels(nir_builder *b, nir_deref_instr *deref,
                    enum glsl_sampler_dim dim,
                    const struct apply_descriptors_ctx *ctx)
{
   assert(dim != GLSL_SAMPLER_DIM_BUF);
   nir_ssa_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
   return nir_u2u32(b, nir_iand_imm(b, nir_channel(b, desc, 3), 0xff));
}

static nir_ssa_def *
load_tex_img_samples(nir_builder *b, nir_deref_instr *deref,
                     enum glsl_sampler_dim dim,
                     const struct apply_descriptors_ctx *ctx)
{
   assert(dim != GLSL_SAMPLER_DIM_BUF);
   nir_ssa_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
   return nir_u2u32(b, nir_ushr_imm(b, nir_channel(b, desc, 3), 8));
}

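/* Lower texture instructions: txs/query_levels/texture_samples are answered
 * straight from the descriptor record, while texture and sampler derefs are
 * replaced by flat table indices (plus an offset source when the array index
 * is dynamic).
 */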
static bool
lower_tex(nir_builder *b, nir_tex_instr *tex,
          const struct apply_descriptors_ctx *ctx)
{
   bool progress = false;

   b->cursor = nir_before_instr(&tex->instr);

   if (tex->op == nir_texop_txs ||
       tex->op == nir_texop_query_levels ||
       tex->op == nir_texop_texture_samples) {
      int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
      assert(tex_src_idx >= 0);
      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);

      const enum glsl_sampler_dim dim = tex->sampler_dim;

      nir_ssa_def *res;
      switch (tex->op) {
      case nir_texop_txs:
         res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
                            nir_component_mask(tex->dest.ssa.num_components));
         break;
      case nir_texop_query_levels:
         assert(tex->dest.ssa.num_components == 1);
         res = load_tex_img_levels(b, deref, dim, ctx);
         break;
      case nir_texop_texture_samples:
         assert(tex->dest.ssa.num_components == 1);
         res = load_tex_img_samples(b, deref, dim, ctx);
         break;
      default:
         unreachable("Unsupported texture query op");
      }

      nir_ssa_def_rewrite_uses(&tex->dest.ssa, res);
      nir_instr_remove(&tex->instr);
      return true;
   }

   int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
   if (sampler_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
      nir_tex_instr_remove_src(tex, sampler_src_idx);

      uint32_t set, binding, index_imm;
      nir_ssa_def *index_ssa;
      get_resource_deref_binding(deref, &set, &binding,
                                 &index_imm, &index_ssa);

      const struct panvk_descriptor_set_binding_layout *bind_layout =
         get_binding_layout(set, binding, ctx);

      tex->sampler_index = ctx->layout->sets[set].sampler_offset +
                           bind_layout->sampler_idx + index_imm;

      if (index_ssa != NULL) {
         nir_tex_instr_add_src(tex, nir_tex_src_sampler_offset,
                               nir_src_for_ssa(index_ssa));
      }
      progress = true;
   }

   int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   if (tex_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
      nir_tex_instr_remove_src(tex, tex_src_idx);

      uint32_t set, binding, index_imm;
      nir_ssa_def *index_ssa;
      get_resource_deref_binding(deref, &set, &binding,
                                 &index_imm, &index_ssa);

      const struct panvk_descriptor_set_binding_layout *bind_layout =
         get_binding_layout(set, binding, ctx);

      tex->texture_index = ctx->layout->sets[set].tex_offset +
                           bind_layout->tex_idx + index_imm;

      if (index_ssa != NULL) {
         nir_tex_instr_add_src(tex, nir_tex_src_texture_offset,
                               nir_src_for_ssa(index_ssa));
      }
      progress = true;
   }

   return progress;
}

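/* Translate a storage image/texel buffer deref into a flat index into the
 * image table, adding the dynamic array index when there is one.
 */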
static nir_ssa_def *
get_img_index(nir_builder *b, nir_deref_instr *deref,
              const struct apply_descriptors_ctx *ctx)
{
   uint32_t set, binding, index_imm;
   nir_ssa_def *index_ssa;
   get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);

   const struct panvk_descriptor_set_binding_layout *bind_layout =
      get_binding_layout(set, binding, ctx);
   assert(bind_layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
          bind_layout->type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ||
          bind_layout->type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);

   unsigned img_offset = ctx->layout->sets[set].img_offset +
                         bind_layout->img_idx;

   if (index_ssa == NULL) {
      return nir_imm_int(b, img_offset + index_imm);
   } else {
      assert(index_imm == 0);
      return nir_iadd_imm(b, index_ssa, img_offset);
   }
}

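/* Lower image derefs: size/samples queries are answered from the descriptor
 * record, and all other image intrinsics are rewritten to use the flat image
 * index, marking the shader as accessing images.
 */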
static bool
lower_img_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
                    struct apply_descriptors_ctx *ctx)
{
   b->cursor = nir_before_instr(&intr->instr);
   nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);

   if (intr->intrinsic == nir_intrinsic_image_deref_size ||
       intr->intrinsic == nir_intrinsic_image_deref_samples) {
      assert(intr->dest.is_ssa);

      const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(intr);

      nir_ssa_def *res;
      switch (intr->intrinsic) {
      case nir_intrinsic_image_deref_size:
         res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
                            nir_component_mask(intr->dest.ssa.num_components));
         break;
      case nir_intrinsic_image_deref_samples:
         res = load_tex_img_samples(b, deref, dim, ctx);
         break;
      default:
         unreachable("Unsupported image query op");
      }

      nir_ssa_def_rewrite_uses(&intr->dest.ssa, res);
      nir_instr_remove(&intr->instr);
   } else {
      nir_rewrite_image_intrinsic(intr, get_img_index(b, deref, ctx), false);
      ctx->has_img_access = true;
   }

   return true;
}

static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
                struct apply_descriptors_ctx *ctx)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_vulkan_resource_index:
   case nir_intrinsic_vulkan_resource_reindex:
   case nir_intrinsic_load_vulkan_descriptor:
      return lower_res_intrinsic(b, intr, ctx);
   case nir_intrinsic_get_ssbo_size:
      return lower_get_ssbo_size(b, intr, ctx);
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_fadd:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
      return lower_img_intrinsic(b, intr, ctx);
   default:
      return false;
   }
}

static bool
lower_descriptors_instr(nir_builder *b,
                        nir_instr *instr,
                        void *data)
{
   struct apply_descriptors_ctx *ctx = data;

   switch (instr->type) {
   case nir_instr_type_tex:
      return lower_tex(b, nir_instr_as_tex(instr), ctx);
   case nir_instr_type_intrinsic:
      return lower_intrinsic(b, nir_instr_as_intrinsic(instr), ctx);
   default:
      return false;
   }
}

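/* Pass entry point: rewrite every descriptor-related deref and intrinsic in
 * the shader according to the pipeline layout.  UBOs use 32-bit index/offset
 * addressing; SSBOs use bounded 64-bit addressing when robustBufferAccess is
 * enabled and 64-bit global addressing with a 32-bit offset otherwise.
 */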
bool
panvk_per_arch(nir_lower_descriptors)(nir_shader *nir,
                                      struct panvk_device *dev,
                                      const struct panvk_pipeline_layout *layout,
                                      bool *has_img_access_out)
{
   struct apply_descriptors_ctx ctx = {
      .layout = layout,
      .desc_addr_format = nir_address_format_32bit_index_offset,
      .ubo_addr_format = nir_address_format_32bit_index_offset,
      .ssbo_addr_format = dev->vk.enabled_features.robustBufferAccess ?
                          nir_address_format_64bit_bounded_global :
                          nir_address_format_64bit_global_32bit_offset,
   };

   bool progress = nir_shader_instructions_pass(nir, lower_descriptors_instr,
                                                nir_metadata_block_index |
                                                nir_metadata_dominance,
                                                (void *)&ctx);
   if (has_img_access_out)
      *has_img_access_out = ctx.has_img_access;

   return progress;
}