/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"
#include "sid.h"

/**
 * Return a value equal to the given i32 \p index if it lies in [0,num),
 * or an undefined value in the same interval otherwise.
 */
static LLVMValueRef si_llvm_bound_index(struct si_shader_context *ctx, LLVMValueRef index,
                                        unsigned num)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef c_max = LLVMConstInt(ctx->ac.i32, num - 1, 0);
   LLVMValueRef cc;

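   /* Power-of-two case: an AND with num - 1 keeps the index within [0, num). */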
   if (util_is_power_of_two_or_zero(num)) {
      index = LLVMBuildAnd(builder, index, c_max, "");
   } else {
      /* In theory, this MAX pattern should result in code that is
       * as good as the bit-wise AND above.
       *
       * In practice, LLVM generates worse code (at the time of
       * writing), because its value tracking is not strong enough.
       */
      cc = LLVMBuildICmp(builder, LLVMIntULE, index, c_max, "");
      index = LLVMBuildSelect(builder, cc, index, c_max, "");
   }

   return index;
}

static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *ctx)
{
   LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
   struct si_shader_selector *sel = ctx->shader->selector;

   /* Do the bounds checking with a descriptor, because
    * doing computation and manual bounds checking of 64-bit
    * addresses generates horrible VALU code with very high
    * VGPR usage and very low SIMD occupancy.
    */
   ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.intptr, "");

   LLVMValueRef desc0, desc1;
   desc0 = ptr;
   desc1 = LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);

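   /* Dword 3 of the buffer descriptor: XYZW destination swizzles plus the
    * format/out-of-bounds fields, which differ between GFX10+ and older chips.
    */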
   uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                    S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

   if (ctx->screen->info.chip_class >= GFX10)
      rsrc3 |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
               S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
   else
      rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
               S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

   LLVMValueRef desc_elems[] = {desc0, desc1,
                                LLVMConstInt(ctx->ac.i32, sel->info.constbuf0_num_slots * 16, 0),
                                LLVMConstInt(ctx->ac.i32, rsrc3, false)};

   return ac_build_gather_values(&ctx->ac, desc_elems, 4);
}

static LLVMValueRef load_ubo(struct ac_shader_abi *abi,
                             unsigned desc_set, unsigned binding,
                             bool valid_binding, LLVMValueRef index)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   struct si_shader_selector *sel = ctx->shader->selector;

   LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);

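   /* Fast path: constant buffer 0 is the only buffer in the list, so its
    * descriptor can be built directly from the user-SGPR pointer.
    */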
   if (sel->info.base.num_ubos == 1 && sel->info.base.num_ssbos == 0) {
      return load_const_buffer_desc_fast_path(ctx);
   }

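   /* Constant buffers follow the shader buffers in the combined descriptor
    * list, hence the SI_NUM_SHADER_BUFFERS offset.
    */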
   index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
   index =
      LLVMBuildAdd(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, SI_NUM_SHADER_BUFFERS, 0), "");

   return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
}

static LLVMValueRef load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write,
                              bool non_uniform)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);

   /* Fast path if the shader buffer is in user SGPRs. */
   if (LLVMIsConstant(index) &&
       LLVMConstIntGetZExtValue(index) < ctx->shader->selector->cs_num_shaderbufs_in_user_sgprs)
      return ac_get_arg(&ctx->ac, ctx->cs_shaderbuf[LLVMConstIntGetZExtValue(index)]);

   LLVMValueRef rsrc_ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);

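   /* Shader buffers occupy the first SI_NUM_SHADER_BUFFERS slots of the
    * combined list in reverse order, so remap the index accordingly.
    */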
   index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
   index = LLVMBuildSub(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, SI_NUM_SHADER_BUFFERS - 1, 0),
                        index, "");

   return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
}

/**
 * Given a 256-bit resource descriptor, force the DCC enable bit to off.
 *
 * At least on Tonga, executing image stores on images with non-trivial DCC
 * enabled can eventually lead to lockups. This can occur when an
 * application binds an image as read-only but then uses a shader that writes
 * to it. The OpenGL spec allows almost arbitrarily bad behavior (including
 * program termination) in this case, but it doesn't cost much to be a bit
 * nicer: disabling DCC in the shader still leads to undefined results but
 * avoids the lockup.
 */
static LLVMValueRef force_dcc_off(struct si_shader_context *ctx, LLVMValueRef rsrc)
{
   if (ctx->screen->info.chip_class <= GFX7) {
      return rsrc;
   } else {
      LLVMValueRef i32_6 = LLVMConstInt(ctx->ac.i32, 6, 0);
      LLVMValueRef i32_C = LLVMConstInt(ctx->ac.i32, C_008F28_COMPRESSION_EN, 0);
      LLVMValueRef tmp;

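      /* C_008F28_COMPRESSION_EN is a clear mask, so the AND zeroes the DCC
       * enable bit in descriptor dword 6.
       */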
      tmp = LLVMBuildExtractElement(ctx->ac.builder, rsrc, i32_6, "");
      tmp = LLVMBuildAnd(ctx->ac.builder, tmp, i32_C, "");
      return LLVMBuildInsertElement(ctx->ac.builder, rsrc, tmp, i32_6, "");
   }
}

static LLVMValueRef force_write_compress_off(struct si_shader_context *ctx, LLVMValueRef rsrc)
{
   LLVMValueRef i32_6 = LLVMConstInt(ctx->ac.i32, 6, 0);
   LLVMValueRef i32_C = LLVMConstInt(ctx->ac.i32, C_00A018_WRITE_COMPRESS_ENABLE, 0);
   LLVMValueRef tmp;

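   /* Same pattern as force_dcc_off: clear WRITE_COMPRESS_ENABLE in dword 6
    * of the (GFX10+) image descriptor.
    */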
   tmp = LLVMBuildExtractElement(ctx->ac.builder, rsrc, i32_6, "");
   tmp = LLVMBuildAnd(ctx->ac.builder, tmp, i32_C, "");
   return LLVMBuildInsertElement(ctx->ac.builder, rsrc, tmp, i32_6, "");
}

static LLVMValueRef fixup_image_desc(struct si_shader_context *ctx, LLVMValueRef rsrc,
                                     bool uses_store)
{
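   /* Image stores with DCC can lock up pre-GFX10 chips; see force_dcc_off,
    * which is a no-op on GFX7 and older.
    */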
   if (uses_store && ctx->ac.chip_class <= GFX9)
      rsrc = force_dcc_off(ctx, rsrc);

   if (!uses_store && ctx->screen->info.has_image_load_dcc_bug &&
       ctx->screen->always_allow_dcc_stores)
      rsrc = force_write_compress_off(ctx, rsrc);

   return rsrc;
}

/* AC_DESC_FMASK is handled exactly like AC_DESC_IMAGE. The caller should
 * adjust "index" to point to FMASK. */
static LLVMValueRef si_load_image_desc(struct si_shader_context *ctx, LLVMValueRef list,
                                       LLVMValueRef index, enum ac_descriptor_type desc_type,
                                       bool uses_store, bool bindless)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef rsrc;

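   /* A buffer view is the 4-dword second half of the 8-dword image slot,
    * so view the list as v4i32 and remap the index: index * 2 + 1.
    */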
   if (desc_type == AC_DESC_BUFFER) {
      index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->ac.i32, 2, 0), ctx->ac.i32_1);
      list = LLVMBuildPointerCast(builder, list, ac_array_in_const32_addr_space(ctx->ac.v4i32), "");
   } else {
      assert(desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_FMASK);
   }

   if (bindless)
      rsrc = ac_build_load_to_sgpr_uint_wraparound(&ctx->ac, list, index);
   else
      rsrc = ac_build_load_to_sgpr(&ctx->ac, list, index);

   if (desc_type == AC_DESC_IMAGE)
      rsrc = fixup_image_desc(ctx, rsrc, uses_store);

   return rsrc;
}

/**
 * Load an image view, fmask view, or sampler state descriptor.
 */
static LLVMValueRef si_load_sampler_desc(struct si_shader_context *ctx, LLVMValueRef list,
                                         LLVMValueRef index, enum ac_descriptor_type type)
{
   LLVMBuilderRef builder = ctx->ac.builder;

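   /* Each texture slot is 16 dwords (two v8i32 list elements): image in
    * [0:7], FMASK in [8:15], sampler state in [12:15]. The index scaling
    * below converts a slot index into an element index for each view.
    */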
   switch (type) {
   case AC_DESC_IMAGE:
      /* The image is at [0:7]. */
      index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, 2, 0), "");
      break;
   case AC_DESC_BUFFER:
      /* The buffer is in [4:7]. */
      index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->ac.i32, 4, 0), ctx->ac.i32_1);
      list = LLVMBuildPointerCast(builder, list, ac_array_in_const32_addr_space(ctx->ac.v4i32), "");
      break;
   case AC_DESC_FMASK:
      /* The FMASK is at [8:15]. */
      index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->ac.i32, 2, 0), ctx->ac.i32_1);
      break;
   case AC_DESC_SAMPLER:
      /* The sampler state is at [12:15]. */
      index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->ac.i32, 4, 0),
                            LLVMConstInt(ctx->ac.i32, 3, 0));
      list = LLVMBuildPointerCast(builder, list, ac_array_in_const32_addr_space(ctx->ac.v4i32), "");
      break;
   case AC_DESC_PLANE_0:
   case AC_DESC_PLANE_1:
   case AC_DESC_PLANE_2:
      /* Only used for the multiplane image support for Vulkan. Should
       * never be reached in radeonsi.
       */
      unreachable("Plane descriptor requested in radeonsi.");
   }

   return ac_build_load_to_sgpr(&ctx->ac, list, index);
}

static LLVMValueRef si_nir_load_sampler_desc(struct ac_shader_abi *abi, unsigned descriptor_set,
                                             unsigned base_index, unsigned constant_index,
                                             LLVMValueRef dynamic_index,
                                             enum ac_descriptor_type desc_type, bool image,
                                             bool write, bool bindless)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   LLVMBuilderRef builder = ctx->ac.builder;
   unsigned const_index = base_index + constant_index;

   assert(!descriptor_set);
   assert(desc_type <= AC_DESC_BUFFER);

   if (bindless) {
      LLVMValueRef list = ac_get_arg(&ctx->ac, ctx->bindless_samplers_and_images);

      /* dynamic_index is the bindless handle */
      if (image) {
         /* Bindless image descriptors use 16-dword slots. */
         dynamic_index =
            LLVMBuildMul(ctx->ac.builder, dynamic_index, LLVMConstInt(ctx->ac.i64, 2, 0), "");
         /* FMASK is right after the image. */
         if (desc_type == AC_DESC_FMASK) {
            dynamic_index = LLVMBuildAdd(ctx->ac.builder, dynamic_index, ctx->ac.i32_1, "");
         }

         return si_load_image_desc(ctx, list, dynamic_index, desc_type, write, true);
      }

      /* Since bindless handle arithmetic can contain an unsigned integer
       * wraparound and si_load_sampler_desc assumes there isn't any,
       * use GEP without "inbounds" (inside ac_build_pointer_add)
       * to prevent incorrect code generation and hangs.
       */
      dynamic_index =
         LLVMBuildMul(ctx->ac.builder, dynamic_index, LLVMConstInt(ctx->ac.i64, 2, 0), "");
      list = ac_build_pointer_add(&ctx->ac, list, dynamic_index);
      return si_load_sampler_desc(ctx, list, ctx->ac.i32_0, desc_type);
   }

   unsigned num_slots = image ? ctx->num_images : ctx->num_samplers;
   assert(const_index < num_slots || dynamic_index);

   LLVMValueRef list = ac_get_arg(&ctx->ac, ctx->samplers_and_images);
   LLVMValueRef index = LLVMConstInt(ctx->ac.i32, const_index, false);

   if (dynamic_index) {
      index = LLVMBuildAdd(builder, index, dynamic_index, "");

      /* From the GL_ARB_shader_image_load_store extension spec:
       *
       *    If a shader performs an image load, store, or atomic
       *    operation using an image variable declared as an array,
       *    and if the index used to select an individual element is
       *    negative or greater than or equal to the size of the
       *    array, the results of the operation are undefined but may
       *    not lead to termination.
       */
      index = si_llvm_bound_index(ctx, index, num_slots);
   }

   if (image) {
      /* Fast path if the image is in user SGPRs. */
      if (!dynamic_index &&
          const_index < ctx->shader->selector->cs_num_images_in_user_sgprs &&
          (desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER)) {
         LLVMValueRef rsrc = ac_get_arg(&ctx->ac, ctx->cs_image[const_index]);

         if (desc_type == AC_DESC_IMAGE)
            rsrc = fixup_image_desc(ctx, rsrc, write);
         return rsrc;
      }

      /* FMASKs are separate from images. */
      if (desc_type == AC_DESC_FMASK) {
         index =
            LLVMBuildAdd(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, SI_NUM_IMAGES, 0), "");
      }
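      /* Image descriptors are stored in the list in reverse order, hence the
       * subtraction from the last slot index.
       */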
      index = LLVMBuildSub(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, SI_NUM_IMAGE_SLOTS - 1, 0),
                           index, "");
      return si_load_image_desc(ctx, list, index, desc_type, write, false);
   }

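   /* Samplers follow the images: SI_NUM_IMAGE_SLOTS 8-dword image slots
    * occupy SI_NUM_IMAGE_SLOTS / 2 of the 16-dword texture slots.
    */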
   index = LLVMBuildAdd(ctx->ac.builder, index,
                        LLVMConstInt(ctx->ac.i32, SI_NUM_IMAGE_SLOTS / 2, 0), "");
   return si_load_sampler_desc(ctx, list, index, desc_type);
}

void si_llvm_init_resource_callbacks(struct si_shader_context *ctx)
{
   ctx->abi.load_ubo = load_ubo;
   ctx->abi.load_ssbo = load_ssbo;
   ctx->abi.load_sampler_desc = si_nir_load_sampler_desc;
}
338