/*
 * Copyright (c) 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_shared_reference.cpp
 *
 * IR lower pass to replace dereferences of compute shader shared variables
 * with intrinsic function calls.
 *
 * This relieves drivers of the responsibility of allocating space for the
 * shared variables in the shared memory region.
 */
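
/* For example (illustrative only; the exact IR is built by the helpers
 * below), a compute shader containing
 *
 *    shared float f;
 *    ...
 *    f = 2.0;
 *    float g = f;
 *
 * is rewritten so that f is never referenced directly: the store goes
 * through __intrinsic_store_shared() and the load through
 * __intrinsic_load_shared(), each addressed by the byte offset that
 * get_shared_offset() assigns to f.
 */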

#include "lower_buffer_access.h"
#include "ir_builder.h"
#include "linker.h"
#include "main/macros.h"
#include "util/list.h"
#include "glsl_parser_extras.h"
#include "main/mtypes.h"

using namespace ir_builder;

namespace {

struct var_offset {
   struct list_head node;
   const ir_variable *var;
   unsigned offset;
};

class lower_shared_reference_visitor :
      public lower_buffer_access::lower_buffer_access {
public:

   lower_shared_reference_visitor(struct gl_linked_shader *shader)
      : buffer_access_type(shared_load_access),
      list_ctx(ralloc_context(NULL)), shader(shader), shared_size(0u),
      progress(false)
   {
      list_inithead(&var_offsets);
   }

   ~lower_shared_reference_visitor()
   {
      ralloc_free(list_ctx);
   }

   enum {
      shared_load_access,
      shared_store_access,
      shared_atomic_access,
   } buffer_access_type;

   void insert_buffer_access(void *mem_ctx, ir_dereference *deref,
                             const glsl_type *type, ir_rvalue *offset,
                             unsigned mask, int channel);

   void handle_rvalue(ir_rvalue **rvalue);
   ir_visitor_status visit_enter(ir_assignment *ir);
   void handle_assignment(ir_assignment *ir);

   ir_call *lower_shared_atomic_intrinsic(ir_call *ir);
   ir_call *check_for_shared_atomic_intrinsic(ir_call *ir);
   ir_visitor_status visit_enter(ir_call *ir);

   unsigned get_shared_offset(const ir_variable *);

   ir_call *shared_load(void *mem_ctx, const struct glsl_type *type,
                        ir_rvalue *offset);
   ir_call *shared_store(void *mem_ctx, ir_rvalue *deref, ir_rvalue *offset,
                         unsigned write_mask);

   void *list_ctx;
   struct gl_linked_shader *shader;
   struct list_head var_offsets;
   unsigned shared_size;
   bool progress;
};

unsigned
lower_shared_reference_visitor::get_shared_offset(const ir_variable *var)
{
   list_for_each_entry(var_offset, var_entry, &var_offsets, node) {
      if (var_entry->var == var)
         return var_entry->offset;
   }

   struct var_offset *new_entry = rzalloc(list_ctx, struct var_offset);
   list_add(&new_entry->node, &var_offsets);
   new_entry->var = var;

   unsigned var_align = var->type->std430_base_alignment(false);
   new_entry->offset = glsl_align(shared_size, var_align);

   unsigned var_size = var->type->std430_size(false);
   shared_size = new_entry->offset + var_size;

   return new_entry->offset;
}
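
/* A worked example of the layout above (illustrative): given
 *
 *    shared float a;   // std430: align 4,  size 4
 *    shared vec4  b;   // std430: align 16, size 16
 *
 * the first call places "a" at offset 0 and leaves shared_size at 4; the
 * second rounds 4 up to 16, places "b" at offset 16, and shared_size
 * becomes 32.
 */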

void
lower_shared_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
{
   if (!*rvalue)
      return;

   ir_dereference *deref = (*rvalue)->as_dereference();
   if (!deref)
      return;

   ir_variable *var = deref->variable_referenced();
   if (!var || var->data.mode != ir_var_shader_shared)
      return;

   buffer_access_type = shared_load_access;

   void *mem_ctx = ralloc_parent(shader->ir);

   ir_rvalue *offset = NULL;
   unsigned const_offset = get_shared_offset(var);
   bool row_major;
   const glsl_type *matrix_type;
   assert(var->get_interface_type() == NULL);
   const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;

   setup_buffer_access(mem_ctx, deref,
                       &offset, &const_offset,
                       &row_major, &matrix_type, NULL, packing);

   /* Now that we've calculated the offset to the start of the
    * dereference, walk over the type and emit loads into a temporary.
    */
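   /* Sketch of the result (illustrative): for a simple scalar or vector read
    * this amounts to
    *
    *    shared_load_temp_offset = <computed offset>;
    *    shared_load_temp = __intrinsic_load_shared(shared_load_temp_offset);
    *
    * after which the original rvalue is replaced by a dereference of
    * shared_load_temp.
    */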
   const glsl_type *type = (*rvalue)->type;
   ir_variable *load_var = new(mem_ctx) ir_variable(type,
                                                    "shared_load_temp",
                                                    ir_var_temporary);
   base_ir->insert_before(load_var);

   ir_variable *load_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
                                                       "shared_load_temp_offset",
                                                       ir_var_temporary);
   base_ir->insert_before(load_offset);
   base_ir->insert_before(assign(load_offset, offset));

   deref = new(mem_ctx) ir_dereference_variable(load_var);

   emit_access(mem_ctx, false, deref, load_offset, const_offset, row_major,
               matrix_type, packing, 0);

   *rvalue = deref;

   progress = true;
}

void
lower_shared_reference_visitor::handle_assignment(ir_assignment *ir)
{
   if (!ir || !ir->lhs)
      return;

   ir_rvalue *rvalue = ir->lhs->as_rvalue();
   if (!rvalue)
      return;

   ir_dereference *deref = ir->lhs->as_dereference();
   if (!deref)
      return;

   ir_variable *var = ir->lhs->variable_referenced();
   if (!var || var->data.mode != ir_var_shader_shared)
      return;

   buffer_access_type = shared_store_access;

   /* We have a write to a shared variable, so declare a temporary and rewrite
    * the assignment so that the temporary is the LHS.
    */
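   /* Sketch (illustrative): "f = expr;" with f shared becomes
    *
    *    shared_store_temp = expr;
    *    shared_store_temp_offset = <computed offset>;
    *    __intrinsic_store_shared(shared_store_temp_offset, shared_store_temp,
    *                             write_mask);
    *
    * using the temporaries declared below.
    */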
   void *mem_ctx = ralloc_parent(shader->ir);

   const glsl_type *type = rvalue->type;
   ir_variable *store_var = new(mem_ctx) ir_variable(type,
                                                     "shared_store_temp",
                                                     ir_var_temporary);
   base_ir->insert_before(store_var);
   ir->lhs = new(mem_ctx) ir_dereference_variable(store_var);

   ir_rvalue *offset = NULL;
   unsigned const_offset = get_shared_offset(var);
   bool row_major;
   const glsl_type *matrix_type;
   assert(var->get_interface_type() == NULL);
   const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;

   setup_buffer_access(mem_ctx, deref,
                       &offset, &const_offset,
                       &row_major, &matrix_type, NULL, packing);

   deref = new(mem_ctx) ir_dereference_variable(store_var);

   ir_variable *store_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
                                                        "shared_store_temp_offset",
                                                        ir_var_temporary);
   base_ir->insert_before(store_offset);
   base_ir->insert_before(assign(store_offset, offset));

   /* Now we have to write the value assigned to the temporary back to memory */
   emit_access(mem_ctx, true, deref, store_offset, const_offset, row_major,
               matrix_type, packing, ir->write_mask);

   progress = true;
}

ir_visitor_status
lower_shared_reference_visitor::visit_enter(ir_assignment *ir)
{
   handle_assignment(ir);
   return rvalue_visit(ir);
}

void
lower_shared_reference_visitor::insert_buffer_access(void *mem_ctx,
                                                     ir_dereference *deref,
                                                     const glsl_type *type,
                                                     ir_rvalue *offset,
                                                     unsigned mask,
                                                     int /* channel */)
{
   if (buffer_access_type == shared_store_access) {
      ir_call *store = shared_store(mem_ctx, deref, offset, mask);
      base_ir->insert_after(store);
   } else {
      ir_call *load = shared_load(mem_ctx, type, offset);
      base_ir->insert_before(load);
      ir_rvalue *value = load->return_deref->as_rvalue()->clone(mem_ctx, NULL);
      base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
                                    value));
   }
}

static bool
compute_shader_enabled(const _mesa_glsl_parse_state *state)
{
   return state->stage == MESA_SHADER_COMPUTE;
}

ir_call *
lower_shared_reference_visitor::shared_store(void *mem_ctx,
                                             ir_rvalue *deref,
                                             ir_rvalue *offset,
                                             unsigned write_mask)
{
   exec_list sig_params;

   ir_variable *offset_ref = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "offset", ir_var_function_in);
   sig_params.push_tail(offset_ref);

   ir_variable *val_ref = new(mem_ctx)
      ir_variable(deref->type, "value", ir_var_function_in);
   sig_params.push_tail(val_ref);

   ir_variable *writemask_ref = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "write_mask", ir_var_function_in);
   sig_params.push_tail(writemask_ref);

   ir_function_signature *sig = new(mem_ctx)
      ir_function_signature(glsl_type::void_type, compute_shader_enabled);
   assert(sig);
   sig->replace_parameters(&sig_params);
   sig->intrinsic_id = ir_intrinsic_shared_store;

   ir_function *f = new(mem_ctx) ir_function("__intrinsic_store_shared");
   f->add_signature(sig);

   exec_list call_params;
   call_params.push_tail(offset->clone(mem_ctx, NULL));
   call_params.push_tail(deref->clone(mem_ctx, NULL));
   call_params.push_tail(new(mem_ctx) ir_constant(write_mask));
   return new(mem_ctx) ir_call(sig, NULL, &call_params);
}
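
/* The signature built above yields calls of the shape (illustrative)
 *
 *    __intrinsic_store_shared(offset, value, write_mask)
 *
 * with a void return type; later compilation stages turn the intrinsic into
 * an actual shared-memory write.
 */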

ir_call *
lower_shared_reference_visitor::shared_load(void *mem_ctx,
                                            const struct glsl_type *type,
                                            ir_rvalue *offset)
{
   exec_list sig_params;

   ir_variable *offset_ref = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "offset_ref", ir_var_function_in);
   sig_params.push_tail(offset_ref);

   ir_function_signature *sig =
      new(mem_ctx) ir_function_signature(type, compute_shader_enabled);
   assert(sig);
   sig->replace_parameters(&sig_params);
   sig->intrinsic_id = ir_intrinsic_shared_load;

   ir_function *f = new(mem_ctx) ir_function("__intrinsic_load_shared");
   f->add_signature(sig);

   ir_variable *result = new(mem_ctx)
      ir_variable(type, "shared_load_result", ir_var_temporary);
   base_ir->insert_before(result);
   ir_dereference_variable *deref_result = new(mem_ctx)
      ir_dereference_variable(result);

   exec_list call_params;
   call_params.push_tail(offset->clone(mem_ctx, NULL));

   return new(mem_ctx) ir_call(sig, deref_result, &call_params);
}

/* Lowers the intrinsic call to a new internal intrinsic that replaces the
 * shared-variable access in the first parameter with an offset. This involves
 * creating the new internal intrinsic (i.e. the new function signature).
 */
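/* For example (illustrative), a GLSL call such as atomicAdd(s, 1u) on a
 * shared uint s becomes a call to an internal function named by appending
 * "_shared" to the original callee name (see the sprintf below), which takes
 * the byte offset of s in place of the variable itself.
 */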
ir_call *
lower_shared_reference_visitor::lower_shared_atomic_intrinsic(ir_call *ir)
{
   /* Shared atomics usually have 2 parameters, the shared variable and an
    * integer argument. The exception is CompSwap, which has an additional
    * integer parameter.
    */
   int param_count = ir->actual_parameters.length();
   assert(param_count == 2 || param_count == 3);

   /* First argument must be a scalar integer shared variable */
   exec_node *param = ir->actual_parameters.get_head();
   ir_instruction *inst = (ir_instruction *) param;
   assert(inst->ir_type == ir_type_dereference_variable ||
          inst->ir_type == ir_type_dereference_array ||
          inst->ir_type == ir_type_dereference_record ||
          inst->ir_type == ir_type_swizzle);

   ir_rvalue *deref = (ir_rvalue *) inst;
   assert(deref->type->is_scalar() &&
          (deref->type->is_integer_32_64() || deref->type->is_float()));

   ir_variable *var = deref->variable_referenced();
   assert(var);

   /* Compute the offset to the start of the dereference
    */
   void *mem_ctx = ralloc_parent(shader->ir);

   ir_rvalue *offset = NULL;
   unsigned const_offset = get_shared_offset(var);
   bool row_major;
   const glsl_type *matrix_type;
   assert(var->get_interface_type() == NULL);
   const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;
   buffer_access_type = shared_atomic_access;

   setup_buffer_access(mem_ctx, deref,
                       &offset, &const_offset,
                       &row_major, &matrix_type, NULL, packing);

   assert(offset);
   assert(!row_major);
   assert(matrix_type == NULL);

   ir_rvalue *deref_offset =
      add(offset, new(mem_ctx) ir_constant(const_offset));

   /* Create the new internal function signature that will take an offset
    * instead of a shared variable
    */
   exec_list sig_params;
   ir_variable *sig_param = new(mem_ctx)
      ir_variable(glsl_type::uint_type, "offset", ir_var_function_in);
   sig_params.push_tail(sig_param);

   const glsl_type *type = deref->type->get_scalar_type();
   sig_param = new(mem_ctx)
         ir_variable(type, "data1", ir_var_function_in);
   sig_params.push_tail(sig_param);

   if (param_count == 3) {
      sig_param = new(mem_ctx)
            ir_variable(type, "data2", ir_var_function_in);
      sig_params.push_tail(sig_param);
   }

   ir_function_signature *sig =
      new(mem_ctx) ir_function_signature(deref->type,
                                         compute_shader_enabled);
   assert(sig);
   sig->replace_parameters(&sig_params);

   assert(ir->callee->intrinsic_id >= ir_intrinsic_generic_load);
   assert(ir->callee->intrinsic_id <= ir_intrinsic_generic_atomic_comp_swap);
   sig->intrinsic_id = MAP_INTRINSIC_TO_TYPE(ir->callee->intrinsic_id, shared);

   char func_name[64];
   sprintf(func_name, "%s_shared", ir->callee_name());
   ir_function *f = new(mem_ctx) ir_function(func_name);
   f->add_signature(sig);

   /* Now, create the call to the internal intrinsic */
   exec_list call_params;
   call_params.push_tail(deref_offset);
   param = ir->actual_parameters.get_head()->get_next();
   ir_rvalue *param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
   call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
   if (param_count == 3) {
      param = param->get_next();
      param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
      call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
   }
   ir_dereference_variable *return_deref =
      ir->return_deref->clone(mem_ctx, NULL);
   return new(mem_ctx) ir_call(sig, return_deref, &call_params);
}

ir_call *
lower_shared_reference_visitor::check_for_shared_atomic_intrinsic(ir_call *ir)
{
   exec_list& params = ir->actual_parameters;

   if (params.length() < 2 || params.length() > 3)
      return ir;

   ir_rvalue *rvalue =
      ((ir_instruction *) params.get_head())->as_rvalue();
   if (!rvalue)
      return ir;

   ir_variable *var = rvalue->variable_referenced();
   if (!var || var->data.mode != ir_var_shader_shared)
      return ir;

   const enum ir_intrinsic_id id = ir->callee->intrinsic_id;
   if (id == ir_intrinsic_generic_atomic_add ||
       id == ir_intrinsic_generic_atomic_min ||
       id == ir_intrinsic_generic_atomic_max ||
       id == ir_intrinsic_generic_atomic_and ||
       id == ir_intrinsic_generic_atomic_or ||
       id == ir_intrinsic_generic_atomic_xor ||
       id == ir_intrinsic_generic_atomic_exchange ||
       id == ir_intrinsic_generic_atomic_comp_swap) {
      return lower_shared_atomic_intrinsic(ir);
   }

   return ir;
}

ir_visitor_status
lower_shared_reference_visitor::visit_enter(ir_call *ir)
{
   ir_call *new_ir = check_for_shared_atomic_intrinsic(ir);
   if (new_ir != ir) {
      progress = true;
      base_ir->replace_with(new_ir);
      return visit_continue_with_parent;
   }

   return rvalue_visit(ir);
}

} /* unnamed namespace */

void
lower_shared_reference(struct gl_context *ctx,
                       struct gl_shader_program *prog,
                       struct gl_linked_shader *shader)
{
   if (shader->Stage != MESA_SHADER_COMPUTE)
      return;

   lower_shared_reference_visitor v(shader);

   /* Loop over the instructions lowering references, because taking a deref
    * of a shared variable array using a shared variable dereference as the
    * index will produce a collection of instructions, all of which have
    * cloned shared variable dereferences for that array index.
    */
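   /* Illustrative case: "s[i] = 0;" where both s and i are shared. Lowering
    * the store clones the dereference of i into the offset expression, and
    * that cloned shared dereference is only lowered on the next iteration.
    */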
   do {
      v.progress = false;
      visit_list_elements(&v, shader->ir);
   } while (v.progress);

   prog->Comp.SharedSize = v.shared_size;

   /* Section 19.1 (Compute Shader Variables) of the OpenGL 4.5 (Core Profile)
    * specification says:
    *
    *   "There is a limit to the total size of all variables declared as
    *    shared in a single program object. This limit, expressed in units of
    *    basic machine units, may be queried as the value of
    *    MAX_COMPUTE_SHARED_MEMORY_SIZE."
    */
   if (prog->Comp.SharedSize > ctx->Const.MaxComputeSharedMemorySize) {
      linker_error(prog, "Too much shared memory used (%u/%u)\n",
                   prog->Comp.SharedSize,
                   ctx->Const.MaxComputeSharedMemorySize);
   }
}