/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ac_nir.h"
#include "nir_builder.h"
nir_ssa_def *
ac_nir_load_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg)
{
   unsigned num_components = ac_args->args[arg.arg_index].size;

   if (ac_args->args[arg.arg_index].file == AC_ARG_SGPR)
      return nir_load_scalar_arg_amd(b, num_components, .base = arg.arg_index);
   else
      return nir_load_vector_arg_amd(b, num_components, .base = arg.arg_index);
}
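
/* Example (sketch, not part of the real callers; assumes the ac_add_arg()
 * helper and the base_vertex field from ac_shader_args.h): for an SGPR
 * argument the driver registered with something like
 *
 *    ac_add_arg(ac_args, AC_ARG_SGPR, 1, AC_ARG_INT, &ac_args->base_vertex);
 *
 * a lowering pass could materialize its value in NIR with
 *
 *    nir_ssa_def *base_vertex = ac_nir_load_arg(b, ac_args, ac_args->base_vertex);
 *
 * which emits nir_load_scalar_arg_amd with .base set to the argument index
 * (or nir_load_vector_arg_amd for a VGPR argument).
 */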

/**
 * This function takes an I/O intrinsic like load/store_input and emits a
 * sequence that calculates its full offset: the mapped base slot and the
 * indirect offset source are each scaled by the per-slot stride, and the
 * component is scaled by the per-component stride.
 */
nir_ssa_def *
ac_nir_calc_io_offset(nir_builder *b,
                      nir_intrinsic_instr *intrin,
                      nir_ssa_def *base_stride,
                      unsigned component_stride,
                      ac_nir_map_io_driver_location map_io)
{
   unsigned base = nir_intrinsic_base(intrin);
   unsigned semantic = nir_intrinsic_io_semantics(intrin).location;
   unsigned mapped_driver_location = map_io ? map_io(semantic) : base;

   /* base is the driver_location, which is in slots (1 slot = 4x4 bytes) */
   nir_ssa_def *base_op = nir_imul_imm(b, base_stride, mapped_driver_location);

   /* offset should be interpreted in relation to the base,
    * so the instruction effectively reads/writes another input/output
    * when it has an offset
    */
   nir_ssa_def *offset_op = nir_imul(b, base_stride, nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));

   /* component is counted in components; component_stride converts it to bytes */
   unsigned const_op = nir_intrinsic_component(intrin) * component_stride;

   return nir_iadd_imm_nuw(b, nir_iadd_nuw(b, base_op, offset_op), const_op);
}
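
/* Worked example (sketch, with illustrative values): for a store_output whose
 * base maps to slot 2, with an indirect offset source "idx", component 1,
 * component_stride = 4 and a constant base_stride of 16 bytes per slot, this
 * returns
 *
 *    16 * 2 + 16 * idx + 1 * 4
 *
 * i.e. base_stride * mapped_driver_location + base_stride * offset_src +
 * component * component_stride.
 */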

bool
ac_nir_lower_indirect_derefs(nir_shader *shader,
                             enum amd_gfx_level gfx_level)
{
   bool progress = false;

   /* Lower large variables to scratch first so that we won't bloat the
    * shader by generating large if-ladders for them. We later lower
    * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
    */
   NIR_PASS(progress, shader, nir_lower_vars_to_scratch, nir_var_function_temp, 256,
            glsl_get_natural_size_align_bytes);

   /* LLVM doesn't support VGPR indexing on GFX9. */
   bool llvm_has_working_vgpr_indexing = gfx_level != GFX9;

   /* TODO: Indirect indexing of GS inputs is unimplemented.
    *
    * TCS and TES load inputs directly from LDS or offchip memory, so
    * indirect indexing is trivial.
    */
   nir_variable_mode indirect_mask = 0;
   if (shader->info.stage == MESA_SHADER_GEOMETRY ||
       (shader->info.stage != MESA_SHADER_TESS_CTRL && shader->info.stage != MESA_SHADER_TESS_EVAL &&
        !llvm_has_working_vgpr_indexing)) {
      indirect_mask |= nir_var_shader_in;
   }
   if (!llvm_has_working_vgpr_indexing && shader->info.stage != MESA_SHADER_TESS_CTRL)
      indirect_mask |= nir_var_shader_out;

   /* TODO: We shouldn't need to do this, however LLVM isn't currently
    * smart enough to handle indirects without causing excessive spilling,
    * which makes the GPU hang.
    *
    * See the following thread for more details of the problem:
    * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
    */
   indirect_mask |= nir_var_function_temp;

   NIR_PASS(progress, shader, nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
   return progress;
}
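
/* Example (sketch): a driver would typically run this while preparing NIR for
 * the LLVM backend and re-run its NIR optimization loop when the pass reports
 * progress, e.g.
 *
 *    if (ac_nir_lower_indirect_derefs(nir, gfx_level))
 *       ... run the shared NIR optimization passes again ...
 */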