/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

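/* Compute the fragment coordinate used to fetch the input attachment.  With
 * use_fragcoord_sysval this is the frag_coord system value; on ir3, the
 * unscaled_input_attachment_ir3/unscaled_depth_stencil_ir3 masks select the
 * unscaled frag_coord variant per attachment index.  Otherwise the
 * coordinate is read from a VARYING_SLOT_POS shader input.
 */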
static nir_def *
load_frag_coord(nir_builder *b, nir_deref_instr *deref,
                const nir_input_attachment_options *options)
{
   if (options->use_fragcoord_sysval) {
      nir_def *frag_coord = nir_load_frag_coord(b);
      if (options->unscaled_input_attachment_ir3 ||
          options->unscaled_depth_stencil_ir3) {
         nir_variable *var = nir_deref_instr_get_variable(deref);
         unsigned base = var->data.index;
         nir_def *unscaled_frag_coord = nir_load_frag_coord_unscaled_ir3(b);
         if (deref->deref_type == nir_deref_type_array &&
             options->unscaled_input_attachment_ir3) {
            nir_def *unscaled =
               nir_i2b(b, nir_iand(b, nir_ishr(b, nir_imm_int(b, options->unscaled_input_attachment_ir3 >> base), deref->arr.index.ssa),
                                   nir_imm_int(b, 1)));
            frag_coord = nir_bcsel(b, unscaled, unscaled_frag_coord, frag_coord);
         } else {
            assert(deref->deref_type == nir_deref_type_var);
            bool unscaled = base == NIR_VARIABLE_NO_INDEX ?
               options->unscaled_depth_stencil_ir3 :
               ((options->unscaled_input_attachment_ir3 >> base) & 1);
            frag_coord = unscaled ? unscaled_frag_coord : frag_coord;
         }
      }
      return frag_coord;
   }

   nir_variable *pos = nir_get_variable_with_location(b->shader, nir_var_shader_in,
                                                      VARYING_SLOT_POS, glsl_vec4_type());

   /**
    * From Vulkan spec:
    *   "The OriginLowerLeft execution mode must not be used; fragment entry
    *    points must declare OriginUpperLeft."
    *
    * So at this point origin_upper_left should be true
    */
   assert(b->shader->info.fs.origin_upper_left == true);

   return nir_load_var(b, pos);
}

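/* Return the value used as the attachment's array layer: the view index or
 * the layer ID, read either as a system value or as a flat shader input
 * depending on the options.
 */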
static nir_def *
load_layer_id(nir_builder *b, const nir_input_attachment_options *options)
{
   if (options->use_layer_id_sysval) {
      if (options->use_view_id_for_layer)
         return nir_load_view_index(b);
      else
         return nir_load_layer_id(b);
   }

   gl_varying_slot slot = options->use_view_id_for_layer ? VARYING_SLOT_VIEW_INDEX : VARYING_SLOT_LAYER;
   nir_variable *layer_id = nir_get_variable_with_location(b->shader, nir_var_shader_in,
                                                           slot, glsl_int_type());
   layer_id->data.interpolation = INTERP_MODE_FLAT;

   return nir_load_var(b, layer_id);
}

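/* Lower an image_deref_load/image_deref_sparse_load from a subpass (or
 * multisampled subpass) image into a txf/txf_ms texture fetch at
 * frag_coord + offset in the current layer.  Returns true if the load was
 * lowered.
 */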
static bool
try_lower_input_load(nir_builder *b, nir_intrinsic_instr *load,
                     const nir_input_attachment_options *options)
{
   nir_deref_instr *deref = nir_src_as_deref(load->src[0]);
   assert(glsl_type_is_image(deref->type));

   enum glsl_sampler_dim image_dim = glsl_get_sampler_dim(deref->type);
   if (image_dim != GLSL_SAMPLER_DIM_SUBPASS &&
       image_dim != GLSL_SAMPLER_DIM_SUBPASS_MS)
      return false;

   const bool multisampled = (image_dim == GLSL_SAMPLER_DIM_SUBPASS_MS);

   b->cursor = nir_instr_remove(&load->instr);

   nir_def *frag_coord = load_frag_coord(b, deref, options);
   frag_coord = nir_f2i32(b, frag_coord);
   nir_def *offset = nir_trim_vector(b, load->src[1].ssa, 2);
   nir_def *pos = nir_iadd(b, frag_coord, offset);

   nir_def *layer = load_layer_id(b, options);
   nir_def *coord =
      nir_vec3(b, nir_channel(b, pos, 0), nir_channel(b, pos, 1), layer);

   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3 + multisampled);

   tex->op = nir_texop_txf;
   tex->sampler_dim = image_dim;

   tex->dest_type =
      nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(deref->type));
   tex->is_array = true;
   tex->is_shadow = false;
   tex->is_sparse = load->intrinsic == nir_intrinsic_image_deref_sparse_load;

   tex->texture_index = 0;
   tex->sampler_index = 0;

   tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
                                     &deref->def);
   tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
   tex->coord_components = 3;

   tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(b, 0));

   if (image_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) {
      tex->op = nir_texop_txf_ms;
      tex->src[3].src_type = nir_tex_src_ms_index;
      tex->src[3].src = load->src[2];
   }

   tex->texture_non_uniform = nir_intrinsic_access(load) & ACCESS_NON_UNIFORM;

   nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32);
   nir_builder_instr_insert(b, &tex->instr);

   if (tex->is_sparse) {
      unsigned load_result_size = load->def.num_components - 1;
      nir_component_mask_t load_result_mask = nir_component_mask(load_result_size);
      nir_def *res = nir_channels(
         b, &tex->def, load_result_mask | 0x10);

      nir_def_rewrite_uses(&load->def, res);
   } else {
      nir_def_rewrite_uses(&load->def,
                           &tex->def);
   }

   return true;
}

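/* For fragment_fetch_amd/fragment_mask_fetch_amd on a multisampled subpass
 * image, rewrite the coordinate source to (frag_coord.xy, layer).
 */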
static bool
try_lower_input_texop(nir_builder *b, nir_tex_instr *tex,
                      const nir_input_attachment_options *options)
{
   nir_deref_instr *deref = nir_src_as_deref(tex->src[0].src);

   if (glsl_get_sampler_dim(deref->type) != GLSL_SAMPLER_DIM_SUBPASS_MS)
      return false;

   b->cursor = nir_before_instr(&tex->instr);

   nir_def *frag_coord = load_frag_coord(b, deref, options);
   frag_coord = nir_f2i32(b, frag_coord);

   nir_def *layer = load_layer_id(b, options);
   nir_def *coord = nir_vec3(b, nir_channel(b, frag_coord, 0),
                             nir_channel(b, frag_coord, 1), layer);

   tex->coord_components = 3;

   nir_src_rewrite(&tex->src[1].src, coord);

   return true;
}

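/* Per-instruction callback for nir_shader_instructions_pass: dispatch
 * texture and image-load instructions that read subpass inputs to the
 * lowering helpers above.
 */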
static bool
lower_input_attachments_instr(nir_builder *b, nir_instr *instr, void *_data)
{
   const nir_input_attachment_options *options = _data;

   switch (instr->type) {
   case nir_instr_type_tex: {
      nir_tex_instr *tex = nir_instr_as_tex(instr);

      if (tex->op == nir_texop_fragment_mask_fetch_amd ||
          tex->op == nir_texop_fragment_fetch_amd)
         return try_lower_input_texop(b, tex, options);

      return false;
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

      if (load->intrinsic == nir_intrinsic_image_deref_load ||
          load->intrinsic == nir_intrinsic_image_deref_sparse_load)
         return try_lower_input_load(b, load, options);

      return false;
   }

   default:
      return false;
   }
}

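/* Lower input attachment access in a fragment shader to texel fetches,
 * following the given options.  Returns true if any instruction was
 * changed.
 */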
bool
nir_lower_input_attachments(nir_shader *shader,
                            const nir_input_attachment_options *options)
{
   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

   return nir_shader_instructions_pass(shader, lower_input_attachments_instr,
                                       nir_metadata_control_flow,
                                       (void *)options);
}