/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This is ported mostly out of radeonsi; if we can drop TGSI, we can likely
 * make a lot of this go away.
 */

#include "nir_to_tgsi_info.h"
#include "util/u_math.h"
#include "util/u_prim.h"
#include "nir.h"
#include "nir_deref.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_from_mesa.h"

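/* Return the variable backing the texture deref source of a tex instruction,
 * or NULL if the instruction has no texture deref source.
 */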
static nir_variable* tex_get_texture_var(const nir_tex_instr *instr)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
      default:
         break;
      }
   }

   return NULL;
}

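/* Return the variable referenced by the first (deref) source of a
 * load/store/interp intrinsic.
 */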
static nir_variable* intrinsic_get_var(const nir_intrinsic_instr *instr)
{
   return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
}

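/* Walk the remainder of a deref path and accumulate which components of which
 * input slots are used into usage_mask (one 4-bit mask per vec4 slot). For
 * indirectly indexed arrays, every possible element is marked as used.
 */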
static void gather_usage_helper(const nir_deref_instr **deref_ptr,
                                unsigned location,
                                uint8_t mask,
                                uint8_t *usage_mask)
{
   for (; *deref_ptr; deref_ptr++) {
      const nir_deref_instr *deref = *deref_ptr;
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         bool is_compact = nir_deref_instr_get_variable(deref)->data.compact;
         unsigned elem_size = is_compact ? DIV_ROUND_UP(glsl_get_length(deref->type), 4) :
            glsl_count_attribute_slots(deref->type, false);
         if (nir_src_is_const(deref->arr.index)) {
            if (is_compact) {
               location += nir_src_as_uint(deref->arr.index) / 4;
               mask <<= nir_src_as_uint(deref->arr.index) % 4;
            } else
               location += elem_size * nir_src_as_uint(deref->arr.index);
         } else {
            unsigned array_elems =
               glsl_get_length(deref_ptr[-1]->type);
            for (unsigned i = 0; i < array_elems; i++) {
               gather_usage_helper(deref_ptr + 1,
                                   location + elem_size * i,
                                   mask, usage_mask);
            }
            return;
         }
         break;
      }
      case nir_deref_type_struct: {
         const struct glsl_type *parent_type =
            deref_ptr[-1]->type;
         unsigned index = deref->strct.index;
         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            location += glsl_count_attribute_slots(ft, false);
         }
         break;
      }
      default:
         unreachable("Unhandled deref type in gather_usage_helper");
      }
   }

   usage_mask[location] |= mask & 0xf;
   if (mask & 0xf0)
      usage_mask[location + 1] |= (mask >> 4) & 0xf;
}

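/* Record which components of a shader input are read by a deref, expanding
 * 64-bit types to two components per element and offsetting by the variable's
 * location_frac before delegating to gather_usage_helper().
 */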
static void gather_usage(const nir_deref_instr *deref,
                         uint8_t mask,
                         uint8_t *usage_mask)
{
   nir_deref_path path;
   nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);

   unsigned location_frac = path.path[0]->var->data.location_frac;
   if (glsl_type_is_64bit(deref->type)) {
      uint8_t new_mask = 0;
      for (unsigned i = 0; i < 4; i++) {
         if (mask & (1 << i))
            new_mask |= 0x3 << (2 * i);
      }
      mask = new_mask << location_frac;
   } else {
      mask <<= location_frac;
      mask &= 0xf;
   }

   gather_usage_helper((const nir_deref_instr **)&path.path[1],
                       path.path[0]->var->data.driver_location,
                       mask, usage_mask);

   nir_deref_path_finish(&path);
}

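/* Gather the tgsi_shader_info state affected by a load_deref of a shader
 * input: per-component usage masks for fragment shaders, and colors_read /
 * uses_frontface for the matching varying semantics in non-vertex stages.
 */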
static void gather_intrinsic_load_deref_info(const nir_shader *nir,
                                             const nir_intrinsic_instr *instr,
                                             const nir_deref_instr *deref,
                                             bool need_texcoord,
                                             const nir_variable *var,
                                             struct tgsi_shader_info *info)
{
   assert(var && var->data.mode == nir_var_shader_in);

   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa),
                   info->input_usage_mask);

   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX: {
      break;
   }
   default: {
      unsigned semantic_name, semantic_index;
      tgsi_get_gl_varying_semantic(var->data.location, need_texcoord,
                                   &semantic_name, &semantic_index);

      if (semantic_name == TGSI_SEMANTIC_COLOR) {
         uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
         info->colors_read |= mask << (semantic_index * 4);
      }
      if (semantic_name == TGSI_SEMANTIC_FACE) {
         info->uses_frontface = true;
      }
      break;
   }
   }
}

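/* Scan a single NIR instruction and set the tgsi_shader_info flags it
 * implies: derivative usage, bindless resource usage, memory writes, system
 * value reads, and the interpolation modes used by input loads.
 */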
static void scan_instruction(const struct nir_shader *nir,
                             bool need_texcoord,
                             struct tgsi_shader_info *info,
                             const nir_instr *instr)
{
   if (instr->type == nir_instr_type_alu) {
      const nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_fddx:
      case nir_op_fddy:
      case nir_op_fddx_fine:
      case nir_op_fddy_fine:
      case nir_op_fddx_coarse:
      case nir_op_fddy_coarse:
         info->uses_derivatives = true;
         break;
      default:
         break;
      }
   } else if (instr->type == nir_instr_type_tex) {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      const nir_variable *texture = tex_get_texture_var(tex);

      if (texture && texture->data.bindless)
         info->uses_bindless_samplers = true;

      switch (tex->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_lod:
         info->uses_derivatives = true;
         break;
      default:
         break;
      }
   } else if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_front_face:
         info->uses_frontface = 1;
         break;
      case nir_intrinsic_load_instance_id:
         info->uses_instanceid = 1;
         break;
      case nir_intrinsic_load_invocation_id:
         info->uses_invocationid = true;
         break;
      case nir_intrinsic_load_num_workgroups:
         info->uses_grid_size = true;
         break;
      case nir_intrinsic_load_workgroup_size:
         /* The block size is translated to IMM with a fixed block size. */
         if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
            info->uses_block_size = true;
         break;
      case nir_intrinsic_load_local_invocation_id:
      case nir_intrinsic_load_workgroup_id: {
         unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
         while (mask) {
            unsigned i = u_bit_scan(&mask);

            if (intr->intrinsic == nir_intrinsic_load_workgroup_id)
               info->uses_block_id[i] = true;
            else
               info->uses_thread_id[i] = true;
         }
         break;
      }
      case nir_intrinsic_load_vertex_id:
         info->uses_vertexid = 1;
         break;
      case nir_intrinsic_load_vertex_id_zero_base:
         info->uses_vertexid_nobase = 1;
         break;
      case nir_intrinsic_load_base_vertex:
         info->uses_basevertex = 1;
         break;
      case nir_intrinsic_load_draw_id:
         info->uses_drawid = 1;
         break;
      case nir_intrinsic_load_primitive_id:
         info->uses_primid = 1;
         break;
      case nir_intrinsic_load_sample_mask_in:
         info->reads_samplemask = true;
         break;
      case nir_intrinsic_load_tess_level_inner:
      case nir_intrinsic_load_tess_level_outer:
         info->reads_tess_factors = true;
         break;
      case nir_intrinsic_bindless_image_load:
         info->uses_bindless_images = true;

         if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
            info->uses_bindless_buffer_load = true;
         else
            info->uses_bindless_image_load = true;
         break;
      case nir_intrinsic_bindless_image_size:
      case nir_intrinsic_bindless_image_samples:
         info->uses_bindless_images = true;
         break;
      case nir_intrinsic_bindless_image_store:
         info->uses_bindless_images = true;

         if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
            info->uses_bindless_buffer_store = true;
         else
            info->uses_bindless_image_store = true;

         info->writes_memory = true;
         break;
      case nir_intrinsic_image_deref_store:
         info->writes_memory = true;
         break;
      case nir_intrinsic_bindless_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_imin:
      case nir_intrinsic_bindless_image_atomic_imax:
      case nir_intrinsic_bindless_image_atomic_umin:
      case nir_intrinsic_bindless_image_atomic_umax:
      case nir_intrinsic_bindless_image_atomic_and:
      case nir_intrinsic_bindless_image_atomic_or:
      case nir_intrinsic_bindless_image_atomic_xor:
      case nir_intrinsic_bindless_image_atomic_exchange:
      case nir_intrinsic_bindless_image_atomic_comp_swap:
         info->uses_bindless_images = true;

         if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
            info->uses_bindless_buffer_atomic = true;
         else
            info->uses_bindless_image_atomic = true;

         info->writes_memory = true;
         break;
      case nir_intrinsic_image_deref_atomic_add:
      case nir_intrinsic_image_deref_atomic_imin:
      case nir_intrinsic_image_deref_atomic_imax:
      case nir_intrinsic_image_deref_atomic_umin:
      case nir_intrinsic_image_deref_atomic_umax:
      case nir_intrinsic_image_deref_atomic_and:
      case nir_intrinsic_image_deref_atomic_or:
      case nir_intrinsic_image_deref_atomic_xor:
      case nir_intrinsic_image_deref_atomic_exchange:
      case nir_intrinsic_image_deref_atomic_comp_swap:
         info->writes_memory = true;
         break;
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_ssbo_atomic_add:
      case nir_intrinsic_ssbo_atomic_imin:
      case nir_intrinsic_ssbo_atomic_umin:
      case nir_intrinsic_ssbo_atomic_imax:
      case nir_intrinsic_ssbo_atomic_umax:
      case nir_intrinsic_ssbo_atomic_and:
      case nir_intrinsic_ssbo_atomic_or:
      case nir_intrinsic_ssbo_atomic_xor:
      case nir_intrinsic_ssbo_atomic_exchange:
      case nir_intrinsic_ssbo_atomic_comp_swap:
         info->writes_memory = true;
         break;
      case nir_intrinsic_load_deref: {
         const nir_variable *var = intrinsic_get_var(intr);
         const nir_variable_mode mode = var->data.mode;
         nir_deref_instr *const deref = nir_src_as_deref(intr->src[0]);
         enum glsl_base_type base_type =
            glsl_get_base_type(glsl_without_array(var->type));

         if (nir_deref_instr_has_indirect(deref)) {
            if (mode == nir_var_shader_in)
               info->indirect_files |= (1 << TGSI_FILE_INPUT);
         }
         if (mode == nir_var_shader_in) {
            gather_intrinsic_load_deref_info(nir, intr, deref, need_texcoord, var, info);

            switch (var->data.interpolation) {
            case INTERP_MODE_NONE:
               if (glsl_base_type_is_integer(base_type))
                  break;

               FALLTHROUGH;
            case INTERP_MODE_SMOOTH:
               if (var->data.sample)
                  info->uses_persp_sample = true;
               else if (var->data.centroid)
                  info->uses_persp_centroid = true;
               else
                  info->uses_persp_center = true;
               break;

            case INTERP_MODE_NOPERSPECTIVE:
               if (var->data.sample)
                  info->uses_linear_sample = true;
               else if (var->data.centroid)
                  info->uses_linear_centroid = true;
               else
                  info->uses_linear_center = true;
               break;
            }
         }
         break;
      }
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset: {
         enum glsl_interp_mode interp = intrinsic_get_var(intr)->data.interpolation;
         switch (interp) {
         case INTERP_MODE_SMOOTH:
         case INTERP_MODE_NONE:
            if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
               info->uses_persp_opcode_interp_centroid = true;
            else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
               info->uses_persp_opcode_interp_sample = true;
            else
               info->uses_persp_opcode_interp_offset = true;
            break;
         case INTERP_MODE_NOPERSPECTIVE:
            if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
               info->uses_linear_opcode_interp_centroid = true;
            else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
               info->uses_linear_opcode_interp_sample = true;
            else
               info->uses_linear_opcode_interp_offset = true;
            break;
         case INTERP_MODE_FLAT:
            break;
         default:
            unreachable("Unsupported interpolation type");
         }
         break;
      }
      default:
         break;
      }
   }
}

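/* Fill *info with TGSI-style shader information gathered from a NIR shader.
 * This plays the same role as tgsi_scan_shader() for drivers that consume
 * NIR but still key state off tgsi_shader_info.
 *
 * A minimal usage sketch (the NIR shader "nir" and the need_texcoord flag
 * are assumed to come from the caller, e.g. a driver's shader-create path;
 * zero-initialize info first, since the scanner ORs into many fields):
 *
 *    struct tgsi_shader_info info;
 *    memset(&info, 0, sizeof(info));
 *    nir_tgsi_scan_shader(nir, &info, need_texcoord);
 *
 * Afterwards info.num_inputs, info.num_outputs, the semantic arrays and the
 * various uses_* / writes_* flags describe the shader.
 */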
void nir_tgsi_scan_shader(const struct nir_shader *nir,
                          struct tgsi_shader_info *info,
                          bool need_texcoord)
{
   unsigned i;

   info->processor = pipe_shader_type_from_mesa(nir->info.stage);
   info->num_tokens = 2; /* indicate that the shader is non-empty */
   info->num_instructions = 2;

   info->properties[TGSI_PROPERTY_NEXT_SHADER] =
      pipe_shader_type_from_mesa(nir->info.next_stage);

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
         nir->info.vs.window_space_position;
   }

   if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
      info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
         nir->info.tess.tcs_vertices_out;
   }

   if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
      info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = u_tess_prim_from_shader(nir->info.tess._primitive_mode);

      STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_ODD);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_EVEN);

      info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
      info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
      info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
      info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
      info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
      info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
      info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
   }

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
         nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
      info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;
      info->uses_fbfetch = nir->info.fs.uses_fbfetch_output;

      if (nir->info.fs.pixel_center_integer) {
         info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
            TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
      }

      if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
         switch (nir->info.fs.depth_layout) {
         case FRAG_DEPTH_LAYOUT_ANY:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
            break;
         case FRAG_DEPTH_LAYOUT_GREATER:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
            break;
         case FRAG_DEPTH_LAYOUT_LESS:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
            break;
         case FRAG_DEPTH_LAYOUT_UNCHANGED:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
            break;
         default:
            unreachable("Unknown depth layout");
         }
      }
   }

   if (gl_shader_stage_is_compute(nir->info.stage)) {
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.workgroup_size[0];
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.workgroup_size[1];
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.workgroup_size[2];
   }

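   /* Translate each shader input variable into TGSI input semantics and
    * interpolation info, one vec4 slot at a time.
    */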
   i = 0;
   uint64_t processed_inputs = 0;
   nir_foreach_shader_in_variable(variable, nir) {
      unsigned semantic_name, semantic_index;

      const struct glsl_type *type = variable->type;
      if (nir_is_arrayed_io(variable, nir->info.stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned attrib_count = variable->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4) :
         glsl_count_attribute_slots(type, nir->info.stage == MESA_SHADER_VERTEX);

      i = variable->data.driver_location;

      /* Vertex shader inputs don't have semantics. The state
       * tracker has already mapped them to attributes via
       * variable->data.driver_location.
       */
      if (nir->info.stage == MESA_SHADER_VERTEX) {
         continue;
      }

      for (unsigned j = 0; j < attrib_count; j++, i++) {

         if (processed_inputs & ((uint64_t)1 << i))
            continue;

         processed_inputs |= ((uint64_t)1 << i);

         tgsi_get_gl_varying_semantic(variable->data.location + j, need_texcoord,
                                      &semantic_name, &semantic_index);

         info->input_semantic_name[i] = semantic_name;
         info->input_semantic_index[i] = semantic_index;

         if (semantic_name == TGSI_SEMANTIC_PRIMID)
            info->uses_primid = true;

         enum glsl_base_type base_type =
            glsl_get_base_type(glsl_without_array(variable->type));

         if (variable->data.centroid)
            info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
         if (variable->data.sample)
            info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;

         switch (variable->data.interpolation) {
         case INTERP_MODE_NONE:
            if (glsl_base_type_is_integer(base_type)) {
               info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
               break;
            }

            if (semantic_name == TGSI_SEMANTIC_COLOR) {
               info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
               break;
            }
            FALLTHROUGH;

         case INTERP_MODE_SMOOTH:
            assert(!glsl_base_type_is_integer(base_type));

            info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;

         case INTERP_MODE_NOPERSPECTIVE:
            assert(!glsl_base_type_is_integer(base_type));

            info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
            break;

         case INTERP_MODE_FLAT:
            info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
            break;
         }
      }
   }

   info->num_inputs = nir->num_inputs;
   if (nir->info.io_lowered) {
      info->num_inputs = util_bitcount64(nir->info.inputs_read);
      if (nir->info.inputs_read_indirectly)
         info->indirect_files |= 1 << TGSI_FILE_INPUT;
      info->file_max[TGSI_FILE_INPUT] = info->num_inputs - 1;
   } else {
      int max = info->file_max[TGSI_FILE_INPUT] = -1;
      nir_foreach_shader_in_variable(var, nir) {
         int slots = glsl_count_attribute_slots(var->type, false);
         int tmax = var->data.driver_location + slots - 1;
         if (tmax > max)
            max = tmax;
         info->file_max[TGSI_FILE_INPUT] = max;
      }
   }

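   /* Gather output semantics, per-component write masks, and GS stream
    * assignments for each shader output variable.
    */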
   i = 0;
   uint64_t processed_outputs = 0;
   unsigned num_outputs = 0;
   nir_foreach_shader_out_variable(variable, nir) {
      unsigned semantic_name, semantic_index;

      i = variable->data.driver_location;

      const struct glsl_type *type = variable->type;
      if (nir_is_arrayed_io(variable, nir->info.stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned attrib_count = variable->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4) :
         glsl_count_attribute_slots(type, false);
      for (unsigned k = 0; k < attrib_count; k++, i++) {

         if (nir->info.stage == MESA_SHADER_FRAGMENT) {
            tgsi_get_gl_frag_result_semantic(variable->data.location + k,
                                             &semantic_name, &semantic_index);

            /* Adjust for dual source blending */
            if (variable->data.index > 0) {
               semantic_index++;
            }
         } else {
            tgsi_get_gl_varying_semantic(variable->data.location + k, need_texcoord,
                                         &semantic_name, &semantic_index);
         }

         unsigned num_components = 4;
         unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
         if (vector_elements)
            num_components = vector_elements;

         unsigned component = variable->data.location_frac;
         if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
            if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
               num_components = (num_components * 2) - 4;
               component = 0;
            } else {
               num_components = MIN2(num_components * 2, 4);
            }
         }

         ubyte usagemask = 0;
         for (unsigned j = component; j < num_components + component; j++) {
            switch (j) {
            case 0:
               usagemask |= TGSI_WRITEMASK_X;
               break;
            case 1:
               usagemask |= TGSI_WRITEMASK_Y;
               break;
            case 2:
               usagemask |= TGSI_WRITEMASK_Z;
               break;
            case 3:
               usagemask |= TGSI_WRITEMASK_W;
               break;
            default:
               unreachable("error calculating component index");
            }
         }

         unsigned gs_out_streams;
         if (variable->data.stream & NIR_STREAM_PACKED) {
            gs_out_streams = variable->data.stream & ~NIR_STREAM_PACKED;
         } else {
            assert(variable->data.stream < 4);
            gs_out_streams = 0;
            for (unsigned j = 0; j < num_components; ++j)
               gs_out_streams |= variable->data.stream << (2 * (component + j));
         }

         unsigned streamx = gs_out_streams & 3;
         unsigned streamy = (gs_out_streams >> 2) & 3;
         unsigned streamz = (gs_out_streams >> 4) & 3;
         unsigned streamw = (gs_out_streams >> 6) & 3;

         if (usagemask & TGSI_WRITEMASK_X) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_X;
            info->output_streams[i] |= streamx;
            info->num_stream_output_components[streamx]++;
         }
         if (usagemask & TGSI_WRITEMASK_Y) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
            info->output_streams[i] |= streamy << 2;
            info->num_stream_output_components[streamy]++;
         }
         if (usagemask & TGSI_WRITEMASK_Z) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
            info->output_streams[i] |= streamz << 4;
            info->num_stream_output_components[streamz]++;
         }
         if (usagemask & TGSI_WRITEMASK_W) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_W;
            info->output_streams[i] |= streamw << 6;
            info->num_stream_output_components[streamw]++;
         }

         /* make sure we only count this location once against
          * the num_outputs counter.
          */
         if (processed_outputs & ((uint64_t)1 << i))
            continue;

         processed_outputs |= ((uint64_t)1 << i);
         num_outputs++;

         info->output_semantic_name[i] = semantic_name;
         info->output_semantic_index[i] = semantic_index;

         switch (semantic_name) {
         case TGSI_SEMANTIC_PRIMID:
            info->writes_primid = true;
            break;
         case TGSI_SEMANTIC_VIEWPORT_INDEX:
            info->writes_viewport_index = true;
            break;
         case TGSI_SEMANTIC_LAYER:
            info->writes_layer = true;
            break;
         case TGSI_SEMANTIC_PSIZE:
            info->writes_psize = true;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            info->writes_clipvertex = true;
            break;
         case TGSI_SEMANTIC_COLOR:
            info->colors_written |= 1 << semantic_index;
            break;
         case TGSI_SEMANTIC_STENCIL:
            if (!variable->data.fb_fetch_output)
               info->writes_stencil = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->writes_samplemask = true;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->writes_edgeflag = true;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (info->processor == PIPE_SHADER_FRAGMENT) {
               if (!variable->data.fb_fetch_output)
                  info->writes_z = true;
            } else {
               info->writes_position = true;
            }
            break;
         }

         if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
            switch (semantic_name) {
            case TGSI_SEMANTIC_PATCH:
               info->reads_perpatch_outputs = true;
               break;
            case TGSI_SEMANTIC_TESSINNER:
            case TGSI_SEMANTIC_TESSOUTER:
               info->reads_tessfactor_outputs = true;
               break;
            default:
               info->reads_pervertex_outputs = true;
            }
         }
      }

      unsigned loc = variable->data.location;
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          loc == FRAG_RESULT_COLOR &&
          nir->info.outputs_written & (1ull << loc)) {
         assert(attrib_count == 1);
         info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
      }
   }

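   /* If I/O has already been lowered to explicit intrinsics, the output
    * variables above may be gone; derive the output info from
    * outputs_written instead.
    */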
   if (nir->info.io_lowered) {
      uint64_t outputs_written = nir->info.outputs_written;

      while (outputs_written) {
         unsigned location = u_bit_scan64(&outputs_written);
         unsigned i = util_bitcount64(nir->info.outputs_written &
                                      BITFIELD64_MASK(location));
         unsigned semantic_name, semantic_index;

         tgsi_get_gl_varying_semantic(location, need_texcoord,
                                      &semantic_name, &semantic_index);

         info->output_semantic_name[i] = semantic_name;
         info->output_semantic_index[i] = semantic_index;
         info->output_usagemask[i] = 0xf;
      }
      num_outputs = util_bitcount64(nir->info.outputs_written);
      if (nir->info.outputs_accessed_indirectly)
         info->indirect_files |= 1 << TGSI_FILE_OUTPUT;
   }

   info->num_outputs = num_outputs;

   info->const_file_max[0] = nir->num_uniforms - 1;
   info->images_declared = nir->info.images_used[0];
   info->samplers_declared = nir->info.textures_used[0];

   info->file_max[TGSI_FILE_SAMPLER] = BITSET_LAST_BIT(nir->info.samplers_used) - 1;
   info->file_max[TGSI_FILE_SAMPLER_VIEW] = BITSET_LAST_BIT(nir->info.textures_used) - 1;
   info->file_mask[TGSI_FILE_SAMPLER] = nir->info.samplers_used[0];
   info->file_mask[TGSI_FILE_SAMPLER_VIEW] = nir->info.textures_used[0];
   info->file_max[TGSI_FILE_IMAGE] = BITSET_LAST_BIT(nir->info.images_used) - 1;
   info->file_mask[TGSI_FILE_IMAGE] = info->images_declared;

   info->num_written_clipdistance = nir->info.clip_distance_array_size;
   info->num_written_culldistance = nir->info.cull_distance_array_size;
   info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
   info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);

   if (info->processor == PIPE_SHADER_FRAGMENT)
      info->uses_kill = nir->info.fs.uses_discard;

   nir_function *func = (struct nir_function *)
      exec_list_get_head_const(&nir->functions);

   nir_foreach_block(block, func->impl) {
      nir_foreach_instr(instr, block)
         scan_instruction(nir, need_texcoord, info, instr);
   }
}