/*
 * Copyright © 2017 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ir3_nir.h"
#include "compiler/nir/nir_builder.h"

/* A4XX has a broken GATHER4 operation. It performs the texture swizzle on the
 * gather results, rather than before. As a result, it must be emulated with
 * direct texture calls.
 */

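/* tg4 (textureGather) returns one component, selected by tg4->component,
 * from each of the four texels that bilinear filtering would read, in the
 * order (0, 1), (1, 1), (1, 0), (0, 0) in texel offsets from the sampled
 * coordinate. We emulate it with four explicit-LOD txl fetches, one per
 * texel, and assemble the result vector from the selected component of
 * each fetch.
 */
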
static nir_ssa_def *
ir3_nir_lower_tg4_to_tex_instr(nir_builder *b, nir_instr *instr, void *data)
{
   nir_tex_instr *tg4 = nir_instr_as_tex(instr);
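   /* Texel offsets for the first three fetches; the fourth (i == 3) samples
    * at the unmodified coordinate, i.e. an offset of (0, 0).
    */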
   static const int offsets[3][2] = { {0, 1}, {1, 1}, {1, 0} };

   nir_ssa_def *results[4];
   int offset_index = nir_tex_instr_src_index(tg4, nir_tex_src_offset);
   for (int i = 0; i < 4; i++) {
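      /* Each emulated fetch needs an extra source for the explicit LOD, and,
       * if the tg4 had no offset source of its own, room for the offset we
       * add for the first three texels.
       */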
      int num_srcs = tg4->num_srcs + 1 /* lod */;
      if (offset_index < 0 && i < 3)
         num_srcs++;

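      /* Clone the tg4 as a txl, inheriting everything except the opcode. */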
      nir_tex_instr *tex = nir_tex_instr_create(b->shader, num_srcs);
      tex->op = nir_texop_txl;
      tex->sampler_dim = tg4->sampler_dim;
      tex->coord_components = tg4->coord_components;
      tex->is_array = tg4->is_array;
      tex->is_shadow = tg4->is_shadow;
      tex->is_new_style_shadow = tg4->is_new_style_shadow;
      tex->texture_index = tg4->texture_index;
      tex->sampler_index = tg4->sampler_index;
      tex->dest_type = tg4->dest_type;

      for (int j = 0; j < tg4->num_srcs; j++) {
         nir_src_copy(&tex->src[j].src, &tg4->src[j].src, tex);
         tex->src[j].src_type = tg4->src[j].src_type;
      }
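      /* The first three texels need an offset; either append a new offset
       * source or fold ours into the offset the tg4 already had.
       */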
      if (i != 3) {
         nir_ssa_def *offset =
            nir_vec2(b, nir_imm_int(b, offsets[i][0]),
                     nir_imm_int(b, offsets[i][1]));
         if (offset_index < 0) {
            tex->src[tg4->num_srcs].src = nir_src_for_ssa(offset);
            tex->src[tg4->num_srcs].src_type = nir_tex_src_offset;
         } else {
            assert(nir_tex_instr_src_size(tex, offset_index) == 2);
            nir_ssa_def *orig = nir_ssa_for_src(
                  b, tex->src[offset_index].src, 2);
            tex->src[offset_index].src =
                  nir_src_for_ssa(nir_iadd(b, orig, offset));
         }
      }
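      /* tg4 always samples from a single (base) mip level, so fetch with an
       * explicit LOD of zero.
       */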
      tex->src[num_srcs - 1].src = nir_src_for_ssa(nir_imm_float(b, 0));
      tex->src[num_srcs - 1].src_type = nir_tex_src_lod;

      nir_ssa_dest_init(&tex->instr, &tex->dest,
                        nir_tex_instr_dest_size(tex), 32, NULL);
      nir_builder_instr_insert(b, &tex->instr);

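      /* Keep only the gathered component of this texel's result. */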
      results[i] = nir_channel(b, &tex->dest.ssa, tg4->component);
   }

   return nir_vec(b, results, 4);
}

static bool
ir3_nir_lower_tg4_to_tex_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_tex &&
           nir_instr_as_tex(instr)->op == nir_texop_tg4);
}

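/* Entry point: rewrite every tg4 in the shader via the filter/lower
 * callbacks above.
 */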
bool
ir3_nir_lower_tg4_to_tex(nir_shader *shader)
{
   return nir_shader_lower_instructions(shader,
                                        ir3_nir_lower_tg4_to_tex_filter,
                                        ir3_nir_lower_tg4_to_tex_instr, NULL);
}