/*
 * Copyright © 2017 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "ir3_nir.h"

/* A4XX has a broken GATHER4 operation: it applies the texture swizzle to the
 * gather results instead of before the gather. As a result, tg4 must be
 * emulated with direct texture calls.
 */
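/* The lowering below replaces each tg4 with four txl fetches (explicit
 * LOD 0), one per texel of the 2x2 gather footprint, and assembles the
 * result vector from the selected component of each fetch.
 */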

static nir_ssa_def *
ir3_nir_lower_tg4_to_tex_instr(nir_builder *b, nir_instr *instr, void *data)
{
   nir_tex_instr *tg4 = nir_instr_as_tex(instr);
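   /* Texel offsets for the first three fetches of the 2x2 footprint; the
    * fourth fetch reads the base texel and needs no offset (see the
    * i != 3 check below).
    */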
   static const int offsets[3][2] = {{0, 1}, {1, 1}, {1, 0}};

   nir_ssa_def *results[4];
   int offset_index = nir_tex_instr_src_index(tg4, nir_tex_src_offset);
   for (int i = 0; i < 4; i++) {
      int num_srcs = tg4->num_srcs + 1 /* lod */;
      if (offset_index < 0 && i < 3)
         num_srcs++;

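      /* Build a txl carrying over the tg4's sampler and coordinate state. */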
      nir_tex_instr *tex = nir_tex_instr_create(b->shader, num_srcs);
      tex->op = nir_texop_txl;
      tex->sampler_dim = tg4->sampler_dim;
      tex->coord_components = tg4->coord_components;
      tex->is_array = tg4->is_array;
      tex->is_shadow = tg4->is_shadow;
      tex->is_new_style_shadow = tg4->is_new_style_shadow;
      tex->texture_index = tg4->texture_index;
      tex->sampler_index = tg4->sampler_index;
      tex->dest_type = tg4->dest_type;

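      /* Carry over every source (coords, comparator, any existing offset,
       * ...) from the original tg4.
       */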
      for (int j = 0; j < tg4->num_srcs; j++) {
         nir_src_copy(&tex->src[j].src, &tg4->src[j].src);
         tex->src[j].src_type = tg4->src[j].src_type;
      }
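      /* The first three fetches need a footprint offset: either add a new
       * offset source, or fold the offset into the tg4's existing one.
       */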
      if (i != 3) {
         nir_ssa_def *offset = nir_vec2(b, nir_imm_int(b, offsets[i][0]),
                                        nir_imm_int(b, offsets[i][1]));
         if (offset_index < 0) {
            tex->src[tg4->num_srcs].src = nir_src_for_ssa(offset);
            tex->src[tg4->num_srcs].src_type = nir_tex_src_offset;
         } else {
            assert(nir_tex_instr_src_size(tex, offset_index) == 2);
            nir_ssa_def *orig =
               nir_ssa_for_src(b, tex->src[offset_index].src, 2);
            tex->src[offset_index].src =
               nir_src_for_ssa(nir_iadd(b, orig, offset));
         }
      }
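      /* Pin the fetch to a single level with an explicit LOD of zero,
       * matching gather, which reads from a single mip level.
       */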
      tex->src[num_srcs - 1].src = nir_src_for_ssa(nir_imm_float(b, 0));
      tex->src[num_srcs - 1].src_type = nir_tex_src_lod;

      nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
                        32, NULL);
      nir_builder_instr_insert(b, &tex->instr);

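      /* Keep only the component the tg4 was gathering. */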
      results[i] = nir_channel(b, &tex->dest.ssa, tg4->component);
   }

   return nir_vec(b, results, 4);
}

static bool
ir3_nir_lower_tg4_to_tex_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_tex &&
           nir_instr_as_tex(instr)->op == nir_texop_tg4);
}

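/* Pass entry point: rewrite every tg4 in the shader using the emulation
 * above.
 */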
bool
ir3_nir_lower_tg4_to_tex(nir_shader *shader)
{
   return nir_shader_lower_instructions(shader, ir3_nir_lower_tg4_to_tex_filter,
                                        ir3_nir_lower_tg4_to_tex_instr, NULL);
}
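
/* A minimal usage sketch (hypothetical call site; the real gate lives in the
 * ir3 compiler's NIR pass loop, conditional on the a4xx generation):
 *
 *    bool progress = false;
 *    if (is_a4xx)   // hypothetical generation check
 *       progress |= ir3_nir_lower_tg4_to_tex(shader);
 */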