/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v33_pack.h"

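/* Emits the VIR for a V3D 3.3 NIR texture instruction: the texture uniform
 * P0/P1 parameter words are packed at compile time, the coordinates and other
 * parameters are written to the TMU, and the returned texel data is unpacked
 * into the NIR destination.
 */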
void
v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        struct V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1 p0_unpacked = {
                V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_header,

                .fetch_sample_mode = instr->op == nir_texop_txf,
        };

        struct V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1 p1_unpacked = {
        };

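        /* Translate the GLSL sampler dimensionality into the hardware lookup
         * type in P0.
         */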
        switch (instr->sampler_dim) {
        case GLSL_SAMPLER_DIM_1D:
                if (instr->is_array)
                        p0_unpacked.lookup_type = TEXTURE_1D_ARRAY;
                else
                        p0_unpacked.lookup_type = TEXTURE_1D;
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                if (instr->is_array)
                        p0_unpacked.lookup_type = TEXTURE_2D_ARRAY;
                else
                        p0_unpacked.lookup_type = TEXTURE_2D;
                break;
        case GLSL_SAMPLER_DIM_3D:
                p0_unpacked.lookup_type = TEXTURE_3D;
                break;
        case GLSL_SAMPLER_DIM_CUBE:
                p0_unpacked.lookup_type = TEXTURE_CUBE_MAP;
                break;
        default:
                unreachable("Bad sampler type");
        }

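        /* Gather the instruction's sources into the sequence of TMU
         * coordinate/parameter writes, recording in P0 which optional
         * parameters (bias, LOD, shadow comparator, texel offsets) are
         * supplied.
         */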
        struct qreg coords[5];
        int next_coord = 0;
        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        for (int j = 0; j < instr->coord_components; j++) {
                                coords[next_coord++] =
                                        ntq_get_src(c, instr->src[i].src, j);
                        }
                        if (instr->coord_components < 2)
                                coords[next_coord++] = vir_uniform_f(c, 0.5);
                        break;
                case nir_tex_src_bias:
                        coords[next_coord++] =
                                ntq_get_src(c, instr->src[i].src, 0);

                        p0_unpacked.bias_supplied = true;
                        break;
                case nir_tex_src_lod:
                        coords[next_coord++] =
                                vir_FADD(c,
                                         ntq_get_src(c, instr->src[i].src, 0),
                                         vir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL,
                                                     unit));

                        if (instr->op != nir_texop_txf &&
                            instr->op != nir_texop_tg4) {
                                p0_unpacked.disable_autolod_use_bias_only = true;
                        }
                        break;
                case nir_tex_src_comparator:
                        coords[next_coord++] =
                                ntq_get_src(c, instr->src[i].src, 0);

                        p0_unpacked.shadow = true;
                        break;

                case nir_tex_src_offset: {
                        nir_const_value *offset =
                                nir_src_as_const_value(instr->src[i].src);
                        p0_unpacked.texel_offset_for_s_coordinate =
                                offset->i32[0];

                        if (instr->coord_components >= 2)
                                p0_unpacked.texel_offset_for_t_coordinate =
                                        offset->i32[1];

                        if (instr->coord_components >= 3)
                                p0_unpacked.texel_offset_for_r_coordinate =
                                        offset->i32[2];
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

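        /* Shadow lookups always take the 16-bit return path, the same as
         * samplers whose shader key requests a 16-bit return size.
         */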
        bool return_16 = (c->key->tex[unit].return_size == 16 ||
                          p0_unpacked.shadow);

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_tex_instr_dest_size(instr);
        if (return_16)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p1_unpacked.return_words_of_texture_data =
                (1 << MIN2(instr_return_channels,
                           c->key->tex[unit].return_channels)) - 1;

        uint32_t p0_packed;
        V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL,
                                                         (uint8_t *)&p0_packed,
                                                         &p0_unpacked);

        uint32_t p1_packed;
        V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1_pack(NULL,
                                                         (uint8_t *)&p1_packed,
                                                         &p1_unpacked);
        /* Load the unit number into the address field, which the driver will
         * use to decide which texture to put in the actual address field.
         */
        p1_packed |= unit << 5;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                coords[0] = vir_FMUL(c, coords[0],
                                     vir_uniform(c, QUNIFORM_TEXRECT_SCALE_X,
                                                 unit));
                coords[1] = vir_FMUL(c, coords[1],
                                     vir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y,
                                                 unit));
        }

        struct qreg texture_u[] = {
                vir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0_0 + unit, p0_packed),
                vir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, p1_packed),
        };
        uint32_t next_texture_u = 0;

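        /* Write the coordinates and parameters to the TMU.  The final value
         * is written to TMUL to complete the request, and the first two
         * writes carry the packed P0/P1 configuration words as implicit
         * uniforms.
         */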
        for (int i = 0; i < next_coord; i++) {
                struct qreg dst;

                if (i == next_coord - 1)
                        dst = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUL);
                else
                        dst = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMU);

                struct qinst *tmu = vir_MOV_dest(c, dst, coords[i]);

                if (i < 2) {
                        tmu->has_implicit_uniform = true;
                        tmu->src[vir_get_implicit_uniform_src(tmu)] =
                                texture_u[next_texture_u++];
                }
        }

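        /* Switch threads while the TMU performs the lookup; the results are
         * read back with LDTMU below.
         */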
        vir_emit_thrsw(c);

        struct qreg return_values[4];
        for (int i = 0; i < 4; i++) {
                /* Swizzling .zw of an RG texture should give undefined
                 * results, not crash the compiler.
                 */
                if (p1_unpacked.return_words_of_texture_data & (1 << i))
                        return_values[i] = vir_LDTMU(c);
                else
                        return_values[i] = c->undef;
        }

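        /* Unpack the returned words into the NIR destination channels.  With
         * a 16-bit return, each LDTMU word holds a pair of channels that gets
         * split apart here.
         */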
        for (int i = 0; i < nir_tex_instr_dest_size(instr); i++) {
                struct qreg chan;

                if (return_16) {
                        STATIC_ASSERT(PIPE_SWIZZLE_X == 0);
                        chan = return_values[i / 2];

                        if (nir_alu_type_get_base_type(instr->dest_type) ==
                            nir_type_float) {
                                enum v3d_qpu_input_unpack unpack;
                                if (i & 1)
                                        unpack = V3D_QPU_UNPACK_H;
                                else
                                        unpack = V3D_QPU_UNPACK_L;

                                chan = vir_FMOV(c, chan);
                                vir_set_unpack(c->defs[chan.index], 0, unpack);
                        } else {
                                /* If we're unpacking the low field, shift it
                                 * up to the top first.
                                 */
                                if ((i & 1) == 0) {
                                        chan = vir_SHL(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }

                                /* Do proper sign extension to a 32-bit int. */
                                if (nir_alu_type_get_base_type(instr->dest_type) ==
                                    nir_type_int) {
                                        chan = vir_ASR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                } else {
                                        chan = vir_SHR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }
                        }
                } else {
                        chan = vir_MOV(c, return_values[i]);
                }
                ntq_store_dest(c, &instr->dest, i, chan);
        }
}