/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

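/* Writes "val" to the given TMU register and bumps *tmu_writes, so callers
 * can later check the total against the size of the shared TMU input FIFO.
 */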
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val,
              int *tmu_writes)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);

        (*tmu_writes)++;
}

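/* Emits a NOP with the WRTMUC signal set, which writes the next TMU
 * configuration word, fetched from the given uniform.
 */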
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}

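/* Default values for TMU config parameters 1 and 2: when the values we need
 * match these, the corresponding config write can be skipped or emitted as a
 * plain constant.
 */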
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

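        /* LOD queries are implemented with the V4.2 LOD Query bit in P2,
         * which we set manually below.
         */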
        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,

                .disable_autolod = instr->op == nir_texop_tg4
        };

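        /* textureQueryLod() takes no array index in its coordinate, so there
         * is nothing to strip; for every other op the array index is sent
         * separately through TMUI below.
         */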
        int non_array_components =
           instr->op != nir_texop_lod ?
           instr->coord_components - instr->is_array :
           instr->coord_components;

        struct qreg s;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1), &tmu_writes);
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2), &tmu_writes);
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1),
                                              &tmu_writes);
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);

                        /* With texel fetch, automatic LOD is already
                         * disabled, and disable_autolod must not be enabled.
                         * For non-cubes we can use the TMUSLOD register,
                         * which implicitly sets disable_autolod.
                         */
                        if (instr->op != nir_texop_txf &&
                            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                p2_unpacked.disable_autolod = true;
                        }
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0),
                                      &tmu_writes);
                        break;

                case nir_tex_src_offset: {
                        if (nir_src_is_const(instr->src[i].src)) {
                                p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
                                if (non_array_components >= 2)
                                        p2_unpacked.offset_t =
                                                nir_src_comp_as_int(instr->src[i].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked.offset_r =
                                                nir_src_comp_as_int(instr->src[i].src, 2);
                        } else {
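                                /* Non-constant offsets get packed into the
                                 * TMUOFF register as 4-bit fields: S in bits
                                 * 0-3, T in bits 4-7.
                                 */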
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           0), mask);
                                y = vir_AND(c, ntq_get_src(c, instr->src[i].src,
                                                           1), mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y,
                                                        vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF,
                                              offset, &tmu_writes);
                        }
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        p0_unpacked.return_words_of_texture_data =
                instr->dest.is_ssa ?
                nir_ssa_def_components_read(&instr->dest.ssa) :
                (1 << instr->dest.reg.reg->num_components) - 1;

        assert(p0_unpacked.return_words_of_texture_data != 0);

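        /* Pack the parameter structs into their 32-bit hardware layout,
         * using the helpers generated from cle/v3d_packet_v41_pack.h.
         */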
        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* We manually set the LOD Query bit (see
         * V3D42_TMU_CONFIG_PARAMETER_2), as right now it is the only
         * V4.2-specific feature over V4.1 that we use.
         */
        if (instr->op == nir_texop_lod)
           p2_packed |= 1UL << 24;

        /* Load the unit number into the high bits of the texture address
         * field, which will be used by the driver to decide which texture to
         * put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);

        /* Even if the texture operation doesn't need a sampler by itself, we
         * still need to add the sampler configuration parameter if the
         * output is 32 bit.
         */
        bool output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                   !instr->is_shadow);

        /* P1 is optional, but we can skip it only if P2 can be skipped too. */
        bool needs_p2_config =
                (instr->op == nir_texop_lod ||
                 memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0);

        /* To handle the cases where we can't just use p1_unpacked_default */
        bool non_default_p1_config = nir_tex_instr_need_sampler(instr) ||
                output_type_32_bit;

        if (non_default_p1_config) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the
                 * output type could provide (2 for f16, 4 for
                 * 32-bit).
                 */
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                if (nir_tex_instr_need_sampler(instr)) {
                        /* Load the unit number into the high bits of the
                         * sampler address field, which will be used by the
                         * driver to decide which sampler to put in the
                         * actual address field.
                         */
                        p1_packed |= unit << 24;

                        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
                } else {
                        /* In this case, we don't need to merge in any
                         * sampler state from the API and can just use
                         * our packed bits.
                         */
                        vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
                }
        } else if (needs_p2_config) {
                /* Configuration parameters need to be set up in order, so if
                 * P2 is needed we have to set up P1 as well, even when the
                 * texture operation doesn't need any sampler info.  In that
                 * case we can emit the default info and avoid asking the
                 * driver for the sampler state address.
                 */
                uint32_t p1_packed_default;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed_default,
                                                  &p1_unpacked_default);
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed_default);
        }

        if (needs_p2_config)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

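        /* Writing the S coordinate fires off the lookup, and the register it
         * goes to selects the lookup type: TMUSF for texel fetch, TMUSCM for
         * cube maps, TMUSLOD for explicit-LOD sampling, plain TMUS otherwise.
         */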
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
        } else if (instr->op == nir_texop_txl) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSLOD, s, &tmu_writes);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

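        /* Each enabled bit in return_words_of_texture_data corresponds to
         * one LDTMU result to read back.
         */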
        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

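/* Maps a NIR image intrinsic to the TMU operation that implements it.  Note
 * that some atomic adds can be turned into the cheaper inc/dec write ops by
 * v3d_get_op_for_atomic_add().
 */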
static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace the atomic_add with an inc/dec, then
         * things need to be done slightly differently: for example, the
         * amount to add/subtract is not loaded, as it is implicit.
         */
        bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                                    (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                                     p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

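        /* Write the extra coordinate components for this image dimension: T
         * for 2D/rect/cube, T and R for 3D.  1D and buffer images only need
         * the S coordinate, which is written last to trigger the operation.
         */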
        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        /* In order to fetch on a cube map, we need to interpret it as a 2D
         * array, where the third coordinate is the face index.
         */
        if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE ||
            nir_intrinsic_image_array(instr)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
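        /* f16 returns pack two channels into each 32-bit return word. */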
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load the unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

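        /* In non-uniform control flow, predicate the triggering TMU write on
         * the channel being active, so that stores and atomics from inactive
         * channels don't land.  Image loads are harmless to execute for
         * inactive channels.
         */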
        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        if (vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst = (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

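        /* With no return words to read back, a TMUWT is needed to finish the
         * TMU sequence.
         */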
        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);

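        /* Flag TMU stores so the driver can flush the TMU cache from the
         * RCL.
         */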
        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;
}