/*
 * Copyright © 2018 Intel Corporation
 * Copyright © 2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"

/** @file v3d_nir_lower_scratch.c
 *
 * Rewrites the addresses of nir_intrinsic_load_scratch/
 * nir_intrinsic_store_scratch so that each dword of per-channel scratch
 * occupies a full cacheline (one dword per QPU channel), scalarizing the
 * accesses and removing write masks in the process.
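 *
 * As a rough illustration (NIR printed form approximate, SSA names made
 * up), a vec2 load from byte offset ssa_0:
 *
 *    vec2 32 ssa_1 = intrinsic load_scratch (ssa_0) (align_mul=4, ...)
 *
 * becomes two scalar loads at channel-strided offsets plus a vec to
 * reassemble the result:
 *
 *    ssa_2 = imul ssa_0, V3D_CHANNELS
 *    32 ssa_3 = intrinsic load_scratch (ssa_2) (align_mul=4, ...)
 *    ssa_4 = iadd ssa_2, V3D_CHANNELS * 4
 *    32 ssa_5 = intrinsic load_scratch (ssa_4) (align_mul=4, ...)
 *    vec2 32 ssa_6 = vec2 ssa_3, ssa_5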
 */

static nir_def *
v3d_nir_scratch_offset(nir_builder *b, nir_intrinsic_instr *instr)
{
        bool is_store = instr->intrinsic == nir_intrinsic_store_scratch;
        nir_def *offset = instr->src[is_store ? 1 : 0].ssa;

        assert(nir_intrinsic_align_mul(instr) >= 4);
        assert(nir_intrinsic_align_offset(instr) == 0);

        /* The spill_offset register will already have the subgroup ID (EIDX)
         * shifted and ORed in at bit 2, so all we need to do is to move the
         * dword index up above V3D_CHANNELS.
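         *
         * With V3D_CHANNELS == 16, for example, a 4-byte aligned offset
         * scaled by 16 is a multiple of 64, leaving bits 2..5 of the
         * address free for that EIDX.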
         */
        return nir_imul_imm(b, offset, V3D_CHANNELS);
}

static void
v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_def *offset = v3d_nir_scratch_offset(b, instr);

        nir_def *chans[NIR_MAX_VEC_COMPONENTS];
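        /* Emit one single-dword load per vector component.  Component i is
         * read V3D_CHANNELS dwords past component i - 1, so all channels'
         * copies of a given dword share one cacheline.
         */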
        for (int i = 0; i < instr->num_components; i++) {
                nir_def *chan_offset =
                        nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);

                nir_intrinsic_instr *chan_instr =
                        nir_intrinsic_instr_create(b->shader, instr->intrinsic);
                chan_instr->num_components = 1;
                nir_def_init(&chan_instr->instr, &chan_instr->def, 1,
                             instr->def.bit_size);

                chan_instr->src[0] = nir_src_for_ssa(chan_offset);

                nir_intrinsic_set_align(chan_instr, 4, 0);

                nir_builder_instr_insert(b, &chan_instr->instr);

                chans[i] = &chan_instr->def;
        }

        nir_def *result = nir_vec(b, chans, instr->num_components);
        nir_def_rewrite_uses(&instr->def, result);
        nir_instr_remove(&instr->instr);
}

static void
v3d_nir_lower_store_scratch(nir_builder *b, nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_def *offset = v3d_nir_scratch_offset(b, instr);
        nir_def *value = instr->src[0].ssa;

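        /* Emit one single-dword store per component.  Components masked out
         * by the write mask are skipped entirely; each emitted scalar store
         * writes its single component (write mask 0x1).
         */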
        for (int i = 0; i < instr->num_components; i++) {
                if (!(nir_intrinsic_write_mask(instr) & (1 << i)))
                        continue;

                nir_def *chan_offset =
                        nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);

                nir_intrinsic_instr *chan_instr =
                        nir_intrinsic_instr_create(b->shader, instr->intrinsic);
                chan_instr->num_components = 1;

                chan_instr->src[0] = nir_src_for_ssa(nir_channel(b,
                                                                 value,
                                                                 i));
                chan_instr->src[1] = nir_src_for_ssa(chan_offset);
                nir_intrinsic_set_write_mask(chan_instr, 0x1);
                nir_intrinsic_set_align(chan_instr, 4, 0);

                nir_builder_instr_insert(b, &chan_instr->instr);
        }

        nir_instr_remove(&instr->instr);
}

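/* Callback for nir_shader_intrinsics_pass(): lowers scratch intrinsics in
 * place and reports whether this intrinsic was changed.
 */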
static bool
v3d_nir_lower_scratch_cb(nir_builder *b,
                         nir_intrinsic_instr *intr,
                         void *_state)
{
        switch (intr->intrinsic) {
        case nir_intrinsic_load_scratch:
                v3d_nir_lower_load_scratch(b, intr);
                return true;
        case nir_intrinsic_store_scratch:
                v3d_nir_lower_store_scratch(b, intr);
                return true;
        default:
                return false;
        }

        return false;
}

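/* Pass entry point: scalarizes scratch loads/stores and rewrites their
 * offsets into the channel-interleaved layout expected by the
 * spill_offset setup.
 */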
bool
v3d_nir_lower_scratch(nir_shader *s)
{
        return nir_shader_intrinsics_pass(s, v3d_nir_lower_scratch_cb,
                                          nir_metadata_block_index |
                                          nir_metadata_dominance, NULL);
}