/*
 * Copyright (C) 2018-2020 Collabora, Ltd.
 * Copyright (C) 2019-2020 Icecream95
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "pan_ir.h"

/* Midgard can write all of color, depth and stencil in a single writeout
 * operation, so we merge depth/stencil stores with color stores.
 * If there are no color stores, we add a write to the "depth RT".
 *
 * For Bifrost, we want these combined so we can properly order
 * +ZS_EMIT with respect to +ATEST and +BLEND, as well as combining
 * depth/stencil stores into a single +ZS_EMIT op.
 */
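
/*
 * Illustrative sketch (not verbatim NIR): a fragment shader that ends with
 *
 *    store_output(color, offset)   // FRAG_RESULT_DATA0
 *    store_output(z, offset)       // FRAG_RESULT_DEPTH
 *    store_output(s, offset)       // FRAG_RESULT_STENCIL
 *
 * is rewritten so that a single
 *
 *    store_combined_output_pan(color, offset, z, s, color2)
 *
 * replaces all three, with zero immediates standing in for any piece the
 * shader does not write (here color2, the dual-source blend color) and the
 * PAN_WRITEOUT_* mask carried in the COMPONENT index.
 */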

/*
 * Get the type to report for a piece of a combined store, given the store it
 * is combining from. If there is no store to render target #0, a dummy <0.0,
 * 0.0, 0.0, 0.0> write is used, so report a matching float32 type.
 */
static nir_alu_type
pan_nir_rt_store_type(nir_intrinsic_instr *store)
{
   return store ? nir_intrinsic_src_type(store) : nir_type_float32;
}

static void
pan_nir_emit_combined_store(nir_builder *b, nir_intrinsic_instr *rt0_store,
                            unsigned writeout, nir_intrinsic_instr **stores)
{
   nir_intrinsic_instr *intr = nir_intrinsic_instr_create(
      b->shader, nir_intrinsic_store_combined_output_pan);

   intr->num_components = rt0_store ? rt0_store->src[0].ssa->num_components : 4;

   if (rt0_store)
      nir_intrinsic_set_io_semantics(intr,
                                     nir_intrinsic_io_semantics(rt0_store));
   nir_intrinsic_set_src_type(intr, pan_nir_rt_store_type(rt0_store));
   nir_intrinsic_set_dest_type(intr, pan_nir_rt_store_type(stores[2]));
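   /* The writeout mask has no dedicated intrinsic index, so it is carried in
    * the COMPONENT slot. */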
   nir_intrinsic_set_component(intr, writeout);

   nir_def *zero = nir_imm_int(b, 0);
   nir_def *zero4 = nir_imm_ivec4(b, 0, 0, 0, 0);

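   /* Sources of the combined store, with zero immediates standing in for
    * absent stores: color value, color output offset, depth value, stencil
    * value, dual-source blend value. */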
   nir_def *src[] = {
      rt0_store ? rt0_store->src[0].ssa : zero4,
      rt0_store ? rt0_store->src[1].ssa : zero,
      stores[0] ? stores[0]->src[0].ssa : zero,
      stores[1] ? stores[1]->src[0].ssa : zero,
      stores[2] ? stores[2]->src[0].ssa : zero4,
   };

   for (int i = 0; i < ARRAY_SIZE(src); ++i)
      intr->src[i] = nir_src_for_ssa(src[i]);

   nir_builder_instr_insert(b, &intr->instr);
}

static bool
kill_depth_stencil_writes(nir_builder *b, nir_intrinsic_instr *intr,
                          UNUSED void *data)
{
   if (intr->intrinsic != nir_intrinsic_store_output)
      return false;

   nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
   if (sem.location != FRAG_RESULT_DEPTH && sem.location != FRAG_RESULT_STENCIL)
      return false;

   nir_instr_remove(&intr->instr);
   return true;
}

bool
pan_nir_lower_zs_store(nir_shader *nir)
{
   bool progress = false;

   if (nir->info.stage != MESA_SHADER_FRAGMENT)
      return false;

   /* Remove all stencil/depth writes if early fragment tests are forced. */
   if (nir->info.fs.early_fragment_tests)
      progress |= nir_shader_intrinsics_pass(nir, kill_depth_stencil_writes,
                                             nir_metadata_control_flow, NULL);

   nir_foreach_function_impl(impl, nir) {
      nir_intrinsic_instr *stores[3] = {NULL};
      nir_intrinsic_instr *last_mask_store = NULL;
      nir_block *mask_block = NULL;
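      /* Bitmask of PAN_WRITEOUT_* flags accumulated from the stores found
       * below */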
      unsigned writeout = 0;

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic != nir_intrinsic_store_output)
               continue;

            nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
            if (sem.location == FRAG_RESULT_DEPTH) {
               stores[0] = intr;
               writeout |= PAN_WRITEOUT_Z;
            } else if (sem.location == FRAG_RESULT_STENCIL) {
               stores[1] = intr;
               writeout |= PAN_WRITEOUT_S;
            } else if (sem.dual_source_blend_index) {
               assert(!stores[2]); /* only one dual-source blend store is allowed */
               stores[2] = intr;
               writeout |= PAN_WRITEOUT_2;
            } else if (sem.location == FRAG_RESULT_SAMPLE_MASK) {
               last_mask_store = intr;
               mask_block = intr->instr.block;
            }
         }
      }

      if (!writeout && !last_mask_store)
         continue;

      nir_block *common_block = mask_block;

      /* Ensure all stores are in the same block */
      for (unsigned i = 0; i < ARRAY_SIZE(stores); ++i) {
         if (!stores[i])
            continue;

         nir_block *block = stores[i]->instr.block;

         if (common_block)
            assert(common_block == block);
         else
            common_block = block;
      }

      /* Move color (FRAG_RESULT_DATAn) stores in the common block to after
       * the last sample mask store */
      if (last_mask_store) {
         nir_cursor insert_point = nir_after_instr(&last_mask_store->instr);
         nir_foreach_instr_safe(instr, mask_block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

            /* Stop once we reach the last sample mask store; only stores
             * before it need to move */
            if (intr == last_mask_store)
               break;
            if (intr->intrinsic != nir_intrinsic_store_output)
               continue;
            nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
            if (sem.location >= FRAG_RESULT_DATA0 &&
                sem.location <= FRAG_RESULT_DATA7) {
               nir_instr_move(insert_point, instr);
               insert_point = nir_after_instr(instr);
            }
         }
      }

      bool replaced = false;

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic != nir_intrinsic_store_output)
               continue;

            nir_io_semantics sem = nir_intrinsic_io_semantics(intr);

            if (sem.location < FRAG_RESULT_DATA0)
               continue;

            if (sem.dual_source_blend_index)
               continue;

            assert(nir_src_is_const(intr->src[1]) && "no indirect outputs");

            nir_builder b =
               nir_builder_at(nir_after_block_before_jump(instr->block));

            /* Trying to write depth twice results in the wrong blend shader
             * being executed on Midgard */
            unsigned this_store = PAN_WRITEOUT_C | (replaced ? 0 : writeout);

            pan_nir_emit_combined_store(&b, intr, this_store, stores);

            nir_instr_remove(instr);

            replaced = true;
         }
      }

      /* Insert a store to the depth RT (0xff) if needed */
      if (!replaced) {
         nir_builder b =
            nir_builder_at(nir_after_block_before_jump(common_block));

         pan_nir_emit_combined_store(&b, NULL, writeout, stores);
      }

      for (unsigned i = 0; i < ARRAY_SIZE(stores); ++i) {
         if (stores[i])
            nir_instr_remove(&stores[i]->instr);
      }

      nir_metadata_preserve(impl, nir_metadata_control_flow);
      progress = true;
   }

   return progress;
}
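
/*
 * Hypothetical call site, for illustration only (assumes the standard
 * NIR_PASS helper; real drivers schedule this in their fragment-shader
 * compile pipeline):
 *
 *    NIR_PASS(progress, nir, pan_nir_lower_zs_store);
 */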