1 /*
2 * Copyright (C) 2018 Alyssa Rosenzweig
3 * Copyright (C) 2020 Collabora Ltd.
4 * Copyright © 2017 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #ifndef __PAN_CMDSTREAM_H__
27 #define __PAN_CMDSTREAM_H__
28
29 #ifndef PAN_ARCH
30 #error "PAN_ARCH undefined!"
31 #endif
32
33 #include "genxml/gen_macros.h"
34
35 #include "pan_context.h"
36 #include "pan_job.h"
37
38 #include "pipe/p_defines.h"
39 #include "pipe/p_state.h"
40
41 #include "util/u_prim.h"
42
43 #define PAN_GPU_INDIRECTS (PAN_ARCH == 7)
44
/* Rasterizer CSO: the Gallium rasterizer state plus hardware words
 * pre-packed at CSO creation time (pre-Valhall only).
 */
struct panfrost_rasterizer {
   struct pipe_rasterizer_state base;

#if PAN_ARCH <= 7
   /* Partially packed RSD words */
   struct mali_multisample_misc_packed multisample;
   struct mali_stencil_mask_misc_packed stencil_misc;
#endif
};
54
/* Depth/stencil/alpha CSO: the Gallium state plus booleans and hardware
 * words derived once at CSO creation so draw-time emission stays cheap.
 */
struct panfrost_zsa_state {
   struct pipe_depth_stencil_alpha_state base;

   /* Is any depth, stencil, or alpha testing enabled? */
   bool enabled;

   /* Does the depth and stencil tests always pass? This ignores write
    * masks, we are only interested in whether pixels may be killed.
    */
   bool zs_always_passes;

   /* Are depth or stencil writes possible? */
   bool writes_zs;

#if PAN_ARCH <= 7
   /* Prepacked words from the RSD */
   struct mali_multisample_misc_packed rsd_depth;
   struct mali_stencil_mask_misc_packed rsd_stencil;
   struct mali_stencil_packed stencil_front, stencil_back;
#else
   /* Depth/stencil descriptor template */
   struct mali_depth_stencil_packed desc;
#endif
};
79
/* Vertex element CSO: Gallium vertex elements plus per-arch derived state */
struct panfrost_vertex_state {
   /* Number of valid entries in pipe[] (and the parallel arrays below) */
   unsigned num_elements;
   struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
   uint16_t strides[PIPE_MAX_ATTRIBS];

#if PAN_ARCH >= 9
   /* Packed attribute descriptors */
   struct mali_attribute_packed attributes[PIPE_MAX_ATTRIBS];
#else
   /* buffers corresponds to attribute buffer, element_buffers corresponds
    * to an index in buffers for each vertex element */
   struct pan_vertex_buffer buffers[PIPE_MAX_ATTRIBS];
   unsigned element_buffer[PIPE_MAX_ATTRIBS];
   unsigned nr_bufs;

   /* One format per vertex element; NOTE(review): encoding is arch-specific
    * and not visible in this header — see the attribute packing code */
   unsigned formats[PIPE_MAX_ATTRIBS];
#endif
};
98
99 static inline bool
panfrost_is_implicit_prim_restart(const struct pipe_draw_info * info)100 panfrost_is_implicit_prim_restart(const struct pipe_draw_info *info)
101 {
102 /* As a reminder primitive_restart should always be checked before any
103 access to restart_index. */
104 return info->primitive_restart &&
105 info->restart_index == (unsigned)BITFIELD_MASK(info->index_size * 8);
106 }
107
108 static inline bool
pan_allow_forward_pixel_to_kill(struct panfrost_context * ctx,struct panfrost_compiled_shader * fs)109 pan_allow_forward_pixel_to_kill(struct panfrost_context *ctx,
110 struct panfrost_compiled_shader *fs)
111 {
112 /* Track if any colour buffer is reused across draws, either
113 * from reading it directly, or from failing to write it
114 */
115 unsigned rt_mask = ctx->fb_rt_mask;
116 uint64_t rt_written = (fs->info.outputs_written >> FRAG_RESULT_DATA0) &
117 ctx->blend->enabled_mask;
118 bool blend_reads_dest = (ctx->blend->load_dest_mask & rt_mask);
119 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
120
121 return fs->info.fs.can_fpk && !(rt_mask & ~rt_written) &&
122 !alpha_to_coverage && !blend_reads_dest;
123 }
124
125 /*
126 * Determine whether to set the respective overdraw alpha flag.
127 *
128 * The overdraw alpha=1 flag should be set when alpha=1 implies full overdraw,
129 * equivalently, all enabled render targets have alpha_one_store set. Likewise,
130 * overdraw alpha=0 should be set when alpha=0 implies no overdraw,
131 * equivalently, all enabled render targets have alpha_zero_nop set.
132 */
#if PAN_ARCH >= 6
static inline bool
panfrost_overdraw_alpha(const struct panfrost_context *ctx, bool zero)
{
   const struct panfrost_blend_state *so = ctx->blend;

   for (unsigned i = 0; i < ctx->pipe_framebuffer.nr_cbufs; ++i) {
      const struct pan_blend_info info = so->info[i];

      /* Only render targets that are bound AND whose blend state writes
       * colour participate. (The previous `!info.enabled` test inverted
       * this, deciding the flag from exactly the RTs that do not write —
       * contradicting the comment above and the `cbufs[i] && info.enabled`
       * convention used by panfrost_fs_required().)
       */
      bool enabled = ctx->pipe_framebuffer.cbufs[i] && info.enabled;
      bool flag = zero ? info.alpha_zero_nop : info.alpha_one_store;

      /* A single enabled RT lacking the property defeats the flag */
      if (enabled && !flag)
         return false;
   }

   return true;
}
#endif
152
153 static inline void
panfrost_emit_primitive_size(struct panfrost_context * ctx,bool points,mali_ptr size_array,void * prim_size)154 panfrost_emit_primitive_size(struct panfrost_context *ctx, bool points,
155 mali_ptr size_array, void *prim_size)
156 {
157 struct panfrost_rasterizer *rast = ctx->rasterizer;
158
159 pan_pack(prim_size, PRIMITIVE_SIZE, cfg) {
160 if (panfrost_writes_point_size(ctx)) {
161 cfg.size_array = size_array;
162 } else {
163 cfg.constant = points ? rast->base.point_size : rast->base.line_width;
164 }
165 }
166 }
167
168 static inline uint8_t
pan_draw_mode(enum mesa_prim mode)169 pan_draw_mode(enum mesa_prim mode)
170 {
171 switch (mode) {
172
173 #define DEFINE_CASE(c) \
174 case MESA_PRIM_##c: \
175 return MALI_DRAW_MODE_##c;
176
177 DEFINE_CASE(POINTS);
178 DEFINE_CASE(LINES);
179 DEFINE_CASE(LINE_LOOP);
180 DEFINE_CASE(LINE_STRIP);
181 DEFINE_CASE(TRIANGLES);
182 DEFINE_CASE(TRIANGLE_STRIP);
183 DEFINE_CASE(TRIANGLE_FAN);
184 DEFINE_CASE(QUADS);
185 DEFINE_CASE(POLYGON);
186 #if PAN_ARCH <= 6
187 DEFINE_CASE(QUAD_STRIP);
188 #endif
189
190 #undef DEFINE_CASE
191
192 default:
193 unreachable("Invalid draw mode");
194 }
195 }
196
197 static inline enum mali_index_type
panfrost_translate_index_size(unsigned size)198 panfrost_translate_index_size(unsigned size)
199 {
200 STATIC_ASSERT(MALI_INDEX_TYPE_NONE == 0);
201 STATIC_ASSERT(MALI_INDEX_TYPE_UINT8 == 1);
202 STATIC_ASSERT(MALI_INDEX_TYPE_UINT16 == 2);
203
204 return (size == 4) ? MALI_INDEX_TYPE_UINT32 : size;
205 }
206
207 static inline bool
panfrost_fs_required(struct panfrost_compiled_shader * fs,struct panfrost_blend_state * blend,struct pipe_framebuffer_state * state,const struct panfrost_zsa_state * zsa)208 panfrost_fs_required(struct panfrost_compiled_shader *fs,
209 struct panfrost_blend_state *blend,
210 struct pipe_framebuffer_state *state,
211 const struct panfrost_zsa_state *zsa)
212 {
213 /* If we generally have side effects. This inclues use of discard,
214 * which can affect the results of an occlusion query. */
215 if (fs->info.fs.sidefx)
216 return true;
217
218 /* Using an empty FS requires early-z to be enabled, but alpha test
219 * needs it disabled. Alpha test is only native on Midgard, so only
220 * check there.
221 */
222 if (PAN_ARCH <= 5 && zsa->base.alpha_func != PIPE_FUNC_ALWAYS)
223 return true;
224
225 /* If colour is written we need to execute */
226 for (unsigned i = 0; i < state->nr_cbufs; ++i) {
227 if (state->cbufs[i] && blend->info[i].enabled)
228 return true;
229 }
230
231 /* If depth is written and not implied we need to execute.
232 * TODO: Predicate on Z/S writes being enabled */
233 return (fs->info.fs.writes_depth || fs->info.fs.writes_stencil);
234 }
235
236 #if PAN_ARCH >= 9
237 static inline mali_ptr
panfrost_get_position_shader(struct panfrost_batch * batch,const struct pipe_draw_info * info)238 panfrost_get_position_shader(struct panfrost_batch *batch,
239 const struct pipe_draw_info *info)
240 {
241 /* IDVS/points vertex shader */
242 mali_ptr vs_ptr = batch->rsd[PIPE_SHADER_VERTEX];
243
244 /* IDVS/triangle vertex shader */
245 if (vs_ptr && info->mode != MESA_PRIM_POINTS)
246 vs_ptr += pan_size(SHADER_PROGRAM);
247
248 return vs_ptr;
249 }
250
251 static inline mali_ptr
panfrost_get_varying_shader(struct panfrost_batch * batch)252 panfrost_get_varying_shader(struct panfrost_batch *batch)
253 {
254 return batch->rsd[PIPE_SHADER_VERTEX] + (2 * pan_size(SHADER_PROGRAM));
255 }
256
257 static inline unsigned
panfrost_vertex_attribute_stride(struct panfrost_compiled_shader * vs,struct panfrost_compiled_shader * fs)258 panfrost_vertex_attribute_stride(struct panfrost_compiled_shader *vs,
259 struct panfrost_compiled_shader *fs)
260 {
261 unsigned v = vs->info.varyings.output_count;
262 unsigned f = fs->info.varyings.input_count;
263 unsigned slots = MAX2(v, f);
264 slots += util_bitcount(fs->key.fs.fixed_varying_mask);
265
266 /* Assumes 16 byte slots. We could do better. */
267 return slots * 16;
268 }
269
270 static inline mali_ptr
panfrost_emit_resources(struct panfrost_batch * batch,enum pipe_shader_type stage)271 panfrost_emit_resources(struct panfrost_batch *batch,
272 enum pipe_shader_type stage)
273 {
274 struct panfrost_context *ctx = batch->ctx;
275 struct panfrost_ptr T;
276 unsigned nr_tables = 12;
277
278 /* Although individual resources need only 16 byte alignment, the
279 * resource table as a whole must be 64-byte aligned.
280 */
281 T = pan_pool_alloc_aligned(&batch->pool.base, nr_tables * pan_size(RESOURCE),
282 64);
283 memset(T.cpu, 0, nr_tables * pan_size(RESOURCE));
284
285 panfrost_make_resource_table(T, PAN_TABLE_UBO, batch->uniform_buffers[stage],
286 batch->nr_uniform_buffers[stage]);
287
288 panfrost_make_resource_table(T, PAN_TABLE_TEXTURE, batch->textures[stage],
289 ctx->sampler_view_count[stage]);
290
291 /* We always need at least 1 sampler for txf to work */
292 panfrost_make_resource_table(T, PAN_TABLE_SAMPLER, batch->samplers[stage],
293 MAX2(ctx->sampler_count[stage], 1));
294
295 panfrost_make_resource_table(T, PAN_TABLE_IMAGE, batch->images[stage],
296 util_last_bit(ctx->image_mask[stage]));
297
298 if (stage == PIPE_SHADER_VERTEX) {
299 panfrost_make_resource_table(T, PAN_TABLE_ATTRIBUTE,
300 batch->attribs[stage],
301 ctx->vertex->num_elements);
302
303 panfrost_make_resource_table(T, PAN_TABLE_ATTRIBUTE_BUFFER,
304 batch->attrib_bufs[stage],
305 util_last_bit(ctx->vb_mask));
306 }
307
308 return T.gpu | nr_tables;
309 }
310 #endif /* PAN_ARCH >= 9 */
311
312 static bool
allow_rotating_primitives(const struct panfrost_compiled_shader * fs,const struct pipe_draw_info * info)313 allow_rotating_primitives(const struct panfrost_compiled_shader *fs,
314 const struct pipe_draw_info *info)
315 {
316 return u_reduced_prim(info->mode) != MESA_PRIM_LINES &&
317 !fs->info.bifrost.uses_flat_shading;
318 }
319
320 #endif
321