1 /*
2 * Copyright (C) 2021 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 */
24
25 #ifndef __PAN_SHADER_H__
26 #define __PAN_SHADER_H__
27
28 #include "compiler/nir/nir.h"
29 #include "panfrost/util/pan_ir.h"
30 #include "panfrost/util/pan_lower_framebuffer.h"
31
32 #include "genxml/gen_macros.h"
33
34 void bifrost_preprocess_nir(nir_shader *nir, unsigned gpu_id);
35 void midgard_preprocess_nir(nir_shader *nir, unsigned gpu_id);
36
37 static inline void
pan_shader_preprocess(nir_shader * nir,unsigned gpu_id)38 pan_shader_preprocess(nir_shader *nir, unsigned gpu_id)
39 {
40 if (pan_arch(gpu_id) >= 6)
41 bifrost_preprocess_nir(nir, gpu_id);
42 else
43 midgard_preprocess_nir(nir, gpu_id);
44 }
45
46 uint8_t pan_raw_format_mask_midgard(enum pipe_format *formats);
47
48 #ifdef PAN_ARCH
49 const nir_shader_compiler_options *GENX(pan_shader_get_compiler_options)(void);
50
51 void GENX(pan_shader_compile)(nir_shader *nir,
52 struct panfrost_compile_inputs *inputs,
53 struct util_dynarray *binary,
54 struct pan_shader_info *info);
55
56 #if PAN_ARCH >= 6 && PAN_ARCH <= 7
57 enum mali_register_file_format
58 GENX(pan_fixup_blend_type)(nir_alu_type T_size, enum pipe_format format);
59 #endif
60
61 #if PAN_ARCH >= 9
62 static inline enum mali_shader_stage
pan_shader_stage(const struct pan_shader_info * info)63 pan_shader_stage(const struct pan_shader_info *info)
64 {
65 switch (info->stage) {
66 case MESA_SHADER_VERTEX:
67 return MALI_SHADER_STAGE_VERTEX;
68 case MESA_SHADER_FRAGMENT:
69 return MALI_SHADER_STAGE_FRAGMENT;
70 default:
71 return MALI_SHADER_STAGE_COMPUTE;
72 }
73 }
74 #endif
75
76 #if PAN_ARCH >= 7
77 static inline enum mali_shader_register_allocation
pan_register_allocation(unsigned work_reg_count)78 pan_register_allocation(unsigned work_reg_count)
79 {
80 return (work_reg_count <= 32)
81 ? MALI_SHADER_REGISTER_ALLOCATION_32_PER_THREAD
82 : MALI_SHADER_REGISTER_ALLOCATION_64_PER_THREAD;
83 }
84 #endif
85
86 static inline enum mali_depth_source
pan_depth_source(const struct pan_shader_info * info)87 pan_depth_source(const struct pan_shader_info *info)
88 {
89 return info->fs.writes_depth ? MALI_DEPTH_SOURCE_SHADER
90 : MALI_DEPTH_SOURCE_FIXED_FUNCTION;
91 }
92
93 #if PAN_ARCH <= 7
94 #if PAN_ARCH <= 5
95 static inline void
pan_shader_prepare_midgard_rsd(const struct pan_shader_info * info,struct MALI_RENDERER_STATE * rsd)96 pan_shader_prepare_midgard_rsd(const struct pan_shader_info *info,
97 struct MALI_RENDERER_STATE *rsd)
98 {
99 assert((info->push.count & 3) == 0);
100
101 rsd->properties.uniform_count = info->push.count / 4;
102 rsd->properties.shader_has_side_effects = info->writes_global;
103 rsd->properties.fp_mode = MALI_FP_MODE_GL_INF_NAN_ALLOWED;
104
105 /* For fragment shaders, work register count, early-z, reads at draw-time */
106
107 if (info->stage != MESA_SHADER_FRAGMENT) {
108 rsd->properties.work_register_count = info->work_reg_count;
109 } else {
110 rsd->properties.shader_reads_tilebuffer = info->fs.outputs_read;
111
112 /* However, forcing early-z in the shader overrides draw-time */
113 rsd->properties.force_early_z = info->fs.early_fragment_tests;
114 }
115 }
116
117 #else
118
119 #define pan_preloads(reg) (preload & BITFIELD64_BIT(reg))
120
121 static void
pan_make_preload(gl_shader_stage stage,uint64_t preload,struct MALI_PRELOAD * out)122 pan_make_preload(gl_shader_stage stage, uint64_t preload,
123 struct MALI_PRELOAD *out)
124 {
125 switch (stage) {
126 case MESA_SHADER_VERTEX:
127 out->vertex.position_result_address_lo = pan_preloads(58);
128 out->vertex.position_result_address_hi = pan_preloads(59);
129 out->vertex.vertex_id = pan_preloads(61);
130 out->vertex.instance_id = pan_preloads(62);
131 break;
132
133 case MESA_SHADER_FRAGMENT:
134 out->fragment.primitive_id = pan_preloads(57);
135 out->fragment.primitive_flags = pan_preloads(58);
136 out->fragment.fragment_position = pan_preloads(59);
137 out->fragment.sample_mask_id = pan_preloads(61);
138 out->fragment.coverage = true;
139 break;
140
141 default:
142 out->compute.local_invocation_xy = pan_preloads(55);
143 out->compute.local_invocation_z = pan_preloads(56);
144 out->compute.work_group_x = pan_preloads(57);
145 out->compute.work_group_y = pan_preloads(58);
146 out->compute.work_group_z = pan_preloads(59);
147 out->compute.global_invocation_x = pan_preloads(60);
148 out->compute.global_invocation_y = pan_preloads(61);
149 out->compute.global_invocation_z = pan_preloads(62);
150 break;
151 }
152 }
153
154 #if PAN_ARCH == 7
155 static inline void
pan_pack_message_preload(struct MALI_MESSAGE_PRELOAD * cfg,const struct bifrost_message_preload * msg)156 pan_pack_message_preload(struct MALI_MESSAGE_PRELOAD *cfg,
157 const struct bifrost_message_preload *msg)
158 {
159 enum mali_message_preload_register_format regfmt =
160 msg->fp16 ? MALI_MESSAGE_PRELOAD_REGISTER_FORMAT_F16
161 : MALI_MESSAGE_PRELOAD_REGISTER_FORMAT_F32;
162
163 if (msg->enabled && msg->texture) {
164 cfg->type = MALI_MESSAGE_TYPE_VAR_TEX;
165 cfg->var_tex.varying_index = msg->varying_index;
166 cfg->var_tex.texture_index = msg->texture_index;
167 cfg->var_tex.register_format = regfmt;
168 cfg->var_tex.skip = msg->skip;
169 cfg->var_tex.zero_lod = msg->zero_lod;
170 } else if (msg->enabled) {
171 cfg->type = MALI_MESSAGE_TYPE_LD_VAR;
172 cfg->ld_var.varying_index = msg->varying_index;
173 cfg->ld_var.register_format = regfmt;
174 cfg->ld_var.num_components = msg->num_components;
175 } else {
176 cfg->type = MALI_MESSAGE_TYPE_DISABLED;
177 }
178 }
179 #endif
180
/* Fill in the Bifrost/Valhall-specific RENDERER_STATE fields: uniform
 * preload counts, register allocation (v7+), system-value preloads, and
 * the fragment/vertex-specific properties. */
static inline void
pan_shader_prepare_bifrost_rsd(const struct pan_shader_info *info,
                               struct MALI_RENDERER_STATE *rsd)
{
   /* push.count is halved (rounded up) to get the uniform count —
    * NOTE(review): presumably push words are 32-bit while preload uniforms
    * are counted in 64-bit units; confirm against the descriptor layout. */
   unsigned fau_count = DIV_ROUND_UP(info->push.count, 2);
   rsd->preload.uniform_count = fau_count;

#if PAN_ARCH >= 7
   /* v7+ chooses a 32- or 64-register-per-thread allocation */
   rsd->properties.shader_register_allocation =
      pan_register_allocation(info->work_reg_count);
#endif

   pan_make_preload(info->stage, info->preload, &rsd->preload);

   if (info->stage == MESA_SHADER_FRAGMENT) {
      /* Both explicit coverage writes and discards modify coverage */
      rsd->properties.shader_modifies_coverage =
         info->fs.writes_coverage || info->fs.can_discard;

      /* Shaders with global writes must not have their pixels killed early,
       * or the side effects could be lost */
      rsd->properties.allow_forward_pixel_to_be_killed = !info->writes_global;

#if PAN_ARCH >= 7
      rsd->properties.shader_wait_dependency_6 = info->bifrost.wait_6;
      rsd->properties.shader_wait_dependency_7 = info->bifrost.wait_7;

      /* Up to two message preload slots per fragment shader */
      pan_pack_message_preload(&rsd->message_preload_1,
                               &info->bifrost.messages[0]);
      pan_pack_message_preload(&rsd->message_preload_2,
                               &info->bifrost.messages[1]);
#endif
   } else if (info->stage == MESA_SHADER_VERTEX && info->vs.secondary_enable) {
      /* Secondary vertex shader (NOTE(review): presumably the varying half
       * of a position/varying split — confirm): it lives at an offset from
       * the primary binary and gets its own preload descriptor. */
      rsd->secondary_preload.uniform_count = fau_count;

      pan_make_preload(info->stage, info->vs.secondary_preload,
                       &rsd->secondary_preload);

      rsd->secondary_shader = rsd->shader.shader + info->vs.secondary_offset;

#if PAN_ARCH >= 7
      rsd->properties.secondary_shader_register_allocation =
         pan_register_allocation(info->vs.secondary_work_reg_count);
#endif
   }
}
224
225 #endif
226
/* Pack the RENDERER_STATE fields common to all stages (shader pointer,
 * resource counts, fragment properties), then defer to the per-ISA helper
 * (Midgard for arch <= 5, Bifrost otherwise) for the rest.
 *
 * shader_ptr is the GPU address of the shader binary. */
static inline void
pan_shader_prepare_rsd(const struct pan_shader_info *shader_info,
                       mali_ptr shader_ptr, struct MALI_RENDERER_STATE *rsd)
{
#if PAN_ARCH <= 5
   /* Midgard encodes the first instruction tag in the low bits of the
    * shader pointer */
   shader_ptr |= shader_info->midgard.first_tag;
#endif

   rsd->shader.shader = shader_ptr;
   rsd->shader.attribute_count = shader_info->attribute_count;
   /* Varyings are counted across both directions */
   rsd->shader.varying_count =
      shader_info->varyings.input_count + shader_info->varyings.output_count;
   rsd->shader.texture_count = shader_info->texture_count;
   rsd->shader.sampler_count = shader_info->sampler_count;
   rsd->properties.shader_contains_barrier = shader_info->contains_barrier;
   rsd->properties.uniform_buffer_count = shader_info->ubo_count;

   if (shader_info->stage == MESA_SHADER_FRAGMENT) {
      rsd->properties.stencil_from_shader = shader_info->fs.writes_stencil;
      rsd->properties.depth_source = pan_depth_source(shader_info);

      /* This also needs to be set if the API forces per-sample
       * shading, but that'll just got ORed in */
      rsd->multisample_misc.evaluate_per_sample =
         shader_info->fs.sample_shading;
   }

#if PAN_ARCH >= 6
   pan_shader_prepare_bifrost_rsd(shader_info, rsd);
#else
   pan_shader_prepare_midgard_rsd(shader_info, rsd);
#endif
}
260 #endif /* PAN_ARCH */
261 #endif
262
263 #endif
264