1 /*
2 * Copyright © 2014-2017 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "util/u_pack_color.h"
25 #include "util/u_upload_mgr.h"
26
27 #include "v3d_context.h"
28 #include "compiler/v3d_compiler.h"
29
/* We don't expect that the packets we use in this file change across
 * hw versions, so we just include the v42 header directly.
 */
33 #include "broadcom/cle/v3d_packet_v42_pack.h"
34
35 static uint32_t
get_texrect_scale(struct v3d_texture_stateobj * texstate,enum quniform_contents contents,uint32_t data)36 get_texrect_scale(struct v3d_texture_stateobj *texstate,
37 enum quniform_contents contents,
38 uint32_t data)
39 {
40 struct pipe_sampler_view *texture = texstate->textures[data];
41 uint32_t dim;
42
43 if (contents == QUNIFORM_TEXRECT_SCALE_X)
44 dim = texture->texture->width0;
45 else
46 dim = texture->texture->height0;
47
48 return fui(1.0f / dim);
49 }
50
51 static uint32_t
get_texture_size(struct v3d_texture_stateobj * texstate,enum quniform_contents contents,uint32_t data)52 get_texture_size(struct v3d_texture_stateobj *texstate,
53 enum quniform_contents contents,
54 uint32_t data)
55 {
56 struct pipe_sampler_view *texture = texstate->textures[data];
57 switch (contents) {
58 case QUNIFORM_TEXTURE_WIDTH:
59 if (texture->target == PIPE_BUFFER) {
60 return texture->u.buf.size /
61 util_format_get_blocksize(texture->format);
62 } else {
63 return u_minify(texture->texture->width0,
64 texture->u.tex.first_level);
65 }
66 case QUNIFORM_TEXTURE_HEIGHT:
67 return u_minify(texture->texture->height0,
68 texture->u.tex.first_level);
69 case QUNIFORM_TEXTURE_DEPTH:
70 assert(texture->target != PIPE_BUFFER);
71 return u_minify(texture->texture->depth0,
72 texture->u.tex.first_level);
73 case QUNIFORM_TEXTURE_ARRAY_SIZE:
74 assert(texture->target != PIPE_BUFFER);
75 if (texture->target != PIPE_TEXTURE_CUBE_ARRAY) {
76 return texture->texture->array_size;
77 } else {
78 assert(texture->texture->array_size % 6 == 0);
79 return texture->texture->array_size / 6;
80 }
81 case QUNIFORM_TEXTURE_LEVELS:
82 assert(texture->target != PIPE_BUFFER);
83 return (texture->u.tex.last_level -
84 texture->u.tex.first_level) + 1;
85 default:
86 unreachable("Bad texture size field");
87 }
88 }
89
90 static uint32_t
get_image_size(struct v3d_shaderimg_stateobj * shaderimg,enum quniform_contents contents,uint32_t data)91 get_image_size(struct v3d_shaderimg_stateobj *shaderimg,
92 enum quniform_contents contents,
93 uint32_t data)
94 {
95 struct v3d_image_view *image = &shaderimg->si[data];
96
97 switch (contents) {
98 case QUNIFORM_IMAGE_WIDTH:
99 if (image->base.resource->target == PIPE_BUFFER) {
100 return image->base.u.buf.size /
101 util_format_get_blocksize(image->base.format);
102 } else {
103 return u_minify(image->base.resource->width0,
104 image->base.u.tex.level);
105 }
106 case QUNIFORM_IMAGE_HEIGHT:
107 assert(image->base.resource->target != PIPE_BUFFER);
108 return u_minify(image->base.resource->height0,
109 image->base.u.tex.level);
110 case QUNIFORM_IMAGE_DEPTH:
111 assert(image->base.resource->target != PIPE_BUFFER);
112 return u_minify(image->base.resource->depth0,
113 image->base.u.tex.level);
114 case QUNIFORM_IMAGE_ARRAY_SIZE:
115 assert(image->base.resource->target != PIPE_BUFFER);
116 if (image->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY) {
117 return image->base.resource->array_size;
118 } else {
119 assert(image->base.resource->array_size % 6 == 0);
120 return image->base.resource->array_size / 6;
121 }
122 default:
123 unreachable("Bad texture size field");
124 }
125 }
126
127 /** Writes the V3D 4.x TMU configuration parameter 0. */
128 static void
write_tmu_p0(struct v3d_job * job,struct v3d_cl_out ** uniforms,struct v3d_texture_stateobj * texstate,uint32_t data)129 write_tmu_p0(struct v3d_job *job,
130 struct v3d_cl_out **uniforms,
131 struct v3d_texture_stateobj *texstate,
132 uint32_t data)
133 {
134 int unit = v3d_unit_data_get_unit(data);
135 struct pipe_sampler_view *psview = texstate->textures[unit];
136 struct v3d_sampler_view *sview = v3d_sampler_view(psview);
137 /* GL_OES_texture_buffer spec:
138 * "If no buffer object is bound to the buffer texture, the
139 * results of the texel access are undefined."
140 *
141 * This can be interpreted as allowing any result to come back, but
142 * not terminate the program (and some tests interpret that).
143 *
144 * FIXME: just return is not a full valid solution, as it could still
145 * try to get a wrong address for the shader state address. Perhaps we
146 * would need to set up a BO with a "default texture state"
147 */
148 if (sview == NULL)
149 return;
150
151 struct v3d_resource *rsc = v3d_resource(sview->texture);
152
153 cl_aligned_reloc(&job->indirect, uniforms, sview->bo,
154 v3d_unit_data_get_offset(data));
155 v3d_job_add_bo(job, rsc->bo);
156 }
157
158 static void
write_image_tmu_p0(struct v3d_job * job,struct v3d_cl_out ** uniforms,struct v3d_shaderimg_stateobj * img,uint32_t data)159 write_image_tmu_p0(struct v3d_job *job,
160 struct v3d_cl_out **uniforms,
161 struct v3d_shaderimg_stateobj *img,
162 uint32_t data)
163 {
164 /* Extract the image unit from the top bits, and the compiler's
165 * packed p0 from the bottom.
166 */
167 uint32_t unit = data >> 24;
168 uint32_t p0 = data & 0x00ffffff;
169
170 struct v3d_image_view *iview = &img->si[unit];
171 struct v3d_resource *rsc = v3d_resource(iview->base.resource);
172
173 cl_aligned_reloc(&job->indirect, uniforms,
174 v3d_resource(iview->tex_state)->bo,
175 iview->tex_state_offset | p0);
176 v3d_job_add_bo(job, rsc->bo);
177 }
178
179 /** Writes the V3D 4.x TMU configuration parameter 1. */
180 static void
write_tmu_p1(struct v3d_job * job,struct v3d_cl_out ** uniforms,struct v3d_texture_stateobj * texstate,uint32_t data)181 write_tmu_p1(struct v3d_job *job,
182 struct v3d_cl_out **uniforms,
183 struct v3d_texture_stateobj *texstate,
184 uint32_t data)
185 {
186 uint32_t unit = v3d_unit_data_get_unit(data);
187 struct pipe_sampler_state *psampler = texstate->samplers[unit];
188 struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);
189 struct pipe_sampler_view *psview = texstate->textures[unit];
190 struct v3d_sampler_view *sview = v3d_sampler_view(psview);
191 int variant = 0;
192
193 /* If we are being asked by the compiler to write parameter 1, then we
194 * need that. So if we are at this point, we should expect to have a
195 * sampler and psampler. As an additional assert, we can check that we
196 * are not on a texel buffer case, as these don't have a sampler.
197 */
198 assert(psview->target != PIPE_BUFFER);
199 assert(sampler);
200 assert(psampler);
201
202 if (sampler->border_color_variants)
203 variant = sview->sampler_variant;
204
205 cl_aligned_reloc(&job->indirect, uniforms,
206 v3d_resource(sampler->sampler_state)->bo,
207 sampler->sampler_state_offset[variant] |
208 v3d_unit_data_get_offset(data));
209 }
210
/**
 * Walks the compiled shader's uniform list and emits one 32-bit slot per
 * entry into the job's indirect CL, resolving each quniform_contents kind
 * against the current context state (viewport, textures, UBOs, SSBOs,
 * images, compute state, ...).
 *
 * Returns a reloc (BO + offset) pointing at the start of the emitted
 * uniform stream; an extra reference on the BO is taken for the caller.
 */
struct v3d_cl_reloc
v3d_write_uniforms(struct v3d_context *v3d, struct v3d_job *job,
                   struct v3d_compiled_shader *shader,
                   enum pipe_shader_type stage)
{
        struct v3d_device_info *devinfo = &v3d->screen->devinfo;
        struct v3d_constbuf_stateobj *cb = &v3d->constbuf[stage];
        struct v3d_texture_stateobj *texstate = &v3d->tex[stage];
        struct v3d_uniform_list *uinfo = &shader->prog_data.base->uniforms;
        /* Constant buffer 0 holds the gallium user uniforms (may be NULL
         * when no user buffer is bound).
         */
        const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;

        /* The hardware always pre-fetches the next uniform (also when there
         * aren't any), so we always allocate space for an extra slot. This
         * fixes MMU exceptions reported since Linux kernel 5.4 when the
         * uniforms fill up the tail bytes of a page in the indirect
         * BO. In that scenario, when the hardware pre-fetches after reading
         * the last uniform it will read beyond the end of the page and trigger
         * the MMU exception.
         */
        v3d_cl_ensure_space(&job->indirect, (uinfo->count + 1) * 4, 4);

        struct v3d_cl_reloc uniform_stream = cl_get_address(&job->indirect);
        /* Keep the stream BO alive for the reloc we hand back. */
        v3d_bo_reference(uniform_stream.bo);

        struct v3d_cl_out *uniforms =
                cl_start(&job->indirect);

        /* Every iteration emits exactly one 4-byte slot, in uinfo order;
         * the emission order must match what the compiler laid out.
         */
        for (int i = 0; i < uinfo->count; i++) {
                uint32_t data = uinfo->data[i];

                switch (uinfo->contents[i]) {
                case QUNIFORM_CONSTANT:
                        cl_aligned_u32(&uniforms, data);
                        break;
                case QUNIFORM_UNIFORM:
                        /* 'data' is an index into the user uniform buffer. */
                        cl_aligned_u32(&uniforms, gallium_uniforms[data]);
                        break;
                case QUNIFORM_VIEWPORT_X_SCALE: {
                        float clipper_xy_granularity = V3DV_X(devinfo, CLIPPER_XY_GRANULARITY);
                        cl_aligned_f(&uniforms, v3d->viewport.scale[0] * clipper_xy_granularity);
                        break;
                }
                case QUNIFORM_VIEWPORT_Y_SCALE: {
                        float clipper_xy_granularity = V3DV_X(devinfo, CLIPPER_XY_GRANULARITY);
                        cl_aligned_f(&uniforms, v3d->viewport.scale[1] * clipper_xy_granularity);
                        break;
                }
                case QUNIFORM_VIEWPORT_Z_OFFSET:
                        cl_aligned_f(&uniforms, v3d->viewport.translate[2]);
                        break;
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        cl_aligned_f(&uniforms, v3d->viewport.scale[2]);
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        /* 'data' encodes plane index * 4 + component. */
                        cl_aligned_f(&uniforms,
                                     v3d->clip.ucp[data / 4][data % 4]);
                        break;

                case QUNIFORM_TMU_CONFIG_P0:
                        write_tmu_p0(job, &uniforms, texstate, data);
                        break;

                case QUNIFORM_TMU_CONFIG_P1:
                        write_tmu_p1(job, &uniforms, texstate, data);
                        break;

                case QUNIFORM_IMAGE_TMU_CONFIG_P0:
                        write_image_tmu_p0(job, &uniforms,
                                           &v3d->shaderimg[stage], data);
                        break;

                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                        cl_aligned_u32(&uniforms,
                                       get_texrect_scale(texstate,
                                                         uinfo->contents[i],
                                                         data));
                        break;

                case QUNIFORM_TEXTURE_WIDTH:
                case QUNIFORM_TEXTURE_HEIGHT:
                case QUNIFORM_TEXTURE_DEPTH:
                case QUNIFORM_TEXTURE_ARRAY_SIZE:
                case QUNIFORM_TEXTURE_LEVELS:
                        cl_aligned_u32(&uniforms,
                                       get_texture_size(texstate,
                                                        uinfo->contents[i],
                                                        data));
                        break;

                case QUNIFORM_IMAGE_WIDTH:
                case QUNIFORM_IMAGE_HEIGHT:
                case QUNIFORM_IMAGE_DEPTH:
                case QUNIFORM_IMAGE_ARRAY_SIZE:
                        cl_aligned_u32(&uniforms,
                                       get_image_size(&v3d->shaderimg[stage],
                                                      uinfo->contents[i],
                                                      data));
                        break;

                case QUNIFORM_LINE_WIDTH:
                        cl_aligned_f(&uniforms,
                                     v3d->rasterizer->base.line_width);
                        break;

                case QUNIFORM_AA_LINE_WIDTH:
                        cl_aligned_f(&uniforms, v3d_get_real_line_width(v3d));
                        break;

                case QUNIFORM_UBO_ADDR: {
                        uint32_t unit = v3d_unit_data_get_unit(data);
                        /* Constant buffer 0 may be a system memory pointer,
                         * in which case we want to upload a shadow copy to
                         * the GPU.
                         */
                        if (!cb->cb[unit].buffer) {
                                u_upload_data(v3d->uploader, 0,
                                              cb->cb[unit].buffer_size, 16,
                                              cb->cb[unit].user_buffer,
                                              &cb->cb[unit].buffer_offset,
                                              &cb->cb[unit].buffer);
                        }

                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d_resource(cb->cb[unit].buffer)->bo,
                                         cb->cb[unit].buffer_offset +
                                         v3d_unit_data_get_offset(data));
                        break;
                }

                case QUNIFORM_SSBO_OFFSET: {
                        struct pipe_shader_buffer *sb =
                                &v3d->ssbo[stage].sb[data];

                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d_resource(sb->buffer)->bo,
                                         sb->buffer_offset);
                        break;
                }

                case QUNIFORM_GET_SSBO_SIZE:
                        cl_aligned_u32(&uniforms,
                                       v3d->ssbo[stage].sb[data].buffer_size);
                        break;

                case QUNIFORM_TEXTURE_FIRST_LEVEL:
                        /* Emitted as float; the shader consumes it as one. */
                        cl_aligned_f(&uniforms,
                                     texstate->textures[data]->u.tex.first_level);
                        break;

                case QUNIFORM_SPILL_OFFSET:
                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d->prog.spill_bo, 0);
                        break;

                case QUNIFORM_SPILL_SIZE_PER_THREAD:
                        cl_aligned_u32(&uniforms,
                                       v3d->prog.spill_size_per_thread);
                        break;

                case QUNIFORM_NUM_WORK_GROUPS:
                        /* 'data' selects the X/Y/Z dimension. */
                        cl_aligned_u32(&uniforms,
                                       v3d->compute_num_workgroups[data]);
                        break;

                case QUNIFORM_SHARED_OFFSET:
                        cl_aligned_reloc(&job->indirect, &uniforms,
                                         v3d->compute_shared_memory, 0);
                        break;

                case QUNIFORM_FB_LAYERS:
                        cl_aligned_u32(&uniforms, job->num_layers);
                        break;

                default:
                        unreachable("Unknown QUNIFORM");

                }
#if 0
                /* Debug dump of each emitted uniform slot. */
                uint32_t written_val = *((uint32_t *)uniforms - 1);
                fprintf(stderr, "shader %p[%d]: 0x%08x / 0x%08x (%f) ",
                        shader, i, __gen_address_offset(&uniform_stream) + i * 4,
                        written_val, uif(written_val));
                vir_dump_uniform(uinfo->contents[i], data);
                fprintf(stderr, "\n");
#endif
        }

        cl_end(&job->indirect, uniforms);

        return uniform_stream;
}
404
405 void
v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader * shader)406 v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
407 {
408 uint64_t dirty = 0;
409
410 for (int i = 0; i < shader->prog_data.base->uniforms.count; i++) {
411 switch (shader->prog_data.base->uniforms.contents[i]) {
412 case QUNIFORM_CONSTANT:
413 break;
414 case QUNIFORM_UNIFORM:
415 case QUNIFORM_UBO_ADDR:
416 dirty |= V3D_DIRTY_CONSTBUF;
417 break;
418
419 case QUNIFORM_VIEWPORT_X_SCALE:
420 case QUNIFORM_VIEWPORT_Y_SCALE:
421 case QUNIFORM_VIEWPORT_Z_OFFSET:
422 case QUNIFORM_VIEWPORT_Z_SCALE:
423 dirty |= V3D_DIRTY_VIEWPORT;
424 break;
425
426 case QUNIFORM_USER_CLIP_PLANE:
427 dirty |= V3D_DIRTY_CLIP;
428 break;
429
430 case QUNIFORM_TMU_CONFIG_P0:
431 case QUNIFORM_TMU_CONFIG_P1:
432 case QUNIFORM_TEXTURE_CONFIG_P1:
433 case QUNIFORM_TEXTURE_FIRST_LEVEL:
434 case QUNIFORM_TEXRECT_SCALE_X:
435 case QUNIFORM_TEXRECT_SCALE_Y:
436 case QUNIFORM_TEXTURE_WIDTH:
437 case QUNIFORM_TEXTURE_HEIGHT:
438 case QUNIFORM_TEXTURE_DEPTH:
439 case QUNIFORM_TEXTURE_ARRAY_SIZE:
440 case QUNIFORM_TEXTURE_LEVELS:
441 case QUNIFORM_SPILL_OFFSET:
442 case QUNIFORM_SPILL_SIZE_PER_THREAD:
443 /* We could flag this on just the stage we're
444 * compiling for, but it's not passed in.
445 */
446 dirty |= V3D_DIRTY_FRAGTEX | V3D_DIRTY_VERTTEX |
447 V3D_DIRTY_GEOMTEX | V3D_DIRTY_COMPTEX;
448 break;
449
450 case QUNIFORM_SSBO_OFFSET:
451 case QUNIFORM_GET_SSBO_SIZE:
452 dirty |= V3D_DIRTY_SSBO;
453 break;
454
455 case QUNIFORM_IMAGE_TMU_CONFIG_P0:
456 case QUNIFORM_IMAGE_WIDTH:
457 case QUNIFORM_IMAGE_HEIGHT:
458 case QUNIFORM_IMAGE_DEPTH:
459 case QUNIFORM_IMAGE_ARRAY_SIZE:
460 dirty |= V3D_DIRTY_SHADER_IMAGE;
461 break;
462
463 case QUNIFORM_LINE_WIDTH:
464 case QUNIFORM_AA_LINE_WIDTH:
465 dirty |= V3D_DIRTY_RASTERIZER;
466 break;
467
468 case QUNIFORM_NUM_WORK_GROUPS:
469 case QUNIFORM_SHARED_OFFSET:
470 /* Compute always recalculates uniforms. */
471 break;
472
473 case QUNIFORM_FB_LAYERS:
474 dirty |= V3D_DIRTY_FRAMEBUFFER;
475 break;
476
477 default:
478 assert(quniform_contents_is_texture_p0(shader->prog_data.base->uniforms.contents[i]));
479 dirty |= V3D_DIRTY_FRAGTEX | V3D_DIRTY_VERTTEX |
480 V3D_DIRTY_GEOMTEX | V3D_DIRTY_COMPTEX;
481 break;
482 }
483 }
484
485 shader->uniform_dirty_bits = dirty;
486 }
487