1 /*
2 * Copyright © 2014-2017 Broadcom
3 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #ifndef V3D_CONTEXT_H
26 #define V3D_CONTEXT_H
27
28 #ifdef V3D_VERSION
29 #include "broadcom/common/v3d_macros.h"
30 #endif
31
32 #include <stdio.h>
33
34 #include "pipe/p_context.h"
35 #include "pipe/p_state.h"
36 #include "util/bitset.h"
37 #include "util/slab.h"
38 #include "xf86drm.h"
39 #include "drm-uapi/v3d_drm.h"
40 #include "v3d_screen.h"
41 #include "broadcom/common/v3d_limits.h"
42
43 #include "broadcom/simulator/v3d_simulator.h"
44 #include "broadcom/compiler/v3d_compiler.h"
45
46 struct v3d_job;
47 struct v3d_bo;
48 void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
49
50 #include "v3d_bufmgr.h"
51 #include "v3d_resource.h"
52 #include "v3d_cl.h"
53
/* Compile-time constant selecting the software simulator backend instead of
 * real hardware; consumed by v3d_ioctl() to route ioctls.
 */
#ifdef USE_V3D_SIMULATOR
#define using_v3d_simulator true
#else
#define using_v3d_simulator false
#endif
59
/* Dirty-state flags tracked in v3d_context::dirty.  The field is 64 bits
 * wide (note the "ull" suffix — several flags live above bit 31), and
 * v3d_compiled_shader::uniform_dirty_bits masks against these same values.
 * Gaps in the numbering are unassigned bits.
 */
#define V3D_DIRTY_BLEND               (1ull <<  0)
#define V3D_DIRTY_RASTERIZER          (1ull <<  1)
#define V3D_DIRTY_ZSA                 (1ull <<  2)
#define V3D_DIRTY_COMPTEX             (1ull <<  3)
#define V3D_DIRTY_VERTTEX             (1ull <<  4)
#define V3D_DIRTY_GEOMTEX             (1ull <<  5)
#define V3D_DIRTY_FRAGTEX             (1ull <<  6)

#define V3D_DIRTY_SHADER_IMAGE        (1ull <<  9)
#define V3D_DIRTY_BLEND_COLOR         (1ull << 10)
#define V3D_DIRTY_STENCIL_REF         (1ull << 11)
#define V3D_DIRTY_SAMPLE_STATE        (1ull << 12)
#define V3D_DIRTY_FRAMEBUFFER         (1ull << 13)
#define V3D_DIRTY_STIPPLE             (1ull << 14)
#define V3D_DIRTY_VIEWPORT            (1ull << 15)
#define V3D_DIRTY_CONSTBUF            (1ull << 16)
#define V3D_DIRTY_VTXSTATE            (1ull << 17)
#define V3D_DIRTY_VTXBUF              (1ull << 18)
#define V3D_DIRTY_SCISSOR             (1ull << 19)
#define V3D_DIRTY_FLAT_SHADE_FLAGS    (1ull << 20)
#define V3D_DIRTY_PRIM_MODE           (1ull << 21)
#define V3D_DIRTY_CLIP                (1ull << 22)
#define V3D_DIRTY_UNCOMPILED_CS       (1ull << 23)
#define V3D_DIRTY_UNCOMPILED_VS       (1ull << 24)
#define V3D_DIRTY_UNCOMPILED_GS       (1ull << 25)
#define V3D_DIRTY_UNCOMPILED_FS       (1ull << 26)

#define V3D_DIRTY_COMPILED_CS         (1ull << 29)
#define V3D_DIRTY_COMPILED_VS         (1ull << 30)
#define V3D_DIRTY_COMPILED_GS_BIN     (1ull << 31)
#define V3D_DIRTY_COMPILED_GS         (1ull << 32)
#define V3D_DIRTY_COMPILED_FS         (1ull << 33)

#define V3D_DIRTY_FS_INPUTS           (1ull << 38)
#define V3D_DIRTY_GS_INPUTS           (1ull << 39)
#define V3D_DIRTY_STREAMOUT           (1ull << 40)
#define V3D_DIRTY_OQ                  (1ull << 41)
#define V3D_DIRTY_CENTROID_FLAGS      (1ull << 42)
#define V3D_DIRTY_NOPERSPECTIVE_FLAGS (1ull << 43)
#define V3D_DIRTY_SSBO                (1ull << 44)

/* Maximum number of fragment shader input components. */
#define V3D_MAX_FS_INPUTS 64

/* Maximum scissor rects remembered per job before the per-rect tile-flush
 * optimization is disabled (see v3d_job::scissor).
 */
#define MAX_JOB_SCISSORS 16
104
/* Pre-packed sampler state variants.  One packed copy of the sampler state
 * is stored per variant (see v3d_sampler_state::sampler_state_offset);
 * names encode the texture return size (F16/32), normalization
 * (UNORM/SNORM), and channel layout (BGRA/A/LA) — presumably so the border
 * color can be pre-encoded to match the bound texture's format; confirm
 * against the state-emission code.
 */
enum v3d_sampler_state_variant {
        V3D_SAMPLER_STATE_BORDER_0000,
        V3D_SAMPLER_STATE_BORDER_0001,
        V3D_SAMPLER_STATE_BORDER_1111,
        V3D_SAMPLER_STATE_F16,
        V3D_SAMPLER_STATE_F16_UNORM,
        V3D_SAMPLER_STATE_F16_SNORM,
        V3D_SAMPLER_STATE_F16_BGRA,
        V3D_SAMPLER_STATE_F16_BGRA_UNORM,
        V3D_SAMPLER_STATE_F16_BGRA_SNORM,
        V3D_SAMPLER_STATE_F16_A,
        V3D_SAMPLER_STATE_F16_A_SNORM,
        V3D_SAMPLER_STATE_F16_A_UNORM,
        V3D_SAMPLER_STATE_F16_LA,
        V3D_SAMPLER_STATE_F16_LA_UNORM,
        V3D_SAMPLER_STATE_F16_LA_SNORM,
        V3D_SAMPLER_STATE_32,
        V3D_SAMPLER_STATE_32_UNORM,
        V3D_SAMPLER_STATE_32_SNORM,
        V3D_SAMPLER_STATE_32_A,
        V3D_SAMPLER_STATE_32_A_UNORM,
        V3D_SAMPLER_STATE_32_A_SNORM,
        V3D_SAMPLER_STATE_1010102U,
        V3D_SAMPLER_STATE_16U,
        V3D_SAMPLER_STATE_16I,
        V3D_SAMPLER_STATE_8I,
        V3D_SAMPLER_STATE_8U,

        /* Number of variants; sizes per-variant arrays. */
        V3D_SAMPLER_STATE_VARIANT_COUNT,
};
135
/* Policy argument for v3d_flush_jobs_writing_resource() /
 * v3d_flush_jobs_reading_resource(): controls which jobs touching a
 * resource actually get flushed.
 */
enum v3d_flush_cond {
        /* Flush job unless we are flushing for transform feedback, where we
         * handle flushing in the driver via the 'Wait for TF' packet.
         */
        V3D_FLUSH_DEFAULT,
        /* Always flush the job, even for cases where we would normally not
         * do it, such as transform feedback.
         */
        V3D_FLUSH_ALWAYS,
        /* Flush job if it is not the current FBO job. This is intended to
         * skip automatic flushes of the current job for resources that we
         * expect to be externally synchronized by the application using
         * glMemoryBarrier(), such as SSBOs and shader images.
         */
        V3D_FLUSH_NOT_CURRENT_JOB,
};
152
/* Driver wrapper around gallium's pipe_sampler_view. */
struct v3d_sampler_view {
        struct pipe_sampler_view base;
        uint32_t p0;
        uint32_t p1;
        /* Precomputed swizzles to pass in to the shader key. */
        uint8_t swizzle[4];

        /* V3D 3.x: Packed texture state (same layout role as
         * v3d_sampler_state::texture_shader_state).
         */
        uint8_t texture_shader_state[32];
        /* V3D 4.x: Texture state struct. */
        struct v3d_bo *bo;

        enum v3d_sampler_state_variant sampler_variant;

        /* Actual texture to be read by this sampler view. May be different
         * from base.texture in the case of having a shadow tiled copy of a
         * raster texture.
         */
        struct pipe_resource *texture;

        /* A serial ID used to identify cases where a new BO has been created
         * and we need to rebind a sampler view that was created against the
         * previous BO to point to the new one.
         */
        uint32_t serial_id;
};
178
/* Driver wrapper around gallium's pipe_sampler_state. */
struct v3d_sampler_state {
        struct pipe_sampler_state base;
        uint32_t p0;
        uint32_t p1;

        /* V3D 3.x: Packed texture state. */
        uint8_t texture_shader_state[32];
        /* V3D 4.x: Sampler state struct, with one packed copy per variant
         * at the offsets below.
         */
        struct pipe_resource *sampler_state;
        uint32_t sampler_state_offset[V3D_SAMPLER_STATE_VARIANT_COUNT];

        /* Whether the BORDER_* variants are needed for this sampler. */
        bool border_color_variants;
};
192
/* Per-shader-stage bound textures and samplers (see v3d_context::tex). */
struct v3d_texture_stateobj {
        struct pipe_sampler_view *textures[V3D_MAX_TEXTURE_SAMPLERS];
        unsigned num_textures;
        struct pipe_sampler_state *samplers[V3D_MAX_TEXTURE_SAMPLERS];
        unsigned num_samplers;
        struct v3d_cl_reloc texture_state[V3D_MAX_TEXTURE_SAMPLERS];
};

/* Parallel arrays describing a shader's uniform stream: `count` entries of
 * (contents[i], data[i]).
 */
struct v3d_shader_uniform_info {
        enum quniform_contents *contents;
        uint32_t *data;
        uint32_t count;
};
206
/* A shader as handed to us by the state tracker, before per-variant
 * compilation (see v3d_program_stateobj::bind_*).
 */
struct v3d_uncompiled_shader {
        /** A name for this program, so you can track it in shader-db output. */
        uint32_t program_id;
        /** How many variants of this program were compiled, for shader-db. */
        uint32_t compiled_variant_count;
        struct pipe_shader_state base;
        /* Transform feedback outputs captured by this shader. */
        uint32_t num_tf_outputs;
        struct v3d_varying_slot *tf_outputs;
        uint16_t tf_specs[16];
        /* TF specs variant used when point size is written. */
        uint16_t tf_specs_psiz[16];
        uint32_t num_tf_specs;

        /* For caching */
        unsigned char sha1[20];
};
222
/* A compiled shader variant: its QPU code lives in `resource` at `offset`,
 * with stage-specific metadata in prog_data.
 */
struct v3d_compiled_shader {
        struct pipe_resource *resource;
        uint32_t offset;

        /* Stage-specific program metadata; `base` aliases the common
         * prefix shared by all stages.
         */
        union {
                struct v3d_prog_data *base;
                struct v3d_vs_prog_data *vs;
                struct v3d_gs_prog_data *gs;
                struct v3d_fs_prog_data *fs;
                struct v3d_compute_prog_data *compute;
        } prog_data;

        /**
         * V3D_DIRTY_* flags that, when set in v3d->dirty, mean that the
         * uniforms have to be rewritten (and therefore the shader state
         * reemitted).
         */
        uint64_t uniform_dirty_bits;
};
242
/* Bound shader state: the uncompiled shaders from the state tracker, the
 * currently-selected compiled variants, and the per-stage variant caches.
 */
struct v3d_program_stateobj {
        /* Shaders bound by the state tracker. */
        struct v3d_uncompiled_shader *bind_vs, *bind_gs, *bind_fs, *bind_compute;
        /* Current compiled variants (cs/gs_bin are the bin-mode VS/GS). */
        struct v3d_compiled_shader *cs, *vs, *gs_bin, *gs, *fs, *compute;

        /* Per-stage map from shader key to compiled variant. */
        struct hash_table *cache[MESA_SHADER_STAGES];

        /* Scratch space for register spilling, shared by all QPU threads. */
        struct v3d_bo *spill_bo;
        int spill_size_per_thread;
};
252
/* Per-stage constant buffer bindings. */
struct v3d_constbuf_stateobj {
        struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
        uint32_t enabled_mask;
        uint32_t dirty_mask;
};

/* Bound vertex buffers. */
struct v3d_vertexbuf_stateobj {
        struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
        unsigned count;
        uint32_t enabled_mask;
        uint32_t dirty_mask;
};

/* Vertex element (attribute layout) CSO. */
struct v3d_vertex_stateobj {
        struct pipe_vertex_element pipe[V3D_MAX_VS_INPUTS / 4];
        unsigned num_elements;

        /* Packed per-element attribute records. */
        uint8_t attrs[16 * (V3D_MAX_VS_INPUTS / 4)];
        /* defaults can be NULL for some hw generation */
        struct pipe_resource *defaults;
        uint32_t defaults_offset;
};

/* Driver wrapper around a gallium stream output (TF) target. */
struct v3d_stream_output_target {
        struct pipe_stream_output_target base;
        /* Number of transform feedback vertices written to this target */
        uint32_t recorded_vertex_count;
        /* Number of vertices we've written into the buffer so far */
        uint32_t offset;
};

/* Bound stream output (transform feedback) targets. */
struct v3d_streamout_stateobj {
        struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
        unsigned num_targets;
};

/* Per-stage shader storage buffer bindings. */
struct v3d_ssbo_stateobj {
        struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
        uint32_t enabled_mask;
};
293
/* Hash table key for v3d->jobs: a job is identified by the set of
 * surfaces it renders to.
 */
struct v3d_job_key {
        struct pipe_surface *cbufs[V3D_MAX_DRAW_BUFFERS];
        struct pipe_surface *zsbuf;
        struct pipe_surface *bbuf;
};

/* Early-Z state for a job/CSO (see v3d_job::ez_state). */
enum v3d_ez_state {
        V3D_EZ_UNDECIDED = 0,
        V3D_EZ_GT_GE,
        V3D_EZ_LT_LE,
        V3D_EZ_DISABLED,
};

/* Driver wrapper around a gallium shader image binding. */
struct v3d_image_view {
        struct pipe_image_view base;
        /* V3D 4.x texture shader state struct */
        struct pipe_resource *tex_state;
        uint32_t tex_state_offset;
};

/* Per-stage shader image bindings. */
struct v3d_shaderimg_stateobj {
        struct v3d_image_view si[PIPE_MAX_SHADER_IMAGES];
        uint32_t enabled_mask;
};

/* Driver-side state for a kernel performance monitor. */
struct v3d_perfmon_state {
        /* The kernel perfmon id */
        uint32_t kperfmon_id;
        /* True if at least one job was submitted with this perfmon. */
        bool job_submitted;
        /* Fence to be signaled when the last job submitted with this perfmon
         * is executed by the GPU.
         */
        struct v3d_fence *last_job_fence;
        uint8_t counters[DRM_V3D_MAX_PERF_COUNTERS];
        uint64_t values[DRM_V3D_MAX_PERF_COUNTERS];
};
332
333 /**
334 * A complete bin/render job.
335 *
336 * This is all of the state necessary to submit a bin/render to the kernel.
337 * We want to be able to have multiple in progress at a time, so that we don't
338 * need to flush an existing CL just to switch to rendering to a new render
339 * target (which would mean reading back from the old render target when
340 * starting to render to it again).
341 */
struct v3d_job {
        struct v3d_context *v3d;
        /* Command lists: binning, render, and indirect (shared data). */
        struct v3d_cl bcl;
        struct v3d_cl rcl;
        struct v3d_cl indirect;
        struct v3d_bo *tile_alloc;
        struct v3d_bo *tile_state;

        /* The kernel submission struct filled in as the job is built. */
        struct drm_v3d_submit_cl submit;

        /**
         * Set of all BOs referenced by the job. This will be used for making
         * the list of BOs that the kernel will need to have paged in to
         * execute our job.
         */
        struct set *bos;

        /** Sum of the sizes of the BOs referenced by the job. */
        uint32_t referenced_size;

        /* Resources written by this job, and resources written under
         * transform feedback (used by the flush helpers).
         */
        struct set *write_prscs;
        struct set *tf_write_prscs;

        /* Size of the submit.bo_handles array. */
        uint32_t bo_handles_size;

        /** @{
         * Surfaces to submit rendering for.
         * For blit operations, bbuf is the source surface, and cbufs[0] is
         * the destination surface.
         */
        uint32_t nr_cbufs;
        struct pipe_surface *cbufs[V3D_MAX_DRAW_BUFFERS];
        struct pipe_surface *zsbuf;
        struct pipe_surface *bbuf;
        /** @} */
        /** @{
         * Bounding box of the scissor across all queued drawing.
         *
         * Note that the max values are exclusive.
         */
        uint32_t draw_min_x;
        uint32_t draw_min_y;
        uint32_t draw_max_x;
        uint32_t draw_max_y;

        /** @} */
        /** @{
         * List of scissor rects used for all queued drawing. All scissor
         * rects will be contained in the draw_{min/max}_{x/y} bounding box.
         *
         * This is used as an optimization when all drawing is scissored to
         * limit tile flushing only to tiles that intersect a scissor rect.
         * If scissor is used together with non-scissored drawing, then
         * the optimization is disabled.
         */
        struct {
                bool disabled;
                uint32_t count;
                struct {
                        uint32_t min_x, min_y;
                        uint32_t max_x, max_y;
                } rects[MAX_JOB_SCISSORS];
        } scissor;

        /** @} */
        /** @{
         * Width/height of the color framebuffer being rendered to,
         * for V3D_TILE_RENDERING_MODE_CONFIG.
         */
        uint32_t draw_width;
        uint32_t draw_height;
        uint32_t num_layers;

        /** @} */
        /** @{ Tile information, depending on MSAA and float color buffer. */
        uint32_t draw_tiles_x; /** @< Number of tiles wide for framebuffer. */
        uint32_t draw_tiles_y; /** @< Number of tiles high for framebuffer. */

        uint32_t tile_width; /** @< Width of a tile. */
        uint32_t tile_height; /** @< Height of a tile. */
        /** maximum internal_bpp of all color render targets. */
        uint32_t internal_bpp;

        /** Whether the current rendering is in a 4X MSAA tile buffer. */
        bool msaa;
        /** @} */

        /* Bitmask of PIPE_CLEAR_* of buffers that were cleared before the
         * first rendering.
         */
        uint32_t clear;
        /* Bitmask of PIPE_CLEAR_* of buffers that have been read by a draw
         * call without having been cleared first.
         */
        uint32_t load;
        /* Bitmask of PIPE_CLEAR_* of buffers that have been rendered to
         * (either clears or draws) and should be stored.
         */
        uint32_t store;
        /* Packed clear values, indexed by render target. */
        uint32_t clear_color[V3D_MAX_DRAW_BUFFERS][4];
        float clear_z;
        uint8_t clear_s;

        /* If TLB double-buffering is enabled for this job */
        bool double_buffer;

        /**
         * Set if some drawing (triangles, blits, or just a glClear()) has
         * been done to the FBO, meaning that we need to
         * DRM_IOCTL_V3D_SUBMIT_CL.
         */
        bool needs_flush;

        /* Set if any shader has dirtied cachelines in the TMU that need to be
         * flushed before job end.
         */
        bool tmu_dirty_rcl;

        /**
         * Set if a packet enabling TF has been emitted in the job (V3D 4.x).
         */
        bool tf_enabled;

        bool needs_primitives_generated;

        /**
         * Current EZ state for drawing. Updated at the start of draw after
         * we've decided on the shader being rendered.
         */
        enum v3d_ez_state ez_state;
        /**
         * The first EZ state that was used for drawing with a decided EZ
         * direction (so either UNDECIDED, GT, or LT).
         */
        enum v3d_ez_state first_ez_state;

        /**
         * If we have already decided if we need to disable early Z/S
         * completely for this job.
         */
        bool decided_global_ez_enable;

        /**
         * If this job has been configured to use early Z/S clear.
         */
        bool early_zs_clear;

        /**
         * Number of draw calls (not counting full buffer clears) queued in
         * the current job.
         */
        uint32_t draw_calls_queued;

        /**
         * Number of draw calls (not counting full buffer clears) queued in
         * the current job during active transform feedback.
         */
        uint32_t tf_draw_calls_queued;

        /* Key under which this job is stored in v3d_context::jobs. */
        struct v3d_job_key key;
};
504
/* The gallium context for the v3d driver: all per-context GL state, the
 * in-flight jobs, and the shader program caches.
 */
struct v3d_context {
        struct pipe_context base;

        /* DRM device fd (or simulator fd when using_v3d_simulator). */
        int fd;
        struct v3d_screen *screen;

        /** The 3D rendering job for the currently bound FBO. */
        struct v3d_job *job;

        /* Map from struct v3d_job_key to the job for that FBO.
         */
        struct hash_table *jobs;

        /**
         * Map from v3d_resource to a job writing to that resource.
         *
         * Primarily for flushing jobs rendering to textures that are now
         * being read from.
         */
        struct hash_table *write_jobs;

        struct slab_child_pool transfer_pool;
        struct blitter_context *blitter;

        /** bitfield of V3D_DIRTY_* */
        uint64_t dirty;

        /* ID allocators for shader-db program tracking and variant keys. */
        uint32_t next_uncompiled_program_id;
        uint64_t next_compiled_program_id;

        struct v3d_compiler_state *compiler_state;

        uint8_t prim_mode;

        /** Maximum index buffer valid for the current shader_rec. */
        uint32_t max_index;

        /** Sync object that our RCL or TFU job will update as its out_sync. */
        uint32_t out_sync;

        /* Stream uploader used by gallium internals. This could also be used
         * by driver internals, but we tend to use the v3d_cl.h interfaces
         * instead.
         */
        struct u_upload_mgr *uploader;
        /* State uploader used inside the driver. This is for packing bits of
         * long-term state inside buffers, since the kernel interfaces
         * allocate a page at a time.
         */
        struct u_upload_mgr *state_uploader;

        /* Internal shaders for blitting SAND-layout (8- and 30-bit) video
         * surfaces.
         */
        struct pipe_shader_state *sand8_blit_vs;
        struct pipe_shader_state *sand8_blit_fs_luma;
        struct pipe_shader_state *sand8_blit_fs_chroma;
        struct pipe_shader_state *sand30_blit_vs;
        struct pipe_shader_state *sand30_blit_fs;

        /** @{ Current pipeline state objects */
        struct pipe_scissor_state scissor;
        struct v3d_blend_state *blend;
        struct v3d_rasterizer_state *rasterizer;
        struct v3d_depth_stencil_alpha_state *zsa;

        struct v3d_program_stateobj prog;
        uint32_t compute_num_workgroups[3];
        struct v3d_bo *compute_shared_memory;

        struct v3d_vertex_stateobj *vtx;

        /* Blend color in float and pre-converted half-float forms. */
        struct {
                struct pipe_blend_color f;
                uint16_t hf[4];
        } blend_color;
        struct pipe_stencil_ref stencil_ref;
        unsigned sample_mask;
        struct pipe_framebuffer_state framebuffer;

        /* Per render target, whether we should swap the R and B fields in the
         * shader's color output and in blending. If render targets disagree
         * on the R/B swap and use the constant color, then we would need to
         * fall back to in-shader blending.
         */
        uint8_t swap_color_rb;

        /* Per render target, whether we should treat the dst alpha values as
         * one in blending.
         *
         * For RGBX formats, the tile buffer's alpha channel will be
         * undefined.
         */
        uint8_t blend_dst_alpha_one;

        bool active_queries;

        /**
         * If a compute job writes a resource read by a non-compute stage we
         * should sync on the last compute job.
         */
        bool sync_on_last_compute_job;

        /* Primitive counters (see v3d_update_primitive_counters()). */
        uint32_t tf_prims_generated;
        uint32_t prims_generated;
        bool prim_restart;

        uint32_t n_primitives_generated_queries_in_flight;

        struct pipe_poly_stipple stipple;
        struct pipe_clip_state clip;
        struct pipe_viewport_state viewport;
        struct v3d_ssbo_stateobj ssbo[PIPE_SHADER_TYPES];
        struct v3d_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
        struct v3d_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
        struct v3d_texture_stateobj tex[PIPE_SHADER_TYPES];
        struct v3d_vertexbuf_stateobj vertexbuf;
        struct v3d_streamout_stateobj streamout;
        /* Currently-bound occlusion query buffer. */
        struct v3d_bo *current_oq;
        struct pipe_resource *prim_counts;
        uint32_t prim_counts_offset;
        struct v3d_perfmon_state *active_perfmon;
        struct v3d_perfmon_state *last_perfmon;

        /* Conditional-rendering state. */
        struct pipe_query *cond_query;
        bool cond_cond;
        enum pipe_render_cond_flag cond_mode;

        int in_fence_fd;
        /** Handle of the syncobj that holds in_fence_fd for submission. */
        uint32_t in_syncobj;
        /** @} */
};
635
/* Rasterizer CSO with pre-packed depth-offset packets. */
struct v3d_rasterizer_state {
        struct pipe_rasterizer_state base;

        float point_size;

        /* Packed DEPTH_OFFSET packet, plus a variant for Z16 buffers. */
        uint8_t depth_offset[9];
        uint8_t depth_offset_z16[9];
};

/* Depth/stencil/alpha CSO with pre-packed stencil config packets. */
struct v3d_depth_stencil_alpha_state {
        struct pipe_depth_stencil_alpha_state base;

        /* Early-Z classification derived from the depth compare func. */
        enum v3d_ez_state ez_state;

        uint8_t stencil_front[6];
        uint8_t stencil_back[6];
};

/* Blend CSO. */
struct v3d_blend_state {
        struct pipe_blend_state base;

        /* Per-RT mask of whether blending is enabled. */
        uint8_t blend_enables;
};
660
/* Report a performance problem: printed to stderr under V3D_DBG(PERF) and
 * forwarded through the context's debug callback when installed.
 * NOTE: expands a variable named `v3d` that must be in scope at the call
 * site.
 */
#define perf_debug(...) do {                            \
        if (V3D_DBG(PERF))                              \
                fprintf(stderr, __VA_ARGS__);           \
        if (unlikely(v3d->base.debug.debug_message))    \
                util_debug_message(&v3d->base.debug, PERF_INFO, __VA_ARGS__); \
} while (0)
667
/** Downcast a gallium pipe_context to the driver's v3d_context. */
static inline struct v3d_context *
v3d_context(struct pipe_context *pcontext)
{
        struct v3d_context *v3d = (struct v3d_context *)pcontext;
        return v3d;
}
673
/** Downcast a gallium pipe_sampler_view to the driver's v3d_sampler_view. */
static inline struct v3d_sampler_view *
v3d_sampler_view(struct pipe_sampler_view *psview)
{
        struct v3d_sampler_view *sview = (struct v3d_sampler_view *)psview;
        return sview;
}
679
/** Downcast a gallium pipe_sampler_state to the driver's v3d_sampler_state. */
static inline struct v3d_sampler_state *
v3d_sampler_state(struct pipe_sampler_state *psampler)
{
        struct v3d_sampler_state *sampler = (struct v3d_sampler_state *)psampler;
        return sampler;
}
685
/** Downcast a gallium stream output target to the driver's wrapper. */
static inline struct v3d_stream_output_target *
v3d_stream_output_target(struct pipe_stream_output_target *ptarget)
{
        struct v3d_stream_output_target *target =
                (struct v3d_stream_output_target *)ptarget;
        return target;
}
691
692 static inline uint32_t
v3d_stream_output_target_get_vertex_count(struct pipe_stream_output_target * ptarget)693 v3d_stream_output_target_get_vertex_count(struct pipe_stream_output_target *ptarget)
694 {
695 return v3d_stream_output_target(ptarget)->recorded_vertex_count;
696 }
697
/* Driver query (performance counter) enumeration. */
int v3d_get_driver_query_group_info(struct pipe_screen *pscreen,
                                    unsigned index,
                                    struct pipe_driver_query_group_info *info);
int v3d_get_driver_query_info(struct pipe_screen *pscreen, unsigned index,
                              struct pipe_driver_query_info *info);

/* Context creation and per-module initialization hooks. */
struct pipe_context *v3d_context_create(struct pipe_screen *pscreen,
                                        void *priv, unsigned flags);
void v3d_program_init(struct pipe_context *pctx);
void v3d_program_fini(struct pipe_context *pctx);
void v3d_query_init(struct pipe_context *pctx);
709
710 static inline int
v3d_ioctl(int fd,unsigned long request,void * arg)711 v3d_ioctl(int fd, unsigned long request, void *arg)
712 {
713 if (using_v3d_simulator)
714 return v3d_simulator_ioctl(fd, request, arg);
715 else
716 return drmIoctl(fd, request, arg);
717 }
718
719 static inline bool
v3d_transform_feedback_enabled(struct v3d_context * v3d)720 v3d_transform_feedback_enabled(struct v3d_context *v3d)
721 {
722 return (v3d->prog.bind_vs->num_tf_specs != 0 ||
723 (v3d->prog.bind_gs && v3d->prog.bind_gs->num_tf_specs != 0)) &&
724 v3d->active_queries;
725 }
726
/* Uniform stream construction for a compiled shader. */
void v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader);
struct v3d_cl_reloc v3d_write_uniforms(struct v3d_context *v3d,
                                       struct v3d_job *job,
                                       struct v3d_compiled_shader *shader,
                                       enum pipe_shader_type stage);

/* Job lifecycle: creation, BO/resource tracking, flushing and submission. */
void v3d_flush(struct pipe_context *pctx);
void v3d_job_init(struct v3d_context *v3d);
struct v3d_job *v3d_job_create(struct v3d_context *v3d);
void v3d_job_free(struct v3d_context *v3d, struct v3d_job *job);
struct v3d_job *v3d_get_job(struct v3d_context *v3d,
                            uint32_t nr_cbufs,
                            struct pipe_surface **cbufs,
                            struct pipe_surface *zsbuf,
                            struct pipe_surface *bbuf);
struct v3d_job *v3d_get_job_for_fbo(struct v3d_context *v3d);
void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
void v3d_job_add_write_resource(struct v3d_job *job, struct pipe_resource *prsc);
void v3d_job_add_tf_write_resource(struct v3d_job *job, struct pipe_resource *prsc);
void v3d_job_submit(struct v3d_context *v3d, struct v3d_job *job);
void v3d_flush_jobs_using_bo(struct v3d_context *v3d, struct v3d_bo *bo);
void v3d_flush_jobs_writing_resource(struct v3d_context *v3d,
                                     struct pipe_resource *prsc,
                                     enum v3d_flush_cond flush_cond,
                                     bool is_compute_pipeline);
void v3d_flush_jobs_reading_resource(struct v3d_context *v3d,
                                     struct pipe_resource *prsc,
                                     enum v3d_flush_cond flush_cond,
                                     bool is_compute_pipeline);
/* Shader variant selection for the current state. */
void v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode);
void v3d_update_compiled_cs(struct v3d_context *v3d);

/* Format support queries, parameterized by hardware generation. */
bool v3d_rt_format_supported(const struct v3d_device_info *devinfo,
                             enum pipe_format f);
bool v3d_tex_format_supported(const struct v3d_device_info *devinfo,
                              enum pipe_format f);
uint8_t v3d_get_rt_format(const struct v3d_device_info *devinfo, enum pipe_format f);
uint8_t v3d_get_tex_format(const struct v3d_device_info *devinfo, enum pipe_format f);
uint8_t v3d_get_tex_return_size(const struct v3d_device_info *devinfo,
                                enum pipe_format f);
uint8_t v3d_get_tex_return_channels(const struct v3d_device_info *devinfo,
                                    enum pipe_format f);
const uint8_t *v3d_get_format_swizzle(const struct v3d_device_info *devinfo,
                                      enum pipe_format f);
bool v3d_format_supports_tlb_msaa_resolve(const struct v3d_device_info *devinfo,
                                          enum pipe_format f);

/* Queries, blits and mipmap generation. */
void v3d_init_query_functions(struct v3d_context *v3d);
void v3d_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
void v3d_blitter_save(struct v3d_context *v3d, bool op_blit, bool render_cond);
bool v3d_generate_mipmap(struct pipe_context *pctx,
                         struct pipe_resource *prsc,
                         enum pipe_format format,
                         unsigned int base_level,
                         unsigned int last_level,
                         unsigned int first_layer,
                         unsigned int last_layer);

/* Fence management. */
void
v3d_fence_unreference(struct v3d_fence **fence);

struct v3d_fence *v3d_fence_create(struct v3d_context *v3d, int fd);

bool v3d_fence_wait(struct v3d_screen *screen,
                    struct v3d_fence *fence,
                    uint64_t timeout_ns);

int v3d_fence_context_init(struct v3d_context *v3d);
void v3d_fence_context_finish(struct v3d_context *v3d);

/* Primitive counters and miscellaneous draw-state helpers. */
void v3d_update_primitive_counters(struct v3d_context *v3d);

bool v3d_line_smoothing_enabled(struct v3d_context *v3d);

float v3d_get_real_line_width(struct v3d_context *v3d);

void v3d_ensure_prim_counts_allocated(struct v3d_context *ctx);

void v3d_flag_dirty_sampler_state(struct v3d_context *v3d,
                                  enum pipe_shader_type shader);

void v3d_get_tile_buffer_size(const struct v3d_device_info *devinfo,
                              bool is_msaa,
                              bool double_buffer,
                              uint32_t nr_cbufs,
                              struct pipe_surface **cbufs,
                              struct pipe_surface *bbuf,
                              uint32_t *tile_width,
                              uint32_t *tile_height,
                              uint32_t *max_bpp);

bool v3d_render_condition_check(struct v3d_context *v3d);

/* On-disk shader cache (optional at build time). */
#ifdef ENABLE_SHADER_CACHE
struct v3d_compiled_shader *v3d_disk_cache_retrieve(struct v3d_context *v3d,
                                                    const struct v3d_key *key,
                                                    const struct v3d_uncompiled_shader *uncompiled);

void v3d_disk_cache_store(struct v3d_context *v3d,
                          const struct v3d_key *key,
                          const struct v3d_uncompiled_shader *uncompiled,
                          const struct v3d_compiled_shader *shader,
                          uint64_t *qpu_insts,
                          uint32_t qpu_size);
#endif /* ENABLE_SHADER_CACHE */
832
833 /* Helper to call hw ver specific functions */
834 #define v3d_X(devinfo, thing) ({ \
835 __typeof(&v3d42_##thing) v3d_X_thing; \
836 switch (devinfo->ver) { \
837 case 42: \
838 v3d_X_thing = &v3d42_##thing; \
839 break; \
840 case 71: \
841 v3d_X_thing = &v3d71_##thing; \
842 break; \
843 default: \
844 unreachable("Unsupported hardware generation"); \
845 } \
846 v3d_X_thing; \
847 })
848
/* FIXME: The same for vulkan/opengl. Common place? define it at the
 * v3d_packet files?
 */
/* Fixed-point subpixel granularity of clipper/viewport coordinates, per
 * hardware generation.
 */
#define V3D42_CLIPPER_XY_GRANULARITY 256.0f
#define V3D71_CLIPPER_XY_GRANULARITY 64.0f

/* Helper to get hw-specific macro values: like v3d_X() but selects between
 * the V3D42_ and V3D71_ constants for the running generation.
 */
#define V3DV_X(devinfo, thing) ({                       \
        __typeof(V3D42_##thing) V3D_X_THING;            \
        switch (devinfo->ver) {                         \
        case 42:                                        \
                V3D_X_THING = V3D42_##thing;            \
                break;                                  \
        case 71:                                        \
                V3D_X_THING = V3D71_##thing;            \
                break;                                  \
        default:                                        \
                unreachable("Unsupported hardware generation"); \
        }                                               \
        V3D_X_THING;                                    \
})
870
/* Pull in the per-generation prototypes. When compiled from a
 * version-specific file (v3dX already defined by v3d_macros.h), declare
 * only that generation; otherwise declare the v3d42_* and v3d71_* sets so
 * version-independent code can dispatch via v3d_X().
 */
#ifdef v3dX
#  include "v3dx_context.h"
#else
#  define v3dX(x) v3d42_##x
#  include "v3dx_context.h"
#  undef v3dX

#  define v3dX(x) v3d71_##x
#  include "v3dx_context.h"
#  undef v3dX
#endif
882
883 #endif /* V3D_CONTEXT_H */
884