/*
 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#ifndef RADEON_WINSYS_H
#define RADEON_WINSYS_H

/* The public winsys interface header for the radeon driver. */

/* Skip command submission. Same as RADEON_NOOP=1. */
#define RADEON_FLUSH_NOOP                     (1u << 29)

/* Toggle the secure submission boolean after the flush */
#define RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION (1u << 30)

/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW    (1u << 31)

#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW                                                   \
   (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)
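
/* Illustrative sketch (not part of the original header): these RADEON_FLUSH_*
 * bits are combined with PIPE_FLUSH_* flags and passed to the cs_flush hook of
 * struct radeon_winsys, declared further below. Assuming `ws` and `cs` are
 * valid winsys and command-stream pointers:
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    // Submit asynchronously and let the next gfx IB start right away.
 *    ws->cs_flush(cs, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, &fence);
 */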

#include "amd/common/ac_gpu_info.h"
#include "amd/common/ac_surface.h"
#include "pipebuffer/pb_buffer.h"

/* Tiling flags. */
enum radeon_bo_layout
{
   RADEON_LAYOUT_LINEAR = 0,
   RADEON_LAYOUT_TILED,
   RADEON_LAYOUT_SQUARETILED,

   RADEON_LAYOUT_UNKNOWN
};

enum radeon_bo_domain
{ /* bitfield */
  RADEON_DOMAIN_GTT = 2,
  RADEON_DOMAIN_VRAM = 4,
  RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT,
  RADEON_DOMAIN_GDS = 8,
  RADEON_DOMAIN_OA = 16,
  RADEON_DOMAIN_DOORBELL = 32,
};

enum radeon_bo_flag
{ /* bitfield */
  RADEON_FLAG_GTT_WC = (1 << 0),
  RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),
  RADEON_FLAG_NO_SUBALLOC = (1 << 2),
  RADEON_FLAG_SPARSE = (1 << 3),
  RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4),
  RADEON_FLAG_32BIT = (1 << 6),
  RADEON_FLAG_ENCRYPTED = (1 << 7),
  RADEON_FLAG_GL2_BYPASS = (1 << 8), /* only gfx9 and newer */
  RADEON_FLAG_DRIVER_INTERNAL = (1 << 9),
   /* Discard on eviction (instead of moving the buffer to GTT).
    * This guarantees that this buffer will never be moved to GTT.
    */
  RADEON_FLAG_DISCARDABLE = (1 << 10),
  RADEON_FLAG_WINSYS_SLAB_BACKING = (1 << 11), /* only used by the winsys */
  RADEON_FLAG_GFX12_ALLOW_DCC = (1 << 12), /* allow DCC, VRAM only */
  RADEON_FLAG_CLEAR_VRAM = (1 << 13),
};

static inline void
si_res_print_flags(enum radeon_bo_flag flags) {
   if (flags & RADEON_FLAG_GTT_WC)
      fprintf(stderr, "GTT_WC ");
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      fprintf(stderr, "NO_CPU_ACCESS ");
   if (flags & RADEON_FLAG_NO_SUBALLOC)
      fprintf(stderr, "NO_SUBALLOC ");
   if (flags & RADEON_FLAG_SPARSE)
      fprintf(stderr, "SPARSE ");
   if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING)
      fprintf(stderr, "NO_INTERPROCESS_SHARING ");
   if (flags & RADEON_FLAG_32BIT)
      fprintf(stderr, "32BIT ");
   if (flags & RADEON_FLAG_ENCRYPTED)
      fprintf(stderr, "ENCRYPTED ");
   if (flags & RADEON_FLAG_GL2_BYPASS)
      fprintf(stderr, "GL2_BYPASS ");
   if (flags & RADEON_FLAG_DRIVER_INTERNAL)
      fprintf(stderr, "DRIVER_INTERNAL ");
   if (flags & RADEON_FLAG_DISCARDABLE)
      fprintf(stderr, "DISCARDABLE ");
   if (flags & RADEON_FLAG_GFX12_ALLOW_DCC)
      fprintf(stderr, "GFX12_ALLOW_DCC ");
}

enum radeon_map_flags
{
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_MAP_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
};
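
/* Illustrative sketch (not part of the original header): a short-lived CPU
 * read through buffer_map/buffer_unmap, assuming `ws` and `buf` are valid
 * winsys and buffer pointers. Because RADEON_MAP_TEMPORARY is set, the caller
 * must unmap the buffer again.
 *
 *    void *ptr = ws->buffer_map(ws, buf, NULL,
 *                               PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
 *    if (ptr) {
 *       // ... read the contents ...
 *       ws->buffer_unmap(ws, buf);
 *    }
 */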

#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)

enum radeon_value_id
{
   RADEON_REQUESTED_VRAM_MEMORY,
   RADEON_REQUESTED_GTT_MEMORY,
   RADEON_MAPPED_VRAM,
   RADEON_MAPPED_GTT,
   RADEON_SLAB_WASTED_VRAM,
   RADEON_SLAB_WASTED_GTT,
   RADEON_BUFFER_WAIT_TIME_NS,
   RADEON_NUM_MAPPED_BUFFERS,
   RADEON_TIMESTAMP,
   RADEON_NUM_GFX_IBS,
   RADEON_NUM_SDMA_IBS,
   RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER,
   RADEON_NUM_BYTES_MOVED,
   RADEON_NUM_EVICTIONS,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
   RADEON_VRAM_USAGE,
   RADEON_VRAM_VIS_USAGE,
   RADEON_GTT_USAGE,
   RADEON_GPU_TEMPERATURE,
   RADEON_CURRENT_SCLK,
   RADEON_CURRENT_MCLK,
   RADEON_CS_THREAD_TIME,
};

enum radeon_ctx_priority
{
   RADEON_CTX_PRIORITY_LOW = 0,
   RADEON_CTX_PRIORITY_MEDIUM,
   RADEON_CTX_PRIORITY_HIGH,
   RADEON_CTX_PRIORITY_REALTIME,
};

enum radeon_ctx_pstate
{
   RADEON_CTX_PSTATE_NONE = 0,
   RADEON_CTX_PSTATE_STANDARD,
   RADEON_CTX_PSTATE_MIN_SCLK,
   RADEON_CTX_PSTATE_MIN_MCLK,
   RADEON_CTX_PSTATE_PEAK,
};


/* Each group of two has the same priority. */
#define RADEON_PRIO_FENCE_TRACE (1 << 0)
#define RADEON_PRIO_SO_FILLED_SIZE (1 << 1)

#define RADEON_PRIO_QUERY (1 << 2)
#define RADEON_PRIO_IB (1 << 3)

#define RADEON_PRIO_DRAW_INDIRECT (1 << 4)
#define RADEON_PRIO_INDEX_BUFFER (1 << 5)

#define RADEON_PRIO_CP_DMA (1 << 6)
#define RADEON_PRIO_BORDER_COLORS (1 << 7)

#define RADEON_PRIO_CONST_BUFFER (1 << 8)
#define RADEON_PRIO_DESCRIPTORS (1 << 9)

#define RADEON_PRIO_SAMPLER_BUFFER (1 << 10)
#define RADEON_PRIO_VERTEX_BUFFER (1 << 11)

#define RADEON_PRIO_SHADER_RW_BUFFER (1 << 12)
#define RADEON_PRIO_SAMPLER_TEXTURE (1 << 13)

#define RADEON_PRIO_SHADER_RW_IMAGE (1 << 14)
#define RADEON_PRIO_SAMPLER_TEXTURE_MSAA (1 << 15)

#define RADEON_PRIO_COLOR_BUFFER (1 << 16)
#define RADEON_PRIO_DEPTH_BUFFER (1 << 17)

#define RADEON_PRIO_COLOR_BUFFER_MSAA (1 << 18)
#define RADEON_PRIO_DEPTH_BUFFER_MSAA (1 << 19)

#define RADEON_PRIO_SEPARATE_META (1 << 20)
#define RADEON_PRIO_SHADER_BINARY (1 << 21) /* the hw can't hide instruction cache misses */

#define RADEON_PRIO_SHADER_RINGS (1 << 22)
#define RADEON_PRIO_SCRATCH_BUFFER (1 << 23)

#define RADEON_ALL_PRIORITIES    BITFIELD_MASK(24)

/* When passed to radeon_winsys::buffer_wait, it disallows using the DRM ioctl for timeout=0
 * queries because it can take ~1 ms to return, reducing FPS.
 */
#define RADEON_USAGE_DISALLOW_SLOW_REPLY (1 << 26)

/* Upper bits of priorities are used by usage flags. */
#define RADEON_USAGE_READ (1 << 27)
#define RADEON_USAGE_WRITE (1 << 28)
#define RADEON_USAGE_READWRITE (RADEON_USAGE_READ | RADEON_USAGE_WRITE)

/* The winsys ensures that the CS submission will be scheduled after
 * previously flushed CSs referencing this BO in a conflicting way.
 */
#define RADEON_USAGE_SYNCHRONIZED (1 << 29)

/* When used, an implicit sync is done to make sure a compute shader
 * will read the written values from a previous draw.
 */
#define RADEON_USAGE_CB_NEEDS_IMPLICIT_SYNC (1u << 30)
#define RADEON_USAGE_DB_NEEDS_IMPLICIT_SYNC (1u << 31)

struct winsys_handle;
struct radeon_winsys_ctx;

struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};

struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current;
   struct radeon_cmdbuf_chunk *prev;
   uint16_t num_prev; /* Number of previous chunks. */
   uint16_t max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint32_t used_vram_kb;
   uint32_t used_gart_kb;

   /* Private winsys data. */
   void *priv;
   void *csc; /* amdgpu_cs_context */
};
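
/* Illustrative sketch (not part of the original header): the total number of
 * dwords emitted into a command stream is the sum of all finished chunks plus
 * the current chunk, which is exactly how radeon_emitted() below computes it.
 *
 *    unsigned total_dw = cs->prev_dw + cs->current.cdw;
 */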

/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
   /* Tiling flags describing the texture layout for display code
    * and DRI sharing.
    */
   union {
      struct {
         enum radeon_bo_layout microtile;
         enum radeon_bo_layout macrotile;
         unsigned pipe_config;
         unsigned bankw;
         unsigned bankh;
         unsigned tile_split;
         unsigned mtilea;
         unsigned num_banks;
         unsigned stride;
         bool scanout;
      } legacy;
   } u;

   enum radeon_surf_mode mode;   /* Output from buffer_get_metadata */

   /* Additional metadata associated with the buffer, in bytes.
    * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
    * Supported by amdgpu only.
    */
   uint32_t size_metadata;
   uint32_t metadata[64];
};

enum radeon_feature_id
{
   RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS,
};

struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};

struct radeon_winsys {
   /**
    * The screen object this winsys was created for.
    */
   struct pipe_screen *screen;
   /**
    * Whether the application has created at least one TMZ buffer.
    */
   const bool uses_secure_bos;

   /**
    * Decrement the winsys reference count.
    *
    * \param ws  The winsys this function is called for.
    * \return    True if the winsys and screen should be destroyed.
    */
   bool (*unref)(struct radeon_winsys *ws);

   /**
    * Destroy this winsys.
    *
    * \param ws        The winsys this function is called from.
    */
   void (*destroy)(struct radeon_winsys *ws);

   /**
    * Get the FD for the winsys if the winsys provides one.
    */
   int (*get_fd)(struct radeon_winsys *ws);

   /**
    * Query an info structure from the winsys.
    *
    * \param ws        The winsys this function is called from.
    * \param info      Return structure
    */
   void (*query_info)(struct radeon_winsys *ws, struct radeon_info *info);

   /**
    * A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs.
    */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cpu);

   /**************************************************************************
    * Buffer management. Buffer attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /**
    * Create a buffer object.
    *
    * \param ws        The winsys this function is called from.
    * \param size      The size to allocate.
    * \param alignment The alignment of the buffer in memory.
    * \param domain    A bitmask of the RADEON_DOMAIN_* flags.
    * \param flags     A bitmask of the RADEON_FLAG_* flags.
    * \return          The created buffer object.
    */
   struct pb_buffer_lean *(*buffer_create)(struct radeon_winsys *ws, uint64_t size,
                                           unsigned alignment, enum radeon_bo_domain domain,
                                           enum radeon_bo_flag flags);
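
   /* Illustrative sketch (not part of the original header): allocating a
    * page-aligned VRAM buffer that stays CPU-inaccessible, assuming `ws` is a
    * valid struct radeon_winsys pointer.
    *
    *    struct pb_buffer_lean *bo =
    *       ws->buffer_create(ws, 1024 * 1024, 4096, RADEON_DOMAIN_VRAM,
    *                         RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS |
    *                         RADEON_FLAG_NO_INTERPROCESS_SHARING);
    *    if (!bo)
    *       return; // allocation failed
    */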

   /**
    * Don't use directly. Use radeon_bo_reference.
    */
   void (*buffer_destroy)(struct radeon_winsys *ws, struct pb_buffer_lean *buf);

   /**
    * Map the entire data store of a buffer object into the client's address
    * space.
    *
    * Callers are expected to unmap buffers again if and only if the
    * RADEON_MAP_TEMPORARY flag is set in \p usage.
    *
    * \param buf       A winsys buffer object to map.
    * \param cs        A command stream to flush if the buffer is referenced by it.
    * \param usage     A bitmask of the PIPE_MAP_* and RADEON_MAP_* flags.
    * \return          The pointer at the beginning of the buffer.
    */
   void *(*buffer_map)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                       struct radeon_cmdbuf *cs, enum pipe_map_flags usage);

   /**
    * Unmap a buffer object from the client's address space.
    *
    * \param buf       A winsys buffer object to unmap.
    */
   void (*buffer_unmap)(struct radeon_winsys *ws, struct pb_buffer_lean *buf);

   /**
    * Wait for the buffer and return true if the buffer is not used
    * by the device.
    *
    * The timeout of 0 will only return the status.
    * The timeout of OS_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    *
    * usage is RADEON_USAGE_READ/WRITE.
    *
    * Checking whether a buffer is idle using timeout=0 can take 1 ms even if the DRM ioctl is
    * used, reducing our FPS to several hundreds. To prevent that, set
    * RADEON_USAGE_DISALLOW_SLOW_REPLY, which will return busy. This is a workaround for kernel
    * inefficiency.
    */
   bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                       uint64_t timeout, unsigned usage);
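
   /* Illustrative sketch (not part of the original header): a cheap busy check
    * before a CPU access, assuming valid `ws` and `bo` pointers. With
    * timeout=0 the call only reports the current status, and
    * RADEON_USAGE_DISALLOW_SLOW_REPLY avoids the slow DRM ioctl path.
    *
    *    bool idle = ws->buffer_wait(ws, bo, 0,
    *                                RADEON_USAGE_READWRITE |
    *                                RADEON_USAGE_DISALLOW_SLOW_REPLY);
    *    if (!idle) {
    *       // treat the buffer as busy and take a non-blocking path
    *    }
    */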

   /**
    * Return buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to get the flags from.
    * \param md        Metadata
    */
   void (*buffer_get_metadata)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Set buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to set the flags for.
    * \param md        Metadata
    */
   void (*buffer_set_metadata)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Get a winsys buffer from a winsys handle. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys this function is called from.
    * \param whandle   A winsys handle pointer as was received from a state
    *                  tracker.
    */
   struct pb_buffer_lean *(*buffer_from_handle)(struct radeon_winsys *ws,
                                                struct winsys_handle *whandle,
                                                unsigned vm_alignment,
                                                bool is_prime_linear_buffer);

   /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both the pointer and the size must be page aligned.
    *
    * \param ws        The winsys this function is called from.
    * \param pointer   User pointer to turn into a buffer object.
    * \param size      Size in bytes for the new buffer.
    */
   struct pb_buffer_lean *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer,
                                             uint64_t size, enum radeon_bo_flag flags);

   /**
    * Whether the buffer was created from a user pointer.
    *
    * \param buf       A winsys buffer object
    * \return          whether \p buf was created via buffer_from_ptr
    */
   bool (*buffer_is_user_ptr)(struct pb_buffer_lean *buf);

   /** Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer_lean *buf);

   /**
    * Get a winsys handle from a winsys buffer. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys instance for which the handle is to be valid
    * \param buf       A winsys buffer object to get the handle from.
    * \param whandle   A winsys handle pointer.
    * \return          true on success.
    */
   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                             struct winsys_handle *whandle);

   /**
    * Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    *
    * \note Only implemented by the amdgpu winsys.
    *
    * \return false on out of memory or other failure, true on success.
    */
   bool (*buffer_commit)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                         uint64_t offset, uint64_t size, bool commit);
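
   /* Illustrative sketch (not part of the original header): committing the
    * first two 64 KiB pages of a sparse buffer, assuming valid `ws` and a
    * buffer `bo` that was created with RADEON_FLAG_SPARSE.
    *
    *    if (!ws->buffer_commit(ws, bo, 0, 2 * RADEON_SPARSE_PAGE_SIZE, true)) {
    *       // out of memory; the range stays uncommitted
    *    }
    */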

   /**
    * Calculate the size of the first committed part of the given sparse buffer.
    * \note Only implemented by the amdgpu winsys.
    * \return the skipped count if range_offset falls into a hole.
    */
   unsigned (*buffer_find_next_committed_memory)(struct pb_buffer_lean *buf,
                        uint64_t range_offset, unsigned *range_size);

   /**
    * Return the virtual address of a buffer.
    *
    * When virtual memory is not in use, this is the offset relative to the
    * relocation base (non-zero for sub-allocated buffers).
    *
    * \param buf       A winsys buffer object
    * \return          virtual address
    */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer_lean *buf);

   /**
    * Return the offset of this buffer relative to the relocation base.
    * This is only non-zero for sub-allocated buffers.
    *
    * This is only supported in the radeon winsys, since amdgpu uses virtual
    * addresses in submissions even for the video engines.
    *
    * \param buf      A winsys buffer object
    * \return         the offset for relocations
    */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer_lean *buf);

   /**
    * Query the initial placement of the buffer from the kernel driver.
    */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer_lean *buf);

   /**
    * Query the flags used for creation of this buffer.
    *
    * Note that for imported buffers this may be lossy since not all flags
    * are passed 1:1.
    */
   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer_lean *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /**
    * Create a command submission context.
    * Various command streams can be submitted to the same context.
    *
    * \param allow_context_lost  If true, lost contexts skip command submission and report
    *                            the reset status.
    *                            If false, losing the context results in undefined behavior.
    */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws,
                                           enum radeon_ctx_priority priority,
                                           bool allow_context_lost);

   /**
    * Destroy a context.
    */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /**
    * Set a reset status for the context due to a software failure, such as an allocation failure
    * or a skipped draw.
    */
   void (*ctx_set_sw_reset_status)(struct radeon_winsys_ctx *ctx, enum pipe_reset_status status,
                                   const char *format, ...);

   /**
    * Query a GPU reset status.
    */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx,
                                                    bool full_reset_only,
                                                    bool *needs_reset, bool *reset_completed);

   /**
    * Create a command stream.
    *
    * \param cs        The returned structure that is initialized by cs_create.
    * \param ctx       The submission context
    * \param ip_type   The IP type (GFX, DMA, UVD)
    * \param flush     Flush callback function associated with the command stream.
    * \param flush_ctx User pointer that will be passed to the flush callback.
    *
    * \return true on success
    */
   bool (*cs_create)(struct radeon_cmdbuf *cs,
                     struct radeon_winsys_ctx *ctx, enum amd_ip_type amd_ip_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx);

   /**
    * Set up and enable mid command buffer preemption for the command stream.
    *
    * \param cs               Command stream
    * \param preamble_ib      Non-preemptible preamble IB for the context.
    * \param preamble_num_dw  Number of dwords in the preamble IB.
    */
   bool (*cs_setup_preemption)(struct radeon_cmdbuf *cs, const uint32_t *preamble_ib,
                               unsigned preamble_num_dw);

   /**
    * Destroy a command stream.
    *
    * \param cs        A command stream to destroy.
    */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /**
    * Add a buffer. Each buffer used by a CS must be added using this function.
    *
    * \param cs      Command stream
    * \param buf     Buffer
    * \param usage   Usage
    * \param domain  Bitmask of the RADEON_DOMAIN_* flags.
    * \return Buffer index.
    */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf,
                             unsigned usage, enum radeon_bo_domain domain);

   /**
    * Return the index of an already-added buffer.
    *
    * Not supported on amdgpu. Drivers with GPUVM should not care about
    * buffer indices.
    *
    * \param cs        Command stream
    * \param buf       Buffer
    * \return          The buffer index, or -1 if the buffer has not been added.
    */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf);

   /**
    * Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have
    * been added since the last call of cs_validate will be removed and
    * the CS will be flushed (provided there are still any buffers).
    *
    * \param cs        A command stream to validate.
    */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /**
    * Check whether the given number of dwords is available in the IB.
    * Optionally chain a new chunk of the IB if necessary and supported.
    *
    * \param cs        A command stream.
    * \param dw        Number of CS dwords requested by the caller.
    * \return true if there is enough space
    */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw);

   /**
    * Return the buffer list.
    *
    * This is the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers.
    *
    * \param cs    Command stream
    * \param list  Returned buffer list. Set to NULL to query the count only.
    * \return      The buffer count.
    */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs, struct radeon_bo_list_item *list);

   /**
    * Flush a command stream.
    *
    * \param cs          A command stream to flush.
    * \param flags       PIPE_FLUSH_* flags.
    * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
    *                    after the CS and is returned through this parameter.
    * \return Negative POSIX error code or 0 for success.
    *         Asynchronous submissions never return an error.
    */
   int (*cs_flush)(struct radeon_cmdbuf *cs, unsigned flags, struct pipe_fence_handle **fence);
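
   /* Illustrative sketch (not part of the original header): a minimal
    * submission flow, assuming valid `ws`, `bo`, and a flush callback
    * `flush_cb`. Every buffer referenced by the IB must be added first.
    *
    *    struct radeon_cmdbuf cs;
    *    struct radeon_winsys_ctx *ctx =
    *       ws->ctx_create(ws, RADEON_CTX_PRIORITY_MEDIUM, true);
    *    if (ctx && ws->cs_create(&cs, ctx, AMD_IP_GFX, flush_cb, NULL)) {
    *       ws->cs_add_buffer(&cs, bo,
    *                         RADEON_USAGE_WRITE | RADEON_USAGE_SYNCHRONIZED |
    *                         RADEON_PRIO_COLOR_BUFFER,
    *                         RADEON_DOMAIN_VRAM);
    *       // ... emit packets with radeon_emit() ...
    *       struct pipe_fence_handle *fence = NULL;
    *       ws->cs_flush(&cs, 0, &fence);
    *    }
    */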

   /**
    * Create a fence before the CS is flushed.
    * The user must flush manually to complete the initialization of the fence.
    *
    * The fence must not be used for anything except \ref cs_add_fence_dependency
    * before the flush.
    */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /**
    * Return true if a buffer is referenced by a command stream.
    *
    * \param cs        A command stream.
    * \param buf       A winsys buffer.
    */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf,
                                   unsigned usage);

   /**
    * Request access to a feature for a command stream.
    *
    * \param cs        A command stream.
    * \param fid       Feature ID, one of RADEON_FID_*
    * \param enable    Whether to enable or disable the feature.
    */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs, enum radeon_feature_id fid, bool enable);

   /**
    * Make sure all asynchronous flushes of the CS have completed.
    *
    * \param cs        A command stream.
    */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /**
    * Add a fence dependency to the CS, so that the CS will wait for
    * the fence before execution.
    */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Signal a syncobj when the CS finishes execution.
    */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Return the amd_ip_type of a CS.
    */
   enum amd_ip_type (*cs_get_ip_type)(struct radeon_cmdbuf *cs);

   /**
    * Wait for the fence and return true if the fence has been signalled.
    * The timeout of 0 will only return the status.
    * The timeout of OS_TIMEOUT_INFINITE will always wait until the fence
    * is signalled.
    */
   bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);

   /**
    * Reference counting for fences.
    */
   void (*fence_reference)(struct radeon_winsys *ws, struct pipe_fence_handle **dst,
                           struct pipe_fence_handle *src);

   /**
    * Create a new fence object corresponding to the given syncobj fd.
    */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws, int fd);

   /**
    * Create a new fence object corresponding to the given sync_file.
    */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws, int fd);

   /**
    * Return a sync_file FD corresponding to the given fence object.
    */
   int (*fence_export_sync_file)(struct radeon_winsys *ws, struct pipe_fence_handle *fence);

   /**
    * Return a sync file FD that is already signalled.
    */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /**
    * Initialize a surface.
    *
    * \param ws        The winsys this function is called from.
    * \param info      radeon_info from the driver
    * \param tex       Input texture description
    * \param flags     Bitmask of RADEON_SURF_* flags
    * \param bpe       Bytes per pixel; it can be different for Z buffers.
    * \param mode      Preferred tile mode. (linear, 1D, or 2D)
    * \param surf      Output structure
    */
   int (*surface_init)(struct radeon_winsys *ws, const struct radeon_info *info,
                       const struct pipe_resource *tex, uint64_t flags,
                       unsigned bpe, enum radeon_surf_mode mode, struct radeon_surf *surf);

   uint64_t (*query_value)(struct radeon_winsys *ws, enum radeon_value_id value);

   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset, unsigned num_registers,
                          uint32_t *out);

   /**
    * Secure context
    */
   bool (*cs_is_secure)(struct radeon_cmdbuf *cs);

   /**
    * Stable pstate
    */
   bool (*cs_set_pstate)(struct radeon_cmdbuf *cs, enum radeon_ctx_pstate state);

   /**
    * Pass the VAs to the buffers where various information is saved by the FW during mcbp.
    */
   void (*cs_set_mcbp_reg_shadowing_va)(struct radeon_cmdbuf *cs, uint64_t regs_va,
                                                                  uint64_t csa_va);
};

static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
{
   return cs && (cs->prev_dw + cs->current.cdw > num_dw);
}

static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
{
   cs->current.buf[cs->current.cdw++] = value;
}

static inline void radeon_emit_array(struct radeon_cmdbuf *cs, const uint32_t *values,
                                     unsigned count)
{
   memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
   cs->current.cdw += count;
}
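
/* Illustrative sketch (not part of the original header): reserving space and
 * emitting dwords with the helpers above, assuming valid `ws` and `cs`
 * pointers. The header value below is a made-up placeholder, not a real
 * packet encoding.
 *
 *    uint32_t payload[2] = {0, 0};
 *    if (ws->cs_check_space(cs, 3)) {
 *       radeon_emit(cs, 0xC0001000);        // placeholder packet header
 *       radeon_emit_array(cs, payload, 2);  // two payload dwords
 *    }
 */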

static inline bool radeon_uses_secure_bos(struct radeon_winsys *ws)
{
   return ws->uses_secure_bos;
}

static inline void
radeon_bo_reference(struct radeon_winsys *rws, struct pb_buffer_lean **dst,
                    struct pb_buffer_lean *src)
{
   struct pb_buffer_lean *old = *dst;

   if (pipe_reference(&(*dst)->reference, &src->reference))
      rws->buffer_destroy(rws, old);
   *dst = src;
}

/* Same as radeon_bo_reference, but ignore the value in *dst. */
static inline void
radeon_bo_set_reference(struct pb_buffer_lean **dst, struct pb_buffer_lean *src)
{
   *dst = src;
   pipe_reference(NULL, &src->reference); /* only increment refcount */
}

/* Unreference dst, but don't assign anything. */
static inline void
radeon_bo_drop_reference(struct radeon_winsys *rws, struct pb_buffer_lean *dst)
{
   if (pipe_reference(&dst->reference, NULL)) /* only decrement refcount */
      rws->buffer_destroy(rws, dst);
}

/* The following bits describe the heaps managed by slab allocators (pb_slab) and
 * the allocation cache (pb_cache).
 */
#define RADEON_HEAP_BIT_VRAM           (1 << 0) /* if false, it's GTT */
#define RADEON_HEAP_BIT_GL2_BYPASS     (1 << 1) /* both VRAM and GTT */
#define RADEON_HEAP_BIT_32BIT          (1 << 2) /* both VRAM and GTT */
#define RADEON_HEAP_BIT_ENCRYPTED      (1 << 3) /* both VRAM and GTT */

#define RADEON_HEAP_BIT_NO_CPU_ACCESS  (1 << 4) /* VRAM only */
#define RADEON_HEAP_BIT_GFX12_ALLOW_DCC (1 << 5) /* VRAM only */

#define RADEON_HEAP_BIT_WC             (1 << 4) /* GTT only, VRAM implies this to be true */

/* The number of all possible heap descriptions using the bits above. */
#define RADEON_NUM_HEAPS               (1 << 6)

static inline enum radeon_bo_domain radeon_domain_from_heap(int heap)
{
   assert(heap >= 0);

   if (heap & RADEON_HEAP_BIT_VRAM)
      return RADEON_DOMAIN_VRAM;
   else
      return RADEON_DOMAIN_GTT;
}

static inline unsigned radeon_flags_from_heap(int heap)
{
   assert(heap >= 0);

   unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;

   if (heap & RADEON_HEAP_BIT_GL2_BYPASS)
      flags |= RADEON_FLAG_GL2_BYPASS;
   if (heap & RADEON_HEAP_BIT_32BIT)
      flags |= RADEON_FLAG_32BIT;
   if (heap & RADEON_HEAP_BIT_ENCRYPTED)
      flags |= RADEON_FLAG_ENCRYPTED;

   if (heap & RADEON_HEAP_BIT_VRAM) {
      flags |= RADEON_FLAG_GTT_WC;
      if (heap & RADEON_HEAP_BIT_NO_CPU_ACCESS)
         flags |= RADEON_FLAG_NO_CPU_ACCESS;
      if (heap & RADEON_HEAP_BIT_GFX12_ALLOW_DCC)
         flags |= RADEON_FLAG_GFX12_ALLOW_DCC;
   } else {
      /* GTT only */
      if (heap & RADEON_HEAP_BIT_WC)
         flags |= RADEON_FLAG_GTT_WC;
   }

   return flags;
}
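
/* Illustrative sketch (not part of the original header): decoding a heap index
 * back into an allocation request. A VRAM heap with the NO_CPU_ACCESS bit set
 * maps to:
 *
 *    int heap = RADEON_HEAP_BIT_VRAM | RADEON_HEAP_BIT_NO_CPU_ACCESS;
 *    enum radeon_bo_domain domain = radeon_domain_from_heap(heap); // RADEON_DOMAIN_VRAM
 *    unsigned flags = radeon_flags_from_heap(heap);
 *    // flags == RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_GTT_WC |
 *    //          RADEON_FLAG_NO_CPU_ACCESS
 */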

/* This cleans up flags, so that we can comfortably assume that no invalid flag combinations
 * are set.
 */
static void radeon_canonicalize_bo_flags(enum radeon_bo_domain *_domain,
                                         enum radeon_bo_flag *_flags)
{
   unsigned domain = *_domain;
   unsigned flags = *_flags;

   /* Only set 1 domain, e.g. ignore GTT if VRAM is set. */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      domain = RADEON_DOMAIN_VRAM;
   else
      assert(util_bitcount(domain) == 1);

   switch (domain) {
   case RADEON_DOMAIN_VRAM:
      flags |= RADEON_FLAG_GTT_WC;
      break;
   case RADEON_DOMAIN_GTT:
      flags &= ~RADEON_FLAG_NO_CPU_ACCESS;
      flags &= ~RADEON_FLAG_GFX12_ALLOW_DCC;
      break;
   case RADEON_DOMAIN_GDS:
   case RADEON_DOMAIN_OA:
      flags |= RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_NO_CPU_ACCESS;
      flags &= ~RADEON_FLAG_SPARSE;
      break;
   case RADEON_DOMAIN_DOORBELL:
      flags |= RADEON_FLAG_NO_SUBALLOC;
      flags &= ~RADEON_FLAG_SPARSE;
      break;
   }

   /* Sparse buffers must have NO_CPU_ACCESS set. */
   if (flags & RADEON_FLAG_SPARSE)
      flags |= RADEON_FLAG_NO_CPU_ACCESS;

   *_domain = (enum radeon_bo_domain)domain;
   *_flags = (enum radeon_bo_flag)flags;
}

/* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain, enum radeon_bo_flag flags)
{
   radeon_canonicalize_bo_flags(&domain, &flags);

   /* Resources with interprocess sharing don't use any winsys allocators. */
   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
      return -1;

   /* These are unsupported flags. */
   /* RADEON_FLAG_DRIVER_INTERNAL is ignored. It doesn't affect allocators. */
   if (flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE |
                RADEON_FLAG_DISCARDABLE | RADEON_FLAG_CLEAR_VRAM))
      return -1;

   int heap = 0;

   if (flags & RADEON_FLAG_GL2_BYPASS)
      heap |= RADEON_HEAP_BIT_GL2_BYPASS;
   if (flags & RADEON_FLAG_32BIT)
      heap |= RADEON_HEAP_BIT_32BIT;
   if (flags & RADEON_FLAG_ENCRYPTED)
      heap |= RADEON_HEAP_BIT_ENCRYPTED;

   if (domain == RADEON_DOMAIN_VRAM) {
      /* VRAM | GTT shouldn't occur, but if it does, ignore GTT. */
      heap |= RADEON_HEAP_BIT_VRAM;
      if (flags & RADEON_FLAG_NO_CPU_ACCESS)
         heap |= RADEON_HEAP_BIT_NO_CPU_ACCESS;
      if (flags & RADEON_FLAG_GFX12_ALLOW_DCC)
         heap |= RADEON_HEAP_BIT_GFX12_ALLOW_DCC;
      /* RADEON_FLAG_WC is ignored and implied to be true for VRAM */
   } else if (domain == RADEON_DOMAIN_GTT) {
      /* GTT is implied by RADEON_HEAP_BIT_VRAM not being set. */
      if (flags & RADEON_FLAG_GTT_WC)
         heap |= RADEON_HEAP_BIT_WC;
      /* RADEON_FLAG_NO_CPU_ACCESS is ignored and implied to be false for GTT */
   } else {
      return -1;
   }

   assert(heap < RADEON_NUM_HEAPS);
   return heap;
}
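
/* Illustrative sketch (not part of the original header): a typical GTT
 * allocation request round-trips through the heap index.
 *
 *    enum radeon_bo_domain domain = RADEON_DOMAIN_GTT;
 *    enum radeon_bo_flag flags =
 *       (enum radeon_bo_flag)(RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_INTERPROCESS_SHARING);
 *    int heap = radeon_get_heap_index(domain, flags); // == RADEON_HEAP_BIT_WC
 *    assert(heap >= 0 && radeon_domain_from_heap(heap) == RADEON_DOMAIN_GTT);
 */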

typedef struct pipe_screen *(*radeon_screen_create_t)(struct radeon_winsys *,
                                                      const struct pipe_screen_config *);

/* These functions create the radeon_winsys instance for the corresponding kernel driver. */
struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create, bool is_virtio);
struct radeon_winsys *
radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
                         radeon_screen_create_t screen_create);

#endif