• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3  * Copyright 2010 Marek Olšák <maraeo@gmail.com>
4  * Copyright 2018 Advanced Micro Devices, Inc.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the "Software"),
9  * to deal in the Software without restriction, including without limitation
10  * on the rights to use, copy, modify, merge, publish, distribute, sub
11  * license, and/or sell copies of the Software, and to permit persons to whom
12  * the Software is furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the next
15  * paragraph) shall be included in all copies or substantial portions of the
16  * Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE. */
25 
#ifndef RADEON_WINSYS_H
#define RADEON_WINSYS_H

/* The public winsys interface header for the radeon driver. */

/* Skip command submission. Same as RADEON_NOOP=1. */
#define RADEON_FLUSH_NOOP                     (1u << 29)

/* Toggle the secure submission boolean after the flush */
#define RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION (1u << 30)

/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW    (1u << 31)

/* Convenience combination: asynchronous flush + start the next gfx IB now. */
#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW                                                   \
   (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)
43 
44 #include "amd/common/ac_gpu_info.h"
45 #include "amd/common/ac_surface.h"
46 #include "pipebuffer/pb_buffer.h"
47 
/* Tiling flags: how a buffer's contents are laid out in memory. */
enum radeon_bo_layout
{
   RADEON_LAYOUT_LINEAR = 0,
   RADEON_LAYOUT_TILED,
   RADEON_LAYOUT_SQUARETILED,

   RADEON_LAYOUT_UNKNOWN
};
57 
/* Memory domains a buffer object can be placed in. */
enum radeon_bo_domain
{ /* bitfield */
  RADEON_DOMAIN_GTT = 2,
  RADEON_DOMAIN_VRAM = 4,
  RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT,
  RADEON_DOMAIN_GDS = 8,
  RADEON_DOMAIN_OA = 16,
};
66 
/* Buffer-creation flags; see buffer_create() and radeon_flags_from_heap(). */
enum radeon_bo_flag
{ /* bitfield */
  RADEON_FLAG_GTT_WC = (1 << 0),
  RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),
  RADEON_FLAG_NO_SUBALLOC = (1 << 2),
  RADEON_FLAG_SPARSE = (1 << 3),
  RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4),
  RADEON_FLAG_READ_ONLY = (1 << 5),
  RADEON_FLAG_32BIT = (1 << 6),
  RADEON_FLAG_ENCRYPTED = (1 << 7),
  RADEON_FLAG_UNCACHED = (1 << 8), /* only gfx9 and newer */
  RADEON_FLAG_DRIVER_INTERNAL = (1 << 9),
};
80 
/* Flags for cs_add_fence_dependency(). */
enum radeon_dependency_flag
{
   /* Instead of waiting for a job to finish execution, the dependency will
    * be signaled when the job starts execution.
    */
   RADEON_DEPENDENCY_START_FENCE = 1 << 1,
};
88 
/* How a buffer is used by the GPU; passed to cs_add_buffer() and
 * buffer_wait(). */
enum radeon_bo_usage
{ /* bitfield */
  RADEON_USAGE_READ = 2,
  RADEON_USAGE_WRITE = 4,
  RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

  /* The winsys ensures that the CS submission will be scheduled after
   * previously flushed CSs referencing this BO in a conflicting way.
   */
  RADEON_USAGE_SYNCHRONIZED = 8,

  /* When used, an implicit sync is done to make sure a compute shader
   * will read the written values from a previous draw.
   */
  RADEON_USAGE_NEEDS_IMPLICIT_SYNC = 16,
};
105 
/* Extra flags for buffer_map(), combined with PIPE_MAP_* flags. */
enum radeon_map_flags
{
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_MAP_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
};
115 
116 #define RADEON_SPARSE_PAGE_SIZE (64 * 1024)
117 
/* Values/counters that can be queried through query_value(). */
enum radeon_value_id
{
   RADEON_REQUESTED_VRAM_MEMORY,
   RADEON_REQUESTED_GTT_MEMORY,
   RADEON_MAPPED_VRAM,
   RADEON_MAPPED_GTT,
   RADEON_SLAB_WASTED_VRAM,
   RADEON_SLAB_WASTED_GTT,
   RADEON_BUFFER_WAIT_TIME_NS,
   RADEON_NUM_MAPPED_BUFFERS,
   RADEON_TIMESTAMP,
   RADEON_NUM_GFX_IBS,
   RADEON_NUM_SDMA_IBS,
   RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER,
   RADEON_NUM_BYTES_MOVED,
   RADEON_NUM_EVICTIONS,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
   RADEON_VRAM_USAGE,
   RADEON_VRAM_VIS_USAGE,
   RADEON_GTT_USAGE,
   RADEON_GPU_TEMPERATURE, /* DRM 2.42.0 */
   RADEON_CURRENT_SCLK,
   RADEON_CURRENT_MCLK,
   RADEON_CS_THREAD_TIME,
};
144 
/* Residency priority of a buffer, passed to cs_add_buffer().
 * A higher value means a greater chance of staying in the requested domain. */
enum radeon_bo_priority
{
   /* Each group of two has the same priority. */
   RADEON_PRIO_FENCE = 0,
   RADEON_PRIO_TRACE,

   RADEON_PRIO_SO_FILLED_SIZE = 2,
   RADEON_PRIO_QUERY,

   RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
   RADEON_PRIO_IB2,     /* IB executed with INDIRECT_BUFFER */

   RADEON_PRIO_DRAW_INDIRECT = 6,
   RADEON_PRIO_INDEX_BUFFER,

   RADEON_PRIO_CP_DMA = 8,
   RADEON_PRIO_BORDER_COLORS,

   RADEON_PRIO_CONST_BUFFER = 10,
   RADEON_PRIO_DESCRIPTORS,

   RADEON_PRIO_SAMPLER_BUFFER = 12,
   RADEON_PRIO_VERTEX_BUFFER,

   RADEON_PRIO_SHADER_RW_BUFFER = 14,
   RADEON_PRIO_COMPUTE_GLOBAL,

   RADEON_PRIO_SAMPLER_TEXTURE = 16,
   RADEON_PRIO_SHADER_RW_IMAGE,

   RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
   RADEON_PRIO_COLOR_BUFFER,

   RADEON_PRIO_DEPTH_BUFFER = 20,

   RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

   RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

   RADEON_PRIO_SEPARATE_META = 26,
   RADEON_PRIO_SHADER_BINARY, /* the hw can't hide instruction cache misses */

   RADEON_PRIO_SHADER_RINGS = 28,

   RADEON_PRIO_SCRATCH_BUFFER = 30,
   /* 31 is the maximum value */
};
192 
193 struct winsys_handle;
194 struct radeon_winsys_ctx;
195 
/* One chunk of IB memory; a chained command buffer consists of a current
 * chunk plus a list of previous chunks (see struct radeon_cmdbuf). */
struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};
201 
/* A command stream (IB). Drivers append packets to "current" via
 * radeon_emit(); the winsys chains full chunks into "prev". */
struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current;
   struct radeon_cmdbuf_chunk *prev;
   uint16_t num_prev; /* Number of previous chunks. */
   uint16_t max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint32_t used_vram_kb;
   uint32_t used_gart_kb;
   uint64_t gpu_address;

   /* Private winsys data. */
   void *priv;
};
217 
/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
   /* Tiling flags describing the texture layout for display code
    * and DRI sharing.
    */
   union {
      struct {
         enum radeon_bo_layout microtile;
         enum radeon_bo_layout macrotile;
         unsigned pipe_config;
         unsigned bankw;
         unsigned bankh;
         unsigned tile_split;
         unsigned mtilea;
         unsigned num_banks;
         unsigned stride;
         bool scanout;
      } legacy;
   } u;

   enum radeon_surf_mode mode;   /* Output from buffer_get_metadata */

   /* Additional metadata associated with the buffer, in bytes.
    * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
    * Supported by amdgpu only.
    */
   uint32_t size_metadata;
   uint32_t metadata[64];
};
247 
/* Features that can be requested via cs_request_feature(). */
enum radeon_feature_id
{
   RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS,
};
253 
/* One entry of the buffer list returned by cs_get_buffer_list(). */
struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};
259 
/* The winsys interface: a vtable the gallium driver uses to talk to the
 * kernel. Implemented by the amdgpu and the legacy radeon winsyses. */
struct radeon_winsys {
   /**
    * The screen object this winsys was created for
    */
   struct pipe_screen *screen;

   /**
    * Has the application created at least one TMZ buffer.
    */
   const bool uses_secure_bos;

   /**
    * Decrement the winsys reference count.
    *
    * \param ws  The winsys this function is called for.
    * \return    True if the winsys and screen should be destroyed.
    */
   bool (*unref)(struct radeon_winsys *ws);

   /**
    * Destroy this winsys.
    *
    * \param ws        The winsys this function is called from.
    */
   void (*destroy)(struct radeon_winsys *ws);

   /**
    * Query an info structure from winsys.
    *
    * \param ws        The winsys this function is called from.
    * \param info      Return structure
    * \param enable_smart_access_memory   Request enabling Smart Access
    *                  Memory — NOTE(review): semantics assumed from the
    *                  name; confirm against the winsys implementations.
    * \param disable_smart_access_memory  Request disabling it (see above).
    */
   void (*query_info)(struct radeon_winsys *ws, struct radeon_info *info,
                      bool enable_smart_access_memory,
                      bool disable_smart_access_memory);

   /**
    * A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs.
    */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

   /**************************************************************************
    * Buffer management. Buffer attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /**
    * Create a buffer object.
    *
    * \param ws        The winsys this function is called from.
    * \param size      The size to allocate.
    * \param alignment An alignment of the buffer in memory.
    * \param domain    A bitmask of the RADEON_DOMAIN_* flags.
    * \param flags     A bitmask of the RADEON_FLAG_* flags.
    * \return          The created buffer object.
    */
   struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws, uint64_t size, unsigned alignment,
                                      enum radeon_bo_domain domain, enum radeon_bo_flag flags);

   /**
    * Map the entire data store of a buffer object into the client's address
    * space.
    *
    * Callers are expected to unmap buffers again if and only if the
    * RADEON_MAP_TEMPORARY flag is set in \p usage.
    *
    * \param buf       A winsys buffer object to map.
    * \param cs        A command stream to flush if the buffer is referenced by it.
    * \param usage     A bitmask of the PIPE_MAP_* and RADEON_MAP_* flags.
    * \return          The pointer at the beginning of the buffer.
    */
   void *(*buffer_map)(struct radeon_winsys *ws, struct pb_buffer *buf,
                       struct radeon_cmdbuf *cs, enum pipe_map_flags usage);

   /**
    * Unmap a buffer object from the client's address space.
    *
    * \param buf       A winsys buffer object to unmap.
    */
   void (*buffer_unmap)(struct radeon_winsys *ws, struct pb_buffer *buf);

   /**
    * Wait for the buffer and return true if the buffer is not used
    * by the device.
    *
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    */
   bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer *buf,
                       uint64_t timeout, enum radeon_bo_usage usage);

   /**
    * Return buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to get the flags from.
    * \param md        Metadata
    */
   void (*buffer_get_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Set buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to set the flags for.
    * \param md        Metadata
    */
   void (*buffer_set_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Get a winsys buffer from a winsys handle. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys this function is called from.
    * \param whandle   A winsys handle pointer as was received from a state
    *                  tracker.
    */
   struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws, struct winsys_handle *whandle,
                                           unsigned vm_alignment);

   /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both pointer and size must be page aligned.
    *
    * \param ws        The winsys this function is called from.
    * \param pointer   User pointer to turn into a buffer object.
    * \param size      Size in bytes for the new buffer.
    */
   struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer, uint64_t size);

   /**
    * Whether the buffer was created from a user pointer.
    *
    * \param buf       A winsys buffer object
    * \return          whether \p buf was created via buffer_from_ptr
    */
   bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

   /** Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer *buf);

   /**
    * Get a winsys handle from a winsys buffer. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys instance for which the handle is to be valid
    * \param buf       A winsys buffer object to get the handle from.
    * \param whandle   A winsys handle pointer.
    * \return          true on success.
    */
   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer *buf,
                             struct winsys_handle *whandle);

   /**
    * Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    *
    * \note Only implemented by the amdgpu winsys.
    *
    * \return false on out of memory or other failure, true on success.
    */
   bool (*buffer_commit)(struct radeon_winsys *ws, struct pb_buffer *buf,
                         uint64_t offset, uint64_t size, bool commit);

   /**
    * Return the virtual address of a buffer.
    *
    * When virtual memory is not in use, this is the offset relative to the
    * relocation base (non-zero for sub-allocated buffers).
    *
    * \param buf       A winsys buffer object
    * \return          virtual address
    */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

   /**
    * Return the offset of this buffer relative to the relocation base.
    * This is only non-zero for sub-allocated buffers.
    *
    * This is only supported in the radeon winsys, since amdgpu uses virtual
    * addresses in submissions even for the video engines.
    *
    * \param buf      A winsys buffer object
    * \return         the offset for relocations
    */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

   /**
    * Query the initial placement of the buffer from the kernel driver.
    */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

   /**
    * Query the flags used for creation of this buffer.
    *
    * Note that for imported buffer this may be lossy since not all flags
    * are passed 1:1.
    */
   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /**
    * Create a command submission context.
    * Various command streams can be submitted to the same context.
    */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

   /**
    * Destroy a context.
    */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /**
    * Query a GPU reset status.
    */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx,
                                                    bool full_reset_only,
                                                    bool *needs_reset);

   /**
    * Create a command stream.
    *
    * \param cs        The returned structure that is initialized by cs_create.
    * \param ctx       The submission context
    * \param ring_type The ring type (GFX, DMA, UVD)
    * \param flush     Flush callback function associated with the command stream.
    * \param user      User pointer that will be passed to the flush callback.
    *
    * \return true on success
    */
   bool (*cs_create)(struct radeon_cmdbuf *cs,
                     struct radeon_winsys_ctx *ctx, enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx, bool stop_exec_on_failure);

   /**
    * Set up and enable mid command buffer preemption for the command stream.
    *
    * \param cs               Command stream
    * \param preamble_ib      Non-preemptible preamble IB for the context.
    * \param preamble_num_dw  Number of dwords in the preamble IB.
    */
   bool (*cs_setup_preemption)(struct radeon_cmdbuf *cs, const uint32_t *preamble_ib,
                               unsigned preamble_num_dw);

   /**
    * Destroy a command stream.
    *
    * \param cs        A command stream to destroy.
    */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /**
    * Add a buffer. Each buffer used by a CS must be added using this function.
    *
    * \param cs      Command stream
    * \param buf     Buffer
    * \param usage   Whether the buffer is used for read and/or write.
    * \param domain  Bitmask of the RADEON_DOMAIN_* flags.
    * \param priority  A higher number means a greater chance of being
    *                  placed in the requested domain. 15 is the maximum.
    * \return Buffer index.
    */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                             enum radeon_bo_usage usage, enum radeon_bo_domain domain,
                             enum radeon_bo_priority priority);

   /**
    * Return the index of an already-added buffer.
    *
    * Not supported on amdgpu. Drivers with GPUVM should not care about
    * buffer indices.
    *
    * \param cs        Command stream
    * \param buf       Buffer
    * \return          The buffer index, or -1 if the buffer has not been added.
    */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf);

   /**
    * Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have
    * been added since the last call of cs_validate will be removed and
    * the CS will be flushed (provided there are still any buffers).
    *
    * \param cs        A command stream to validate.
    */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /**
    * Check whether the given number of dwords is available in the IB.
    * Optionally chain a new chunk of the IB if necessary and supported.
    *
    * \param cs        A command stream.
    * \param dw        Number of CS dwords requested by the caller.
    * \param force_chaining  Chain the IB into a new buffer now to discard
    *                        the CP prefetch cache (to emulate PKT3_REWIND)
    * \return true if there is enough space
    */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw, bool force_chaining);

   /**
    * Return the buffer list.
    *
    * This is the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers.
    *
    * \param cs    Command stream
    * \param list  Returned buffer list. Set to NULL to query the count only.
    * \return      The buffer count.
    */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs, struct radeon_bo_list_item *list);

   /**
    * Flush a command stream.
    *
    * \param cs          A command stream to flush.
    * \param flags       PIPE_FLUSH_* flags.
    * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
    *                    after the CS and is returned through this parameter.
    * \return Negative POSIX error code or 0 for success.
    *         Asynchronous submissions never return an error.
    */
   int (*cs_flush)(struct radeon_cmdbuf *cs, unsigned flags, struct pipe_fence_handle **fence);

   /**
    * Create a fence before the CS is flushed.
    * The user must flush manually to complete the initializaton of the fence.
    *
    * The fence must not be used for anything except \ref cs_add_fence_dependency
    * before the flush.
    */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /**
    * Return true if a buffer is referenced by a command stream.
    *
    * \param cs        A command stream.
    * \param buf       A winsys buffer.
    */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                                   enum radeon_bo_usage usage);

   /**
    * Request access to a feature for a command stream.
    *
    * \param cs        A command stream.
    * \param fid       Feature ID, one of RADEON_FID_*
    * \param enable    Whether to enable or disable the feature.
    */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs, enum radeon_feature_id fid, bool enable);

   /**
    * Make sure all asynchronous flush of the cs have completed
    *
    * \param cs        A command stream.
    */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /**
    * Add a fence dependency to the CS, so that the CS will wait for
    * the fence before execution.
    *
    * \param dependency_flags  Bitmask of RADEON_DEPENDENCY_*
    */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence,
                                   unsigned dependency_flags);

   /**
    * Signal a syncobj when the CS finishes execution.
    */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Wait for the fence and return true if the fence has been signalled.
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
    * is signalled.
    */
   bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);

   /**
    * Reference counting for fences.
    */
   void (*fence_reference)(struct pipe_fence_handle **dst, struct pipe_fence_handle *src);

   /**
    * Create a new fence object corresponding to the given syncobj fd.
    */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws, int fd);

   /**
    * Create a new fence object corresponding to the given sync_file.
    */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws, int fd);

   /**
    * Return a sync_file FD corresponding to the given fence object.
    */
   int (*fence_export_sync_file)(struct radeon_winsys *ws, struct pipe_fence_handle *fence);

   /**
    * Return a sync file FD that is already signalled.
    */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /**
    * Initialize surface
    *
    * \param ws        The winsys this function is called from.
    * \param tex       Input texture description
    * \param flags     Bitmask of RADEON_SURF_* flags
    * \param bpe       Bytes per pixel, it can be different for Z buffers.
    * \param mode      Preferred tile mode. (linear, 1D, or 2D)
    * \param surf      Output structure
    */
   int (*surface_init)(struct radeon_winsys *ws, const struct pipe_resource *tex, unsigned flags,
                       unsigned bpe, enum radeon_surf_mode mode, struct radeon_surf *surf);

   /**
    * Query one of the RADEON_* counters/values (see enum radeon_value_id).
    */
   uint64_t (*query_value)(struct radeon_winsys *ws, enum radeon_value_id value);

   /**
    * Read \p num_registers MMIO registers starting at \p reg_offset into
    * \p out. \return true on success.
    */
   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset, unsigned num_registers,
                          uint32_t *out);

   /**
    * Secure context
    */
   bool (*cs_is_secure)(struct radeon_cmdbuf *cs);
};
705 
radeon_emitted(struct radeon_cmdbuf * cs,unsigned num_dw)706 static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
707 {
708    return cs && (cs->prev_dw + cs->current.cdw > num_dw);
709 }
710 
radeon_emit(struct radeon_cmdbuf * cs,uint32_t value)711 static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
712 {
713    cs->current.buf[cs->current.cdw++] = value;
714 }
715 
radeon_emit_array(struct radeon_cmdbuf * cs,const uint32_t * values,unsigned count)716 static inline void radeon_emit_array(struct radeon_cmdbuf *cs, const uint32_t *values,
717                                      unsigned count)
718 {
719    memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
720    cs->current.cdw += count;
721 }
722 
/* Return whether the application has created at least one TMZ (secure)
 * buffer on this winsys. */
static inline bool radeon_uses_secure_bos(struct radeon_winsys* ws)
{
  return ws->uses_secure_bos;
}
727 
/* Make *dst point to src with proper reference counting; thin wrapper
 * around pb_reference_with_winsys(). */
static inline void
radeon_bo_reference(struct radeon_winsys *rws, struct pb_buffer **dst, struct pb_buffer *src)
{
   pb_reference_with_winsys(rws, dst, src);
}
733 
/* Heaps used by the winsys slab and cache allocators. Each heap encodes a
 * (domain, flags) combination; see radeon_get_heap_index(),
 * radeon_domain_from_heap() and radeon_flags_from_heap(). The VRAM heaps
 * come first, followed by the GTT heaps. */
enum radeon_heap
{
   RADEON_HEAP_VRAM_NO_CPU_ACCESS,
   RADEON_HEAP_VRAM_READ_ONLY,
   RADEON_HEAP_VRAM_READ_ONLY_32BIT,
   RADEON_HEAP_VRAM_32BIT,
   RADEON_HEAP_VRAM,
   RADEON_HEAP_GTT_WC,
   RADEON_HEAP_GTT_WC_READ_ONLY,
   RADEON_HEAP_GTT_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_WC_32BIT,
   RADEON_HEAP_GTT,
   RADEON_HEAP_GTT_UNCACHED_WC,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_UNCACHED_WC_32BIT,
   RADEON_HEAP_GTT_UNCACHED,
   RADEON_MAX_SLAB_HEAPS,
   RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};
754 
radeon_domain_from_heap(enum radeon_heap heap)755 static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
756 {
757    switch (heap) {
758    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
759    case RADEON_HEAP_VRAM_READ_ONLY:
760    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
761    case RADEON_HEAP_VRAM_32BIT:
762    case RADEON_HEAP_VRAM:
763       return RADEON_DOMAIN_VRAM;
764    case RADEON_HEAP_GTT_WC:
765    case RADEON_HEAP_GTT_WC_READ_ONLY:
766    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
767    case RADEON_HEAP_GTT_WC_32BIT:
768    case RADEON_HEAP_GTT:
769    case RADEON_HEAP_GTT_UNCACHED_WC:
770    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
771    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
772    case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
773    case RADEON_HEAP_GTT_UNCACHED:
774       return RADEON_DOMAIN_GTT;
775    default:
776       assert(0);
777       return (enum radeon_bo_domain)0;
778    }
779 }
780 
radeon_flags_from_heap(enum radeon_heap heap)781 static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
782 {
783    unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;
784 
785    switch (heap) {
786    case RADEON_HEAP_GTT:
787    case RADEON_HEAP_GTT_UNCACHED:
788       break;
789    default:
790       flags |= RADEON_FLAG_GTT_WC;
791    }
792 
793    switch (heap) {
794    case RADEON_HEAP_GTT_UNCACHED_WC:
795    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
796    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
797    case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
798    case RADEON_HEAP_GTT_UNCACHED:
799       flags |= RADEON_FLAG_UNCACHED;
800       break;
801    default:
802       break;
803    }
804 
805    switch (heap) {
806    case RADEON_HEAP_VRAM_READ_ONLY:
807    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
808    case RADEON_HEAP_GTT_WC_READ_ONLY:
809    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
810    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
811    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
812       flags |= RADEON_FLAG_READ_ONLY;
813       break;
814    default:
815       break;
816    }
817 
818    switch (heap) {
819    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
820    case RADEON_HEAP_VRAM_32BIT:
821    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
822    case RADEON_HEAP_GTT_WC_32BIT:
823    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
824    case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
825       flags |= RADEON_FLAG_32BIT;
826       FALLTHROUGH;
827    default:
828       break;
829    }
830 
831    switch (heap) {
832    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
833       flags |= RADEON_FLAG_NO_CPU_ACCESS;
834       break;
835    default:
836       break;
837    }
838 
839    return flags;
840 }
841 
/* Return the heap index for winsys allocators, or -1 on failure.
 *
 * The inverse of radeon_domain_from_heap()/radeon_flags_from_heap(): given a
 * (domain, flags) pair, pick the matching enum radeon_heap value. Returns -1
 * for any combination the slab/cache allocators do not handle. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain, enum radeon_bo_flag flags)
{
   bool uncached;

   /* VRAM implies WC (write combining) */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
   /* NO_CPU_ACCESS implies VRAM only. */
   assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);

   /* Resources with interprocess sharing don't use any winsys allocators. */
   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
      return -1;

   /* Unsupported flags: NO_SUBALLOC, SPARSE. */
   if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_UNCACHED |
                 RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT |
                 RADEON_FLAG_DRIVER_INTERNAL))
      return -1;

   switch (domain) {
   case RADEON_DOMAIN_VRAM:
      /* Dispatch on the exact combination of the three VRAM-relevant flags. */
      switch (flags & (RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
         assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
         assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS:
         return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
      case RADEON_FLAG_READ_ONLY:
         return RADEON_HEAP_VRAM_READ_ONLY;
      case RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_32BIT;
      case 0:
         return RADEON_HEAP_VRAM;
      }
      break;
   case RADEON_DOMAIN_GTT:
      /* UNCACHED only selects between the WC heap variants below. */
      uncached = flags & RADEON_FLAG_UNCACHED;

      switch (flags & (RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT
                         : RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY
                         : RADEON_HEAP_GTT_WC_READ_ONLY;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_32BIT
                         : RADEON_HEAP_GTT_WC_32BIT;
      case RADEON_FLAG_GTT_WC:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC : RADEON_HEAP_GTT_WC;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_READ_ONLY:
         assert(!"READ_ONLY without WC is disallowed");
         return -1;
      case RADEON_FLAG_32BIT:
         assert(!"32BIT without WC is disallowed");
         return -1;
      case 0:
         return uncached ? RADEON_HEAP_GTT_UNCACHED : RADEON_HEAP_GTT;
      }
      break;
   default:
      break;
   }
   return -1;
}
915 
916 #endif
917