/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#ifndef AMDGPU_CS_H
#define AMDGPU_CS_H

#include "amdgpu_bo.h"
#include "util/u_memory.h"
#include "drm-uapi/amdgpu_drm.h"

struct amdgpu_ctx {
   struct amdgpu_winsys *ws;
   amdgpu_context_handle ctx;
   amdgpu_bo_handle user_fence_bo;
   uint64_t *user_fence_cpu_address_base;
   int refcount;
   unsigned initial_num_total_rejected_cs;
   unsigned num_rejected_cs;
};

struct amdgpu_cs_buffer {
   struct amdgpu_winsys_bo *bo;
   union {
      struct {
         uint32_t priority_usage;
      } real;
      struct {
         uint32_t real_idx; /* index of underlying real BO */
      } slab;
   } u;
   enum radeon_bo_usage usage;
};

enum ib_type {
   IB_PREAMBLE,
   IB_MAIN,
   IB_PARALLEL_COMPUTE,
   IB_NUM,
};

struct amdgpu_ib {
   struct radeon_cmdbuf base;

   /* A buffer out of which new IBs are allocated. */
   struct pb_buffer        *big_ib_buffer;
   uint8_t                 *ib_mapped;
   unsigned                used_ib_space;

   /* The maximum size seen by cs_check_space. If the driver calls
    * cs_check_space and then flushes, the newly allocated IB should be at
    * least this large.
    */
   unsigned                max_check_space_size;

   unsigned                max_ib_size;
   uint32_t                *ptr_ib_size;
   bool                    ptr_ib_size_inside_ib;
   enum ib_type            ib_type;
};
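/* Illustrative sketch (not part of the original header), assuming the
 * cs_check_space winsys hook from radeon_winsys.h; its exact signature may
 * differ across Mesa versions. It shows the pattern that feeds
 * max_check_space_size: reserve space first, emit packets second.
 *
 *    // Ask for room for the packets about to be emitted. If the current
 *    // IB is too full, the winsys flushes or chains, and the replacement
 *    // IB is sized to at least the largest reservation seen so far.
 *    ws->cs_check_space(rcs, num_dw);
 *    ...emit up to num_dw dwords into rcs...
 */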

struct amdgpu_fence_list {
   struct pipe_fence_handle    **list;
   unsigned                    num;
   unsigned                    max;
};

struct amdgpu_cs_context {
   struct drm_amdgpu_cs_chunk_ib ib[IB_NUM];

   /* Buffers. */
   unsigned                    max_real_buffers;
   unsigned                    num_real_buffers;
   struct amdgpu_cs_buffer     *real_buffers;

   unsigned                    num_slab_buffers;
   unsigned                    max_slab_buffers;
   struct amdgpu_cs_buffer     *slab_buffers;

   unsigned                    num_sparse_buffers;
   unsigned                    max_sparse_buffers;
   struct amdgpu_cs_buffer     *sparse_buffers;

   int                         buffer_indices_hashlist[4096];

   struct amdgpu_winsys_bo     *last_added_bo;
   unsigned                    last_added_bo_index;
   unsigned                    last_added_bo_usage;
   uint32_t                    last_added_bo_priority_usage;

   struct amdgpu_fence_list    fence_dependencies;
   struct amdgpu_fence_list    syncobj_dependencies;
   struct amdgpu_fence_list    syncobj_to_signal;

   /* The compute IB uses the dependencies above + these: */
   struct amdgpu_fence_list    compute_fence_dependencies;
   struct amdgpu_fence_list    compute_start_fence_dependencies;

   struct pipe_fence_handle    *fence;

   /* The error returned from cs_flush for non-async submissions. */
   int                         error_code;

   /* TMZ: whether this command buffer will be submitted with the TMZ flag. */
   bool secure;
};
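/* Illustrative sketch (not part of the original header) of how the lookup
 * cache above is typically consulted; the real logic lives in
 * amdgpu_lookup_buffer/amdgpu_cs_add_buffer in amdgpu_cs.c, and
 * bo->unique_id is an assumption about the BO struct.
 *
 *    // Fast path: the same BO is usually re-added many times in a row.
 *    if (csc->last_added_bo == bo)
 *       return csc->last_added_bo_index;
 *
 *    // Slow path: buffer_indices_hashlist maps a hash of the BO to its
 *    // index in real_buffers/slab_buffers/sparse_buffers, -1 if absent.
 *    int i = csc->buffer_indices_hashlist[bo->unique_id & 4095];
 */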

struct amdgpu_cs {
   struct amdgpu_ib main; /* must be first because this is inherited */
   struct amdgpu_ib compute_ib; /* optional parallel compute IB */
   struct amdgpu_ctx *ctx;
   enum ring_type ring_type;
   struct drm_amdgpu_cs_chunk_fence fence_chunk;

   /* We flip between these two CS contexts. While one is being consumed
    * by the kernel in another thread, the other one is being filled
    * by the pipe driver. */
   struct amdgpu_cs_context csc1;
   struct amdgpu_cs_context csc2;
   /* The CS context currently being filled. */
   struct amdgpu_cs_context *csc;
   /* The CS context currently owned by the other thread. */
   struct amdgpu_cs_context *cst;

   /* Flush CS. */
   void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
   void *flush_data;
   bool stop_exec_on_failure;

   struct util_queue_fence flush_completed;
   struct pipe_fence_handle *next_fence;
   struct pb_buffer *preamble_ib_bo;
};
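/* Illustrative sketch (not part of the original header): the csc/cst flip
 * done at flush time (see amdgpu_cs_flush in amdgpu_cs.c for the real
 * code). The driver hands the filled context to the submit thread and
 * immediately starts recording into the other one.
 *
 *    util_queue_fence_wait(&cs->flush_completed); // cst must be idle
 *    struct amdgpu_cs_context *cur = cs->csc;
 *    cs->csc = cs->cst;   // refill the context the kernel is done with
 *    cs->cst = cur;       // the submit thread now owns the filled one
 */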

struct amdgpu_fence {
   struct pipe_reference reference;
   /* If ctx == NULL, this fence is syncobj-based. */
   uint32_t syncobj;

   struct amdgpu_winsys *ws;
   struct amdgpu_ctx *ctx;  /* submission context */
   struct amdgpu_cs_fence fence;
   uint64_t *user_fence_cpu_address;

   /* Whether the fence has been submitted. This is unsignalled for deferred
    * fences (cs->next_fence) and while an IB is still being submitted in
    * the submit thread. */
   struct util_queue_fence submitted;

   volatile int signalled;              /* bool (int for atomicity) */
};

static inline bool amdgpu_fence_is_syncobj(struct amdgpu_fence *fence)
{
   return fence->ctx == NULL;
}

static inline void amdgpu_ctx_unref(struct amdgpu_ctx *ctx)
{
   if (p_atomic_dec_zero(&ctx->refcount)) {
      amdgpu_cs_ctx_free(ctx->ctx);
      amdgpu_bo_free(ctx->user_fence_bo);
      FREE(ctx);
   }
}
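/* Illustrative sketch (not part of the original header): the acquire side
 * of the refcount released above. p_atomic_inc comes from util/u_atomic.h;
 * the helper name amdgpu_ctx_ref is made up for illustration.
 *
 *    static inline struct amdgpu_ctx *amdgpu_ctx_ref(struct amdgpu_ctx *ctx)
 *    {
 *       p_atomic_inc(&ctx->refcount);
 *       return ctx;
 *    }
 */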

static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
                                          struct pipe_fence_handle *src)
{
   struct amdgpu_fence **adst = (struct amdgpu_fence **)dst;
   struct amdgpu_fence *asrc = (struct amdgpu_fence *)src;

   if (pipe_reference(&(*adst)->reference, &asrc->reference)) {
      struct amdgpu_fence *fence = *adst;

      if (amdgpu_fence_is_syncobj(fence))
         amdgpu_cs_destroy_syncobj(fence->ws->dev, fence->syncobj);
      else
         amdgpu_ctx_unref(fence->ctx);

      util_queue_fence_destroy(&fence->submitted);
      FREE(fence);
   }
   *adst = asrc;
}
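/* Illustrative usage sketch (not part of the original header): keeping the
 * most recent fence alive. Assuming pipe_reference() tolerates NULL on
 * either side (as in Mesa's util), referencing NULL releases the fence.
 *
 *    struct pipe_fence_handle *last = NULL;
 *    amdgpu_fence_reference(&last, new_fence); // drop old ref, take new
 *    ...
 *    amdgpu_fence_reference(&last, NULL);      // release
 */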

int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);

static inline struct amdgpu_ib *
amdgpu_ib(struct radeon_cmdbuf *base)
{
   return (struct amdgpu_ib *)base;
}

static inline struct amdgpu_cs *
amdgpu_cs(struct radeon_cmdbuf *base)
{
   assert(amdgpu_ib(base)->ib_type == IB_MAIN);
   return (struct amdgpu_cs*)base;
}

#define get_container(member_ptr, container_type, container_member) \
   (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))
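/* Note (not part of the original header): get_container is the same
 * pattern as the Linux kernel's container_of(). For example:
 *
 *    struct amdgpu_ib *ib = ...;
 *    struct amdgpu_cs *cs = get_container(ib, struct amdgpu_cs, compute_ib);
 *
 * which is exactly what amdgpu_cs_from_ib() below does per ib_type.
 */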

static inline struct amdgpu_cs *
amdgpu_cs_from_ib(struct amdgpu_ib *ib)
{
   switch (ib->ib_type) {
   case IB_MAIN:
      return get_container(ib, struct amdgpu_cs, main);
   case IB_PARALLEL_COMPUTE:
      return get_container(ib, struct amdgpu_cs, compute_ib);
   default:
      unreachable("bad ib_type");
   }
}

static inline bool
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
                              struct amdgpu_winsys_bo *bo)
{
   int num_refs = bo->num_cs_references;
   return num_refs == bo->ws->num_cs ||
         (num_refs && amdgpu_lookup_buffer(cs->csc, bo) != -1);
}

static inline bool
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
                                         struct amdgpu_winsys_bo *bo,
                                         enum radeon_bo_usage usage)
{
   int index;
   struct amdgpu_cs_buffer *buffer;

   if (!bo->num_cs_references)
      return false;

   index = amdgpu_lookup_buffer(cs->csc, bo);
   if (index == -1)
      return false;

   buffer = bo->bo ? &cs->csc->real_buffers[index] :
            bo->sparse ? &cs->csc->sparse_buffers[index] :
            &cs->csc->slab_buffers[index];

   return (buffer->usage & usage) != 0;
}
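/* Illustrative usage sketch (not part of the original header): checking
 * whether the current CS writes a BO before a CPU map. RADEON_USAGE_WRITE
 * comes from radeon_winsys.h; the surrounding policy is a made-up example.
 *
 *    if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, RADEON_USAGE_WRITE)) {
 *       // the queued commands write this BO: flush and wait before mapping
 *    }
 */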

static inline bool
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
   return bo->num_cs_references != 0;
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute);
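/* Illustrative usage sketch (not part of the original header): a blocking
 * wait. PIPE_TIMEOUT_INFINITE is the usual Gallium constant; whether this
 * implementation special-cases it is defined in amdgpu_cs.c.
 *
 *    bool ok = amdgpu_fence_wait(fence, PIPE_TIMEOUT_INFINITE, false);
 *    assert(ok); // an infinite, relative wait should only return true
 */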
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                       unsigned num_fences,
                       struct pipe_fence_handle **fences);
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);

#endif