/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "amdgpu_cs.h"

#include "util/os_drm.h"
#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/u_cpu_detect.h"
#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include "util/thread_sched.h"
#include "util/xmlconfig.h"
#include "drm-uapi/amdgpu_drm.h"
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "sid.h"

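/* All amdgpu_winsys instances, keyed by the device cookie returned by
 * ac_drm_device_get_cookie(), so that every screen opened on the same GPU
 * shares a single winsys.
 */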
static struct hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = SIMPLE_MTX_INITIALIZER;

#if MESA_DEBUG
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
#endif

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *aws,
                           const struct pipe_screen_config *config,
                           int fd)
{
   if (!ac_query_gpu_info(fd, aws->dev, &aws->info, false))
      goto fail;

   aws->addrlib = ac_addrlib_create(&aws->info, &aws->info.max_alignment);
   if (!aws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

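   /* Cache the driver debug options (R600_DEBUG/AMD_DEBUG environment
    * variables and drirc settings) in the winsys so later code can test
    * plain booleans instead of re-parsing the environment.
    */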
   aws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL ||
                   strstr(debug_get_option("AMD_DEBUG", ""), "check_vm") != NULL;
   aws->noop_cs = aws->info.family_overridden || debug_get_bool_option("RADEON_NOOP", false);
#if MESA_DEBUG
   aws->debug_all_bos = debug_get_option_all_bos();
#endif
   aws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL ||
                       strstr(debug_get_option("AMD_DEBUG", ""), "reserve_vmid") != NULL ||
                       strstr(debug_get_option("AMD_DEBUG", ""), "sqtt") != NULL;
   aws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
                               driQueryOptionb(config->options, "radeonsi_zerovram");
   aws->info.use_userq = debug_get_bool_option("AMD_USERQ", false);

   for (unsigned i = 0; i < ARRAY_SIZE(aws->queues); i++)
      simple_mtx_init(&aws->queues[i].userq.lock, mtx_plain);

   /* TODO: Enable this once the kernel handles it efficiently. */
   if (aws->info.has_dedicated_vram && !aws->info.use_userq)
      aws->info.has_local_buffers = false;

   return true;

fail:
   ac_drm_device_deinitialize(aws->dev);
   aws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *aws)
{
   if (aws->reserve_vmid)
      ac_drm_vm_unreserve_vmid(aws->dev, 0);

   for (unsigned i = 0; i < ARRAY_SIZE(aws->queues); i++) {
      for (unsigned j = 0; j < ARRAY_SIZE(aws->queues[i].fences); j++)
         amdgpu_fence_reference(&aws->queues[i].fences[j], NULL);

      amdgpu_userq_deinit(aws, &aws->queues[i].userq);
      simple_mtx_destroy(&aws->queues[i].userq.lock);

      amdgpu_ctx_reference(&aws->queues[i].last_ctx, NULL);
   }

   if (util_queue_is_initialized(&aws->cs_queue))
      util_queue_destroy(&aws->cs_queue);

   if (aws->bo_slabs.groups)
      pb_slabs_deinit(&aws->bo_slabs);
   pb_cache_deinit(&aws->bo_cache);
   _mesa_hash_table_destroy(aws->bo_export_table, NULL);
   simple_mtx_destroy(&aws->sws_list_lock);
#if MESA_DEBUG
   simple_mtx_destroy(&aws->global_bo_list_lock);
#endif
   simple_mtx_destroy(&aws->bo_export_table_lock);

   ac_addrlib_destroy(aws->addrlib);
   ac_drm_device_deinitialize(aws->dev);
   ac_drm_cs_destroy_syncobj(aws->fd, aws->vm_timeline_syncobj);
   simple_mtx_destroy(&aws->bo_fence_lock);

   FREE(aws);
}

static void amdgpu_winsys_destroy_locked(struct radeon_winsys *rws, bool locked)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0.
    */
   if (!locked)
      simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&aws->reference, NULL);
   if (destroy && dev_tab) {
      _mesa_hash_table_remove_key(dev_tab,
                                  (void *)ac_drm_device_get_cookie(aws->dev));
      if (_mesa_hash_table_num_entries(dev_tab) == 0) {
         _mesa_hash_table_destroy(dev_tab, NULL);
         dev_tab = NULL;
      }
   }

   if (!locked)
      simple_mtx_unlock(&dev_tab_mutex);

   if (sws->fd != aws->fd)
      close(sws->fd);

   if (destroy)
      do_winsys_deinit(aws);

   FREE(rws);
}

static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   amdgpu_winsys_destroy_locked(rws, false);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws, struct radeon_info *info)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   *info = aws->info;
}

static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);
   struct amdgpu_heap_info heap = {0};
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return aws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return aws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return aws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return aws->mapped_gtt;
   case RADEON_SLAB_WASTED_VRAM:
      return aws->slab_wasted_vram;
   case RADEON_SLAB_WASTED_GTT:
      return aws->slab_wasted_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return aws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return aws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return aws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return aws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return aws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return aws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&aws->cs_queue, 0);
   }
   return 0;
}

static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   return ac_drm_read_mm_registers(aws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

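/* Drop one reference to this screen winsys. Returns true when this was the
 * last reference, after closing any GEM handles that were opened on the
 * screen's private DRM fd.
 */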
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool ret;

   simple_mtx_lock(&aws->sws_list_lock);

   ret = pipe_reference(&sws->reference, NULL);
   if (ret) {
      struct amdgpu_screen_winsys **sws_iter;
      struct amdgpu_winsys *aws = sws->aws;

      /* Remove this amdgpu_screen_winsys from amdgpu_winsys' list, so that
       * amdgpu_winsys_create can't re-use it anymore
       */
      for (sws_iter = &aws->sws_list; *sws_iter; sws_iter = &(*sws_iter)->next) {
         if (*sws_iter == sws) {
            *sws_iter = sws->next;
            break;
         }
      }
   }

   simple_mtx_unlock(&aws->sws_list_lock);

   if (ret && sws->kms_handles) {
      struct drm_gem_close args;

      hash_table_foreach(sws->kms_handles, entry) {
         args.handle = (uintptr_t)entry->data;
         drm_ioctl(sws->fd, DRM_IOCTL_GEM_CLOSE, &args);
      }
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   }

   return ret;
}

static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
                                           unsigned cpu)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   util_thread_sched_apply_policy(aws->cs_queue.threads[0],
                                  UTIL_THREAD_DRIVER_SUBMIT, cpu, NULL);
}

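/* Hash callbacks for the per-screen sws->kms_handles table, which maps
 * amdgpu BOs to GEM handles that are valid on the screen's own DRM fd when
 * that fd differs from the device fd used by the shared amdgpu_winsys.
 */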
static uint32_t kms_handle_hash(const void *key)
{
   const struct amdgpu_bo_real *bo = key;

   return bo->kms_handle;
}

static bool kms_handle_equals(const void *a, const void *b)
{
   return a == b;
}

static bool amdgpu_cs_is_secure(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   return cs->csc->secure;
}

static uint32_t
radeon_to_amdgpu_pstate(enum radeon_ctx_pstate pstate)
{
   switch (pstate) {
   case RADEON_CTX_PSTATE_NONE:
      return AMDGPU_CTX_STABLE_PSTATE_NONE;
   case RADEON_CTX_PSTATE_STANDARD:
      return AMDGPU_CTX_STABLE_PSTATE_STANDARD;
   case RADEON_CTX_PSTATE_MIN_SCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
   case RADEON_CTX_PSTATE_MIN_MCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
   case RADEON_CTX_PSTATE_PEAK:
      return AMDGPU_CTX_STABLE_PSTATE_PEAK;
   default:
      unreachable("Invalid pstate");
   }
}

static bool
amdgpu_cs_set_pstate(struct radeon_cmdbuf *rcs, enum radeon_ctx_pstate pstate)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   if (!cs->aws->info.has_stable_pstate)
      return false;

   uint32_t amdgpu_pstate = radeon_to_amdgpu_pstate(pstate);
   return ac_drm_cs_ctx_stable_pstate(cs->aws->dev, cs->ctx->ctx_handle,
      AMDGPU_CTX_OP_SET_STABLE_PSTATE, amdgpu_pstate, NULL) == 0;
}

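/* os_same_file_description returns 0 when the two fds point at the same file
 * description, a positive value when they differ, and a negative value when
 * it cannot tell (e.g. kcmp is unavailable); treat the last case as "not
 * equal" but warn once.
 */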
static bool
are_file_descriptions_equal(int fd1, int fd2)
{
   int r = os_same_file_description(fd1, fd2);

   if (r == 0)
      return true;

   if (r < 0) {
      static bool logged;

      if (!logged) {
         os_log_message("amdgpu: os_same_file_description couldn't "
                        "determine if two DRM fds reference the same "
                        "file description.\n"
                        "If they do, bad things may happen!\n");
         logged = true;
      }
   }
   return false;
}

static int
amdgpu_drm_winsys_get_fd(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);

   return sws->fd;
}

PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create, bool is_virtio)
{
   struct amdgpu_screen_winsys *sws;
   struct amdgpu_winsys *aws;
   ac_drm_device *dev;
   uint32_t drm_major, drm_minor;
   int r;

   sws = CALLOC_STRUCT(amdgpu_screen_winsys);
   if (!sws)
      return NULL;

   pipe_reference_init(&sws->reference, 1);
   sws->fd = -1;

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create_ptr_keys();

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = ac_drm_device_initialize(fd, is_virtio, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "amdgpu: amd%s_device_initialize failed.\n",
         is_virtio ? "vgpu" : "gpu");
      goto fail;
   }

   /* Look up a winsys if we have already created one for this device. */
   aws = util_hash_table_get(dev_tab, (void *)ac_drm_device_get_cookie(dev));
   if (aws) {
      struct amdgpu_screen_winsys *sws_iter;

      /* Release the device handle, because we don't need it anymore.
       * This function is returning an existing winsys instance, which
       * has its own device handle.
       */
      ac_drm_device_deinitialize((void*)dev);

      simple_mtx_lock(&aws->sws_list_lock);
      for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
         if (are_file_descriptions_equal(sws_iter->fd, fd)) {
            FREE(sws);
            sws = sws_iter;
            pipe_reference(NULL, &sws->reference);
            simple_mtx_unlock(&aws->sws_list_lock);
            goto unlock;
         }
      }
      simple_mtx_unlock(&aws->sws_list_lock);

      sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                 kms_handle_equals);
      if (!sws->kms_handles)
         goto fail;

      pipe_reference(NULL, &aws->reference);
   } else {
      /* Create a new winsys. */
      aws = CALLOC_STRUCT(amdgpu_winsys);
      if (!aws)
         goto fail;

      aws->dev = dev;

      /* The device fd might be different from the one we passed because of
       * the libdrm_amdgpu device dedup logic. This can happen if radv is
       * initialized first.
       * Get the correct fd, or buffer sharing will not work (see #3424).
       */
      aws->fd = ac_drm_device_get_fd(dev);
      if (!are_file_descriptions_equal(aws->fd, fd)) {
         sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                    kms_handle_equals);
         if (!sws->kms_handles)
            goto fail;
      } else {
         sws->fd = aws->fd;
      }
      aws->info.drm_major = drm_major;
      aws->info.drm_minor = drm_minor;

      if (ac_drm_cs_create_syncobj(aws->fd, &aws->vm_timeline_syncobj))
         goto fail_alloc;
      simple_mtx_init(&aws->vm_ioctl_lock, mtx_plain);

      aws->info.is_virtio = is_virtio;

      /* Only aws and buffer functions are used. */
      aws->dummy_sws.aws = aws;
      amdgpu_bo_init_functions(&aws->dummy_sws);

      if (!do_winsys_init(aws, config, fd))
         goto fail_alloc;

      /* Create managers. */
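      /* Reusable buffers live in a time-based cache whose total size is
       * capped at 1/8 of VRAM + GTT, and small allocations are sub-allocated
       * from 2 MB slabs with entry sizes between 256 bytes and 1 MB.
       */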
      pb_cache_init(&aws->bo_cache, RADEON_NUM_HEAPS,
                    500000, aws->check_vm ? 1.0f : 1.5f, 0,
                    ((uint64_t)aws->info.vram_size_kb + aws->info.gart_size_kb) * 1024 / 8,
                    offsetof(struct amdgpu_bo_real_reusable, cache_entry), aws,
                    /* Cast to void* because one of the function parameters
                     * is a struct pointer instead of void*. */
                    (void*)amdgpu_bo_destroy, (void*)amdgpu_bo_can_reclaim);

      if (!pb_slabs_init(&aws->bo_slabs,
                         8,  /* min slab entry size: 256 bytes */
                         20, /* max slab entry size: 1 MB (slab size = 2 MB) */
                         RADEON_NUM_HEAPS, true,
                         aws,
                         amdgpu_bo_can_reclaim_slab,
                         amdgpu_bo_slab_alloc,
                         /* Cast to void* because one of the function parameters
                          * is a struct pointer instead of void*. */
                         (void*)amdgpu_bo_slab_free)) {
         amdgpu_winsys_destroy_locked(&sws->base, true);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      aws->info.min_alloc_size = 1 << aws->bo_slabs.min_order;

      /* init reference */
      pipe_reference_init(&aws->reference, 1);
#if MESA_DEBUG
      list_inithead(&aws->global_bo_list);
#endif
      aws->bo_export_table = util_hash_table_create_ptr_keys();

      (void) simple_mtx_init(&aws->sws_list_lock, mtx_plain);
#if MESA_DEBUG
      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
#endif
      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);

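      /* Queue with a single worker thread used to submit command streams to
       * the kernel asynchronously.
       */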
      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
                           UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL)) {
         amdgpu_winsys_destroy_locked(&sws->base, true);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      _mesa_hash_table_insert(dev_tab, (void *)ac_drm_device_get_cookie(dev), aws);

      if (aws->reserve_vmid) {
         r = ac_drm_vm_reserve_vmid(aws->dev, 0);
         if (r) {
            amdgpu_winsys_destroy_locked(&sws->base, true);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }
      }
   }

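   /* If we didn't adopt the device fd above, duplicate the caller's fd so
    * that the winsys owns a reference that stays valid independently of the
    * caller.
    */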
   if (sws->fd < 0)
      sws->fd = os_dupfd_cloexec(fd);

   sws->aws = aws;

   /* Set functions. */
   sws->base.unref = amdgpu_winsys_unref;
   sws->base.destroy = amdgpu_winsys_destroy;
   sws->base.get_fd = amdgpu_drm_winsys_get_fd;
   sws->base.query_info = amdgpu_winsys_query_info;
   sws->base.cs_request_feature = amdgpu_cs_request_feature;
   sws->base.query_value = amdgpu_query_value;
   sws->base.read_registers = amdgpu_read_registers;
   sws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
   sws->base.cs_is_secure = amdgpu_cs_is_secure;
   sws->base.cs_set_pstate = amdgpu_cs_set_pstate;

   amdgpu_bo_init_functions(sws);
   amdgpu_cs_init_functions(sws);
   amdgpu_surface_init_functions(sws);

   simple_mtx_lock(&aws->sws_list_lock);
   sws->next = aws->sws_list;
   aws->sws_list = sws;
   simple_mtx_unlock(&aws->sws_list_lock);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   sws->base.screen = screen_create(&sws->base, config);
   if (!sws->base.screen) {
      amdgpu_winsys_destroy_locked(&sws->base, true);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

unlock:
   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &sws->base;

fail_alloc:
   FREE(aws);
fail:
   if (sws->kms_handles)
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   FREE(sws);
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}