/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "amdgpu_cs.h"

#include "util/os_drm.h"
#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/u_cpu_detect.h"
#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include "util/thread_sched.h"
#include "util/xmlconfig.h"
#include "drm-uapi/amdgpu_drm.h"
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "sid.h"

static struct hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = SIMPLE_MTX_INITIALIZER;

#if MESA_DEBUG
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
#endif

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *aws,
                           const struct pipe_screen_config *config,
                           int fd)
{
   if (!ac_query_gpu_info(fd, aws->dev, &aws->info, false))
      goto fail;

   aws->addrlib = ac_addrlib_create(&aws->info, &aws->info.max_alignment);
   if (!aws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   aws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL ||
                   strstr(debug_get_option("AMD_DEBUG", ""), "check_vm") != NULL;
   aws->noop_cs = aws->info.family_overridden || debug_get_bool_option("RADEON_NOOP", false);
#if MESA_DEBUG
   aws->debug_all_bos = debug_get_option_all_bos();
#endif
   aws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL ||
                       strstr(debug_get_option("AMD_DEBUG", ""), "reserve_vmid") != NULL ||
                       strstr(debug_get_option("AMD_DEBUG", ""), "sqtt") != NULL;
   aws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
                               driQueryOptionb(config->options, "radeonsi_zerovram");
   aws->info.use_userq = debug_get_bool_option("AMD_USERQ", false);

   for (unsigned i = 0; i < ARRAY_SIZE(aws->queues); i++)
      simple_mtx_init(&aws->queues[i].userq.lock, mtx_plain);

   /* TODO: Enable this once the kernel handles it efficiently. */
   if (aws->info.has_dedicated_vram && !aws->info.use_userq)
      aws->info.has_local_buffers = false;

   return true;

fail:
   ac_drm_device_deinitialize(aws->dev);
   aws->dev = NULL;
   return false;
}

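/* Tear down everything acquired by do_winsys_init plus the per-winsys state
 * created in amdgpu_winsys_create: queue fences and contexts, the CS thread
 * queue, the slab and cache managers, the hash tables and mutexes, and
 * finally the addrlib and device handles themselves.
 */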
static void do_winsys_deinit(struct amdgpu_winsys *aws)
{
   if (aws->reserve_vmid)
      ac_drm_vm_unreserve_vmid(aws->dev, 0);

   for (unsigned i = 0; i < ARRAY_SIZE(aws->queues); i++) {
      for (unsigned j = 0; j < ARRAY_SIZE(aws->queues[i].fences); j++)
         amdgpu_fence_reference(&aws->queues[i].fences[j], NULL);

      amdgpu_userq_deinit(aws, &aws->queues[i].userq);
      simple_mtx_destroy(&aws->queues[i].userq.lock);

      amdgpu_ctx_reference(&aws->queues[i].last_ctx, NULL);
   }

   if (util_queue_is_initialized(&aws->cs_queue))
      util_queue_destroy(&aws->cs_queue);

   if (aws->bo_slabs.groups)
      pb_slabs_deinit(&aws->bo_slabs);
   pb_cache_deinit(&aws->bo_cache);
   _mesa_hash_table_destroy(aws->bo_export_table, NULL);
   simple_mtx_destroy(&aws->sws_list_lock);
#if MESA_DEBUG
   simple_mtx_destroy(&aws->global_bo_list_lock);
#endif
   simple_mtx_destroy(&aws->bo_export_table_lock);

   ac_addrlib_destroy(aws->addrlib);
   ac_drm_device_deinitialize(aws->dev);
   ac_drm_cs_destroy_syncobj(aws->fd, aws->vm_timeline_syncobj);
   simple_mtx_destroy(&aws->bo_fence_lock);

   FREE(aws);
}

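/* Destroy a screen winsys and, on the last reference, the shared
 * amdgpu_winsys. "locked" means the caller already holds dev_tab_mutex;
 * the error paths in amdgpu_winsys_create pass true.
 */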
static void amdgpu_winsys_destroy_locked(struct radeon_winsys *rws, bool locked)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table. This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys from
    * the table when the counter drops to 0.
    */
   if (!locked)
      simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&aws->reference, NULL);
   if (destroy && dev_tab) {
      _mesa_hash_table_remove_key(dev_tab, aws->dev);
      if (_mesa_hash_table_num_entries(dev_tab) == 0) {
         _mesa_hash_table_destroy(dev_tab, NULL);
         dev_tab = NULL;
      }
   }

   if (!locked)
      simple_mtx_unlock(&dev_tab_mutex);

   if (sws->fd != aws->fd)
      close(sws->fd);

   if (destroy)
      do_winsys_deinit(aws);

   FREE(rws);
}

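/* Public destroy hook: takes dev_tab_mutex itself. */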
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   amdgpu_winsys_destroy_locked(rws, false);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws, struct radeon_info *info)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   *info = aws->info;
}

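/* Feature requests are a holdover from the radeon winsys interface;
 * amdgpu grants none of them.
 */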
static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

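/* Query a driver statistic or a kernel value. Winsys-side counters are
 * returned directly; everything else goes through the amdgpu INFO,
 * heap-info, and sensor-info ioctls.
 */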
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);
   struct amdgpu_heap_info heap = {0};
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return aws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return aws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return aws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return aws->mapped_gtt;
   case RADEON_SLAB_WASTED_VRAM:
      return aws->slab_wasted_vram;
   case RADEON_SLAB_WASTED_GTT:
      return aws->slab_wasted_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return aws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return aws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return aws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return aws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return aws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return aws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&aws->cs_queue, 0);
   }
   return 0;
}

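/* Read MMIO registers through the kernel. "reg_offset" is a byte offset,
 * while the ioctl operates on dword indices, hence the division by 4.
 * Returns true on success.
 */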
static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   return ac_drm_read_mm_registers(aws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

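/* Drop a reference to the screen winsys. On the last reference, unlink it
 * from the shared winsys' list and close the KMS handles that were opened
 * on its DRM fd.
 */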
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool ret;

   simple_mtx_lock(&aws->sws_list_lock);

   ret = pipe_reference(&sws->reference, NULL);
   if (ret) {
      struct amdgpu_screen_winsys **sws_iter;
      struct amdgpu_winsys *aws = sws->aws;

      /* Remove this amdgpu_screen_winsys from amdgpu_winsys' list, so that
       * amdgpu_winsys_create can't re-use it anymore.
       */
      for (sws_iter = &aws->sws_list; *sws_iter; sws_iter = &(*sws_iter)->next) {
         if (*sws_iter == sws) {
            *sws_iter = sws->next;
            break;
         }
      }
   }

   simple_mtx_unlock(&aws->sws_list_lock);

   if (ret && sws->kms_handles) {
      struct drm_gem_close args;

      hash_table_foreach(sws->kms_handles, entry) {
         args.handle = (uintptr_t)entry->data;
         drm_ioctl(sws->fd, DRM_IOCTL_GEM_CLOSE, &args);
      }
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   }

   return ret;
}

static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
                                           unsigned cpu)
{
   struct amdgpu_winsys *aws = amdgpu_winsys(rws);

   util_thread_sched_apply_policy(aws->cs_queue.threads[0],
                                  UTIL_THREAD_DRIVER_SUBMIT, cpu, NULL);
}

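/* The kms_handles table maps amdgpu_bo_real pointers to the GEM handles
 * opened on a screen's own fd (closed again in amdgpu_winsys_unref):
 * hash by the BO's KMS handle, compare keys by pointer identity.
 */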
static uint32_t kms_handle_hash(const void *key)
{
   const struct amdgpu_bo_real *bo = key;

   return bo->kms_handle;
}

static bool kms_handle_equals(const void *a, const void *b)
{
   return a == b;
}

static bool amdgpu_cs_is_secure(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   return cs->csc->secure;
}

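/* Map the winsys pstate enum onto the kernel's stable-pstate tokens. */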
static uint32_t
radeon_to_amdgpu_pstate(enum radeon_ctx_pstate pstate)
{
   switch (pstate) {
   case RADEON_CTX_PSTATE_NONE:
      return AMDGPU_CTX_STABLE_PSTATE_NONE;
   case RADEON_CTX_PSTATE_STANDARD:
      return AMDGPU_CTX_STABLE_PSTATE_STANDARD;
   case RADEON_CTX_PSTATE_MIN_SCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
   case RADEON_CTX_PSTATE_MIN_MCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
   case RADEON_CTX_PSTATE_PEAK:
      return AMDGPU_CTX_STABLE_PSTATE_PEAK;
   default:
      unreachable("Invalid pstate");
   }
}

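/* Switch the context to a stable power state, if the kernel supports it. */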
static bool
amdgpu_cs_set_pstate(struct radeon_cmdbuf *rcs, enum radeon_ctx_pstate pstate)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   if (!cs->aws->info.has_stable_pstate)
      return false;

   uint32_t amdgpu_pstate = radeon_to_amdgpu_pstate(pstate);
   return ac_drm_cs_ctx_stable_pstate(cs->aws->dev, cs->ctx->ctx_handle,
      AMDGPU_CTX_OP_SET_STABLE_PSTATE, amdgpu_pstate, NULL) == 0;
}

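/* Returns true only if the two fds are known to reference the same file
 * description. A negative result from os_same_file_description means it
 * couldn't tell; that is treated as "different" after a one-time warning.
 */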
static bool
are_file_descriptions_equal(int fd1, int fd2)
{
   int r = os_same_file_description(fd1, fd2);

   if (r == 0)
      return true;

   if (r < 0) {
      static bool logged;

      if (!logged) {
         os_log_message("amdgpu: os_same_file_description couldn't "
                        "determine if two DRM fds reference the same "
                        "file description.\n"
                        "If they do, bad things may happen!\n");
         logged = true;
      }
   }
   return false;
}

static int
amdgpu_drm_winsys_get_fd(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);

   return sws->fd;
}

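/* Entry point: create (or reference) the winsys for a DRM fd.
 *
 * One amdgpu_winsys exists per physical device; screens that open the same
 * device share it through dev_tab, keyed by the device handle that
 * libdrm_amdgpu already dedups. Each unique file description gets its own
 * amdgpu_screen_winsys; an fd that duplicates an existing description just
 * takes a reference on the matching one.
 */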
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create, bool is_virtio)
{
   struct amdgpu_screen_winsys *sws;
   struct amdgpu_winsys *aws;
   ac_drm_device *dev;
   uint32_t drm_major, drm_minor;
   int r;

   sws = CALLOC_STRUCT(amdgpu_screen_winsys);
   if (!sws)
      return NULL;

   pipe_reference_init(&sws->reference, 1);
   sws->fd = -1;

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create_ptr_keys();

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = ac_drm_device_initialize(fd, is_virtio, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "amdgpu: amd%s_device_initialize failed.\n",
              is_virtio ? "vgpu" : "gpu");
      goto fail;
   }

   /* Look up a winsys if we have already created one for this device. */
   aws = util_hash_table_get(dev_tab, dev);
   if (aws) {
      struct amdgpu_screen_winsys *sws_iter;

      /* Release the device handle, because we don't need it anymore.
       * This function is returning an existing winsys instance, which
       * has its own device handle.
       */
      ac_drm_device_deinitialize((void*)dev);

      simple_mtx_lock(&aws->sws_list_lock);
      for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
         if (are_file_descriptions_equal(sws_iter->fd, fd)) {
            FREE(sws);
            sws = sws_iter;
            pipe_reference(NULL, &sws->reference);
            simple_mtx_unlock(&aws->sws_list_lock);
            goto unlock;
         }
      }
      simple_mtx_unlock(&aws->sws_list_lock);

      sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                 kms_handle_equals);
      if (!sws->kms_handles)
         goto fail;

      pipe_reference(NULL, &aws->reference);
   } else {
      /* Create a new winsys. */
      aws = CALLOC_STRUCT(amdgpu_winsys);
      if (!aws)
         goto fail;

      aws->dev = dev;

      /* The device fd might be different from the one we passed because of
       * libdrm_amdgpu device dedup logic. This can happen if radv is
       * initialized first.
       * Get the correct fd or the buffer sharing will not work (see #3424).
       */
      aws->fd = ac_drm_device_get_fd(dev);
      if (!are_file_descriptions_equal(aws->fd, fd)) {
         sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                    kms_handle_equals);
         if (!sws->kms_handles)
            goto fail;
      } else {
         sws->fd = aws->fd;
      }
      aws->info.drm_major = drm_major;
      aws->info.drm_minor = drm_minor;

      if (ac_drm_cs_create_syncobj(aws->fd, &aws->vm_timeline_syncobj))
         goto fail_alloc;
      simple_mtx_init(&aws->vm_ioctl_lock, mtx_plain);

      aws->info.is_virtio = is_virtio;

      /* Only aws and buffer functions are used. */
      aws->dummy_sws.aws = aws;
      amdgpu_bo_init_functions(&aws->dummy_sws);

      if (!do_winsys_init(aws, config, fd))
         goto fail_alloc;

      /* Create managers. */
      pb_cache_init(&aws->bo_cache, RADEON_NUM_HEAPS,
                    500000, aws->check_vm ? 1.0f : 1.5f, 0,
                    ((uint64_t)aws->info.vram_size_kb + aws->info.gart_size_kb) * 1024 / 8,
                    offsetof(struct amdgpu_bo_real_reusable, cache_entry), aws,
                    /* Cast to void* because one of the function parameters
                     * is a struct pointer instead of void*. */
                    (void*)amdgpu_bo_destroy, (void*)amdgpu_bo_can_reclaim);

      if (!pb_slabs_init(&aws->bo_slabs,
                         8,  /* min slab entry size: 256 bytes */
                         20, /* max slab entry size: 1 MB (slab size = 2 MB) */
                         RADEON_NUM_HEAPS, true,
                         aws,
                         amdgpu_bo_can_reclaim_slab,
                         amdgpu_bo_slab_alloc,
                         /* Cast to void* because one of the function parameters
                          * is a struct pointer instead of void*. */
                         (void*)amdgpu_bo_slab_free)) {
         amdgpu_winsys_destroy_locked(&sws->base, true);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      aws->info.min_alloc_size = 1 << aws->bo_slabs.min_order;

      /* init reference */
      pipe_reference_init(&aws->reference, 1);
#if MESA_DEBUG
      list_inithead(&aws->global_bo_list);
#endif
      aws->bo_export_table = util_hash_table_create_ptr_keys();

      (void) simple_mtx_init(&aws->sws_list_lock, mtx_plain);
#if MESA_DEBUG
      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
#endif
      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);

      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
                           UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL)) {
         amdgpu_winsys_destroy_locked(&sws->base, true);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      _mesa_hash_table_insert(dev_tab, dev, aws);

      if (aws->reserve_vmid) {
         r = ac_drm_vm_reserve_vmid(aws->dev, 0);
         if (r) {
            amdgpu_winsys_destroy_locked(&sws->base, true);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }
      }
   }

   if (sws->fd < 0)
      sws->fd = os_dupfd_cloexec(fd);

   sws->aws = aws;

   /* Set functions. */
   sws->base.unref = amdgpu_winsys_unref;
   sws->base.destroy = amdgpu_winsys_destroy;
   sws->base.get_fd = amdgpu_drm_winsys_get_fd;
   sws->base.query_info = amdgpu_winsys_query_info;
   sws->base.cs_request_feature = amdgpu_cs_request_feature;
   sws->base.query_value = amdgpu_query_value;
   sws->base.read_registers = amdgpu_read_registers;
   sws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
   sws->base.cs_is_secure = amdgpu_cs_is_secure;
   sws->base.cs_set_pstate = amdgpu_cs_set_pstate;

   amdgpu_bo_init_functions(sws);
   amdgpu_cs_init_functions(sws);
   amdgpu_surface_init_functions(sws);

   simple_mtx_lock(&aws->sws_list_lock);
   sws->next = aws->sws_list;
   aws->sws_list = sws;
   simple_mtx_unlock(&aws->sws_list_lock);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   sws->base.screen = screen_create(&sws->base, config);
   if (!sws->base.screen) {
      amdgpu_winsys_destroy_locked(&sws->base, true);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

unlock:
   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &sws->base;

fail_alloc:
   FREE(aws);
fail:
   if (sws->kms_handles)
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   FREE(sws);
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}