/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file amdgpu_device.c
 *
 * Implementation of functions for AMD GPU device
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/stat.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"
#include "amdgpu_asic_id.h"

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))

/* All devices initialized so far, keyed by their DRM fd and protected by
 * fd_mutex.  fd_hash()/fd_compare() resolve fds to the primary device node
 * name, so different fds that refer to the same device share one entry. */
static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct util_hash_table *fd_tab;

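/**
 * Hash callback for tables keyed by 32-bit handles stored as pointers.
 */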
static unsigned handle_hash(void *key)
{
        return PTR_TO_UINT(key);
}

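/**
 * Compare callback for handle-keyed tables; returns 0 when the keys are
 * equal.
 */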
static int handle_compare(void *key1, void *key2)
{
        return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

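/**
 * Hash a DRM fd by the name of its primary device node, so that different
 * fds opened for the same device hash to the same bucket.
 */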
static unsigned fd_hash(void *key)
{
        int fd = PTR_TO_UINT(key);
        char *name = drmGetPrimaryDeviceNameFromFd(fd);
        unsigned result = 0;
        char *c;

        if (name == NULL)
                return 0;

        for (c = name; *c; ++c)
                result += *c;

        free(name);

        return result;
}

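/**
 * Compare two DRM fds by the name of their primary device node; returns 0
 * when both refer to the same device.
 */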
static int fd_compare(void *key1, void *key2)
{
        int fd1 = PTR_TO_UINT(key1);
        int fd2 = PTR_TO_UINT(key2);
        char *name1 = drmGetPrimaryDeviceNameFromFd(fd1);
        char *name2 = drmGetPrimaryDeviceNameFromFd(fd2);
        int result;

        if (name1 == NULL || name2 == NULL) {
                free(name1);
                free(name2);
                return 0;
        }

        result = strcmp(name1, name2);
        free(name1);
        free(name2);

        return result;
}

/**
 * Query whether the given fd is authenticated.
 *
 * \param fd   - \c [in]  File descriptor for the AMD GPU device
 * \param auth - \c [out] Set to whether the fd is authenticated:
 *                        for a render node fd, auth is always 0;
 *                        for a legacy (primary) fd, the authentication
 *                        status is queried from the kernel.
 *
 * \return  0 on success\n
 *         >0 - AMD specific error code\n
 *         <0 - Negative POSIX error code
 */
static int amdgpu_get_auth(int fd, int *auth)
{
        int r = 0;
        drm_client_t client = {};

        if (drmGetNodeTypeFromFd(fd) == DRM_NODE_RENDER)
                *auth = 0;
        else {
                client.idx = 0;
                r = drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client);
                if (!r)
                        *auth = client.auth;
        }
        return r;
}

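/**
 * Tear down a device once its reference count has dropped to zero: destroy
 * the VA managers and handle tables, remove the device from fd_tab and
 * close its file descriptors.
 */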
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
        amdgpu_vamgr_deinit(dev->vamgr);
        free(dev->vamgr);
        amdgpu_vamgr_deinit(dev->vamgr_32);
        free(dev->vamgr_32);
        util_hash_table_destroy(dev->bo_flink_names);
        util_hash_table_destroy(dev->bo_handles);
        pthread_mutex_destroy(&dev->bo_table_mutex);
        util_hash_table_remove(fd_tab, UINT_TO_PTR(dev->fd));
        close(dev->fd);
        if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
                close(dev->flink_fd);
        free(dev);
}

/**
 * Assignment between two amdgpu_device pointers with reference counting.
 *
 * Usage:
 *    struct amdgpu_device *dst = ... , *src = ...;
 *
 *    dst = src;
 *    // No reference counting. Only use this when you need to move
 *    // a reference from one pointer to another.
 *
 *    amdgpu_device_reference(&dst, src);
 *    // Reference counters are updated. dst is decremented and src is
 *    // incremented. dst is freed if its reference counter is 0.
 */
static void amdgpu_device_reference(struct amdgpu_device **dst,
                                    struct amdgpu_device *src)
{
        if (update_references(&(*dst)->refcount, &src->refcount))
                amdgpu_device_free_internal(*dst);
        *dst = src;
}

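/**
 * Create or look up the device structure for a DRM fd.
 *
 * If the same device has already been initialized through another fd, the
 * existing amdgpu_device is reused and its reference count is bumped;
 * otherwise a new structure is allocated, the kernel DRM version is checked
 * and the virtual address managers are set up.
 *
 * Minimal usage sketch (the open() call and the render node path are only
 * illustrative, not something this file provides):
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR);
 *    uint32_t major, minor;
 *    amdgpu_device_handle dev;
 *
 *    if (fd >= 0 && !amdgpu_device_initialize(fd, &major, &minor, &dev)) {
 *            // ... use dev ...
 *            amdgpu_device_deinitialize(dev);
 *    }
 *
 * \param fd             - \c [in]  File descriptor for the AMD GPU device
 * \param major_version  - \c [out] DRM major version of the kernel driver
 * \param minor_version  - \c [out] DRM minor version of the kernel driver
 * \param device_handle  - \c [out] Initialized device handle
 *
 * \return  0 on success, negative POSIX error code otherwise.
 */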
int amdgpu_device_initialize(int fd,
                             uint32_t *major_version,
                             uint32_t *minor_version,
                             amdgpu_device_handle *device_handle)
{
        struct amdgpu_device *dev;
        drmVersionPtr version;
        int r;
        int flag_auth = 0;
        int flag_authexist = 0;
        uint32_t accel_working = 0;
        uint64_t start, max;

        *device_handle = NULL;

        pthread_mutex_lock(&fd_mutex);
        if (!fd_tab)
                fd_tab = util_hash_table_create(fd_hash, fd_compare);
        r = amdgpu_get_auth(fd, &flag_auth);
        if (r) {
                pthread_mutex_unlock(&fd_mutex);
                return r;
        }
        dev = util_hash_table_get(fd_tab, UINT_TO_PTR(fd));
        if (dev) {
                r = amdgpu_get_auth(dev->fd, &flag_authexist);
                if (r) {
                        pthread_mutex_unlock(&fd_mutex);
                        return r;
                }
                if ((flag_auth) && (!flag_authexist)) {
                        dev->flink_fd = dup(fd);
                }
                *major_version = dev->major_version;
                *minor_version = dev->minor_version;
                amdgpu_device_reference(device_handle, dev);
                pthread_mutex_unlock(&fd_mutex);
                return 0;
        }

        dev = calloc(1, sizeof(struct amdgpu_device));
        if (!dev) {
                pthread_mutex_unlock(&fd_mutex);
                return -ENOMEM;
        }

        dev->fd = -1;
        dev->flink_fd = -1;

        atomic_set(&dev->refcount, 1);

        version = drmGetVersion(fd);
        if (!version) {
                /* drmGetVersion() failed; the fd is not a usable DRM fd. */
                r = -EBADF;
                goto cleanup;
        }
        if (version->version_major != 3) {
                fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
                        "only compatible with 3.x.x.\n",
                        __func__,
                        version->version_major,
                        version->version_minor,
                        version->version_patchlevel);
                drmFreeVersion(version);
                r = -EBADF;
                goto cleanup;
        }

        dev->fd = dup(fd);
        dev->flink_fd = dev->fd;
        dev->major_version = version->version_major;
        dev->minor_version = version->version_minor;
        drmFreeVersion(version);

        dev->bo_flink_names = util_hash_table_create(handle_hash,
                                                     handle_compare);
        dev->bo_handles = util_hash_table_create(handle_hash, handle_compare);
        pthread_mutex_init(&dev->bo_table_mutex, NULL);

        /* Check if acceleration is working. */
        r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
        if (r)
                goto cleanup;
        if (!accel_working) {
                r = -EBADF;
                goto cleanup;
        }

        r = amdgpu_query_gpu_info_init(dev);
        if (r)
                goto cleanup;

        dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
        if (dev->vamgr == NULL) {
                r = -ENOMEM;
                goto cleanup;
        }

        amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset,
                          dev->dev_info.virtual_address_max,
                          dev->dev_info.virtual_address_alignment);

        /* Carve the VA range below 4 GB out of the general manager and hand
         * it to a dedicated manager for allocations that need a 32-bit
         * address. */
        max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
        start = amdgpu_vamgr_find_va(dev->vamgr,
                                     max - dev->dev_info.virtual_address_offset,
                                     dev->dev_info.virtual_address_alignment, 0);
        if (start > 0xffffffff)
                goto free_va; /* shouldn't get here */

        dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
        if (dev->vamgr_32 == NULL)
                goto free_va;
        amdgpu_vamgr_init(dev->vamgr_32, start, max,
                          dev->dev_info.virtual_address_alignment);

        *major_version = dev->major_version;
        *minor_version = dev->minor_version;
        *device_handle = dev;
        util_hash_table_set(fd_tab, UINT_TO_PTR(dev->fd), dev);
        pthread_mutex_unlock(&fd_mutex);

        return 0;

free_va:
        r = -ENOMEM;
        amdgpu_vamgr_free_va(dev->vamgr, start,
                             max - dev->dev_info.virtual_address_offset);
        amdgpu_vamgr_deinit(dev->vamgr);
        free(dev->vamgr);

cleanup:
        if (dev->fd >= 0)
                close(dev->fd);
        free(dev);
        pthread_mutex_unlock(&fd_mutex);
        return r;
}

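/**
 * Drop one reference to the device.  The device and all of its bookkeeping
 * are destroyed when the last reference goes away.
 */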
int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
        amdgpu_device_reference(&dev, NULL);
        return 0;
}

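/**
 * Look up the marketing name of the device by matching its PCI device id
 * and revision id against the built-in ASIC id table.
 *
 * \return the marketing name, or NULL if the ASIC is not listed.
 */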
const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
        const struct amdgpu_asic_id_table_t *t = amdgpu_asic_id_table;

        while (t->did) {
                if ((t->did == dev->info.asic_id) &&
                    (t->rid == dev->info.pci_rev_id))
                        return t->marketing_name;
                t++;
        }

        return NULL;
}