1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 /**
25 * \file amdgpu_device.c
26 *
27 * Implementation of functions for AMD GPU device
28 *
29 */
30
31 #include <sys/stat.h>
32 #include <errno.h>
33 #include <string.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <unistd.h>
37
38 #include "xf86drm.h"
39 #include "amdgpu_drm.h"
40 #include "amdgpu_internal.h"
41 #include "util_hash_table.h"
42 #include "util_math.h"
43
44 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
45 #define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
46
47 static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
48 static struct util_hash_table *fd_tab;
49
/* Hash callback for handle-keyed tables: the GEM handle is its own hash. */
static unsigned handle_hash(void *key)
{
	return (unsigned)(intptr_t)key;
}
54
/* Compare callback for handle-keyed tables: 0 when equal, non-zero otherwise. */
static int handle_compare(void *key1, void *key2)
{
	unsigned a = (unsigned)(intptr_t)key1;
	unsigned b = (unsigned)(intptr_t)key2;

	return a != b;
}
59
/*
 * Hash an fd by the primary device node name it refers to, so two fds
 * pointing at the same device hash identically.  Returns 0 when the
 * name cannot be resolved.
 */
static unsigned fd_hash(void *key)
{
	char *name = drmGetPrimaryDeviceNameFromFd(PTR_TO_UINT(key));
	unsigned sum = 0;

	if (name == NULL)
		return 0;

	/* Simple byte-sum of the device path. */
	for (size_t i = 0; name[i] != '\0'; i++)
		sum += name[i];

	free(name);

	return sum;
}
77
/*
 * Compare two fds by the primary device node name they resolve to.
 * Returns 0 ("equal") when either name cannot be resolved, matching
 * the original best-effort behavior.
 */
static int fd_compare(void *key1, void *key2)
{
	char *name1 = drmGetPrimaryDeviceNameFromFd(PTR_TO_UINT(key1));
	char *name2 = drmGetPrimaryDeviceNameFromFd(PTR_TO_UINT(key2));
	int diff = 0;

	if (name1 != NULL && name2 != NULL)
		diff = strcmp(name1, name2);

	/* free(NULL) is a no-op, so no guards are needed. */
	free(name1);
	free(name2);

	return diff;
}
98
99 /**
 * Get the authentication state from the fd.
101 *
102 * \param fd - \c [in] File descriptor for AMD GPU device
103 * \param auth - \c [out] Pointer to output the fd is authenticated or not
104 * A render node fd, output auth = 0
105 * A legacy fd, get the authenticated for compatibility root
106 *
107 * \return 0 on success\n
108 * >0 - AMD specific error code\n
109 * <0 - Negative POSIX Error code
110 */
amdgpu_get_auth(int fd,int * auth)111 static int amdgpu_get_auth(int fd, int *auth)
112 {
113 int r = 0;
114 drm_client_t client = {};
115
116 if (drmGetNodeTypeFromFd(fd) == DRM_NODE_RENDER)
117 *auth = 0;
118 else {
119 client.idx = 0;
120 r = drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client);
121 if (!r)
122 *auth = client.auth;
123 }
124 return r;
125 }
126
/* Final teardown of a device once its reference count reaches zero. */
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
	/* Release resources in roughly the reverse order of initialization. */
	amdgpu_vamgr_deinit(&dev->vamgr_32);
	amdgpu_vamgr_deinit(&dev->vamgr);
	/* Fix: vamgr_high_32 and vamgr_high are initialized in
	 * amdgpu_device_initialize() but were never deinitialized here,
	 * leaking their bookkeeping allocations. */
	amdgpu_vamgr_deinit(&dev->vamgr_high_32);
	amdgpu_vamgr_deinit(&dev->vamgr_high);
	util_hash_table_destroy(dev->bo_flink_names);
	util_hash_table_destroy(dev->bo_handles);
	pthread_mutex_destroy(&dev->bo_table_mutex);
	/* NOTE(review): fd_tab is guarded by fd_mutex everywhere else; this
	 * removal runs without the lock — confirm callers serialize teardown
	 * against amdgpu_device_initialize(). */
	util_hash_table_remove(fd_tab, UINT_TO_PTR(dev->fd));
	close(dev->fd);
	/* flink_fd is a separate dup() only when it differs from fd. */
	if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
		close(dev->flink_fd);
	free(dev->marketing_name);
	free(dev);
}
141
142 /**
143 * Assignment between two amdgpu_device pointers with reference counting.
144 *
145 * Usage:
146 * struct amdgpu_device *dst = ... , *src = ...;
147 *
148 * dst = src;
149 * // No reference counting. Only use this when you need to move
150 * // a reference from one pointer to another.
151 *
152 * amdgpu_device_reference(&dst, src);
153 * // Reference counters are updated. dst is decremented and src is
154 * // incremented. dst is freed if its reference counter is 0.
155 */
static void amdgpu_device_reference(struct amdgpu_device **dst,
				    struct amdgpu_device *src)
{
	/* update_references() adjusts both counters and returns true when
	 * the object previously held in *dst dropped to zero and must be
	 * destroyed.  NOTE(review): this relies on update_references()
	 * tolerating a NULL counter pointer on either side (e.g. when src
	 * is NULL during deinitialize) — confirm in amdgpu_internal.h. */
	if (update_references(&(*dst)->refcount, &src->refcount))
		amdgpu_device_free_internal(*dst);
	/* Hand the slot over to src regardless of whether *dst was freed. */
	*dst = src;
}
163
/**
 * Open (or re-reference) the amdgpu device behind a DRM fd.
 *
 * \param fd            - \c [in]  DRM file descriptor for the device
 * \param major_version - \c [out] Kernel driver major version
 * \param minor_version - \c [out] Kernel driver minor version
 * \param device_handle - \c [out] Resulting device handle
 *
 * \return 0 on success, negative POSIX error code otherwise.
 */
int amdgpu_device_initialize(int fd,
			     uint32_t *major_version,
			     uint32_t *minor_version,
			     amdgpu_device_handle *device_handle)
{
	struct amdgpu_device *dev;
	drmVersionPtr version;
	int r;
	int flag_auth = 0;
	int flag_authexist = 0;
	uint32_t accel_working = 0;
	uint64_t start, max;

	*device_handle = NULL;

	pthread_mutex_lock(&fd_mutex);
	if (!fd_tab)
		fd_tab = util_hash_table_create(fd_hash, fd_compare);
	r = amdgpu_get_auth(fd, &flag_auth);
	if (r) {
		fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
			__func__, r);
		pthread_mutex_unlock(&fd_mutex);
		return r;
	}
	dev = util_hash_table_get(fd_tab, UINT_TO_PTR(fd));
	if (dev) {
		/* Device already open: reuse it with a bumped refcount. */
		r = amdgpu_get_auth(dev->fd, &flag_authexist);
		if (r) {
			fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
				__func__, r);
			pthread_mutex_unlock(&fd_mutex);
			return r;
		}
		/* An authenticated fd can serve flink for a device that was
		 * previously opened unauthenticated. */
		if ((flag_auth) && (!flag_authexist)) {
			dev->flink_fd = dup(fd);
		}
		*major_version = dev->major_version;
		*minor_version = dev->minor_version;
		amdgpu_device_reference(device_handle, dev);
		pthread_mutex_unlock(&fd_mutex);
		return 0;
	}

	dev = calloc(1, sizeof(struct amdgpu_device));
	if (!dev) {
		fprintf(stderr, "%s: calloc failed\n", __func__);
		pthread_mutex_unlock(&fd_mutex);
		return -ENOMEM;
	}

	dev->fd = -1;
	dev->flink_fd = -1;

	atomic_set(&dev->refcount, 1);
	/* Initialize the mutex up-front so the cleanup path can always
	 * destroy it safely. */
	pthread_mutex_init(&dev->bo_table_mutex, NULL);

	version = drmGetVersion(fd);
	/* Fix: drmGetVersion() can return NULL; the version fields were
	 * previously dereferenced unchecked, crashing on bad fds. */
	if (!version) {
		fprintf(stderr, "%s: drmGetVersion failed\n", __func__);
		r = -EBADF;
		goto cleanup;
	}
	if (version->version_major != 3) {
		fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
			"only compatible with 3.x.x.\n",
			__func__,
			version->version_major,
			version->version_minor,
			version->version_patchlevel);
		drmFreeVersion(version);
		r = -EBADF;
		goto cleanup;
	}

	dev->fd = dup(fd);
	dev->flink_fd = dev->fd;
	dev->major_version = version->version_major;
	dev->minor_version = version->version_minor;
	drmFreeVersion(version);

	dev->bo_flink_names = util_hash_table_create(handle_hash,
						     handle_compare);
	dev->bo_handles = util_hash_table_create(handle_hash, handle_compare);
	/* Fix: table creation was previously unchecked; a NULL table would
	 * crash on first BO import/export. */
	if (!dev->bo_flink_names || !dev->bo_handles) {
		fprintf(stderr, "%s: hash table creation failed\n", __func__);
		r = -ENOMEM;
		goto cleanup;
	}

	/* Check if acceleration is working. */
	r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
	if (r) {
		fprintf(stderr, "%s: amdgpu_query_info(ACCEL_WORKING) failed (%i)\n",
			__func__, r);
		goto cleanup;
	}
	if (!accel_working) {
		fprintf(stderr, "%s: AMDGPU_INFO_ACCEL_WORKING = 0\n", __func__);
		r = -EBADF;
		goto cleanup;
	}

	r = amdgpu_query_gpu_info_init(dev);
	if (r) {
		fprintf(stderr, "%s: amdgpu_query_gpu_info_init failed\n", __func__);
		goto cleanup;
	}

	/* Low 32-bit VA range, capped at the 4 GiB boundary. */
	start = dev->dev_info.virtual_address_offset;
	max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
	amdgpu_vamgr_init(&dev->vamgr_32, start, max,
			  dev->dev_info.virtual_address_alignment);

	/* Remaining low VA range above 4 GiB. */
	start = max;
	max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
	amdgpu_vamgr_init(&dev->vamgr, start, max,
			  dev->dev_info.virtual_address_alignment);

	/* High VA range: first the 32-bit-addressable window... */
	start = dev->dev_info.high_va_offset;
	max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
		   0x100000000ULL);
	amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
			  dev->dev_info.virtual_address_alignment);

	/* ...then the rest of it. */
	start = max;
	max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
		   0x100000000ULL);
	amdgpu_vamgr_init(&dev->vamgr_high, start, max,
			  dev->dev_info.virtual_address_alignment);

	amdgpu_parse_asic_ids(dev);

	*major_version = dev->major_version;
	*minor_version = dev->minor_version;
	*device_handle = dev;
	util_hash_table_set(fd_tab, UINT_TO_PTR(dev->fd), dev);
	pthread_mutex_unlock(&fd_mutex);

	return 0;

cleanup:
	/* Fix: the hash tables and mutex were previously leaked on the
	 * error path after their creation (e.g. ACCEL_WORKING failure). */
	if (dev->bo_handles)
		util_hash_table_destroy(dev->bo_handles);
	if (dev->bo_flink_names)
		util_hash_table_destroy(dev->bo_flink_names);
	pthread_mutex_destroy(&dev->bo_table_mutex);
	if (dev->fd >= 0)
		close(dev->fd);
	free(dev);
	pthread_mutex_unlock(&fd_mutex);
	return r;
}
302
/* Drop one reference to the device; it is torn down when the count
 * reaches zero.  Always succeeds. */
int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
	amdgpu_device_reference(&dev, NULL);

	return 0;
}
308
/* Return the device's marketing name as resolved by amdgpu_parse_asic_ids()
 * at initialization time.  May be NULL when no match was found. */
const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
	return dev->marketing_name;
}
313
int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
			 void *value)
{
	uint32_t *val32 = value;

	/* Only one software-info query is currently defined. */
	if (info != amdgpu_sw_info_address32_hi)
		return -EINVAL;

	/* Report the high bits of the top of the 32-bit-addressable VA
	 * window, preferring the high range when one exists. */
	if (dev->vamgr_high_32.va_max)
		*val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
	else
		*val32 = (dev->vamgr_32.va_max - 1) >> 32;

	return 0;
}
329