/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/u_hash_table.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include "amd/common/sid.h"
#include "amd/common/gfx9d.h"

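/* Fallback definition for libdrm/kernel headers that don't expose this
 * query yet. */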
#ifndef AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#endif

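/* Table of all amdgpu_winsys instances, keyed by amdgpu_device_handle, so
 * that screens opened on the same device share a single winsys. Protected
 * by dev_tab_mutex. */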
static struct util_hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = _SIMPLE_MTX_INITIALIZER_NP;

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
{
   if (!ac_query_gpu_info(fd, ws->dev, &ws->info, &ws->amdinfo))
      goto fail;

   /* LLVM 5.0 is required for GFX9. */
   if (ws->info.chip_class >= GFX9 && HAVE_LLVM < 0x0500) {
      fprintf(stderr, "amdgpu: LLVM 5.0 is required, got LLVM %i.%i\n",
              HAVE_LLVM >> 8, HAVE_LLVM & 255);
      goto fail;
   }

   ws->addrlib = amdgpu_addr_create(&ws->info, &ws->amdinfo, &ws->info.max_alignment);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
   ws->debug_all_bos = debug_get_option_all_bos();
   ws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL;

   return true;

fail:
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
}

static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   if (ws->reserve_vmid)
      amdgpu_vm_unreserve_vmid(ws->dev, 0);

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   simple_mtx_destroy(&ws->bo_fence_lock);
   pb_slabs_deinit(&ws->bo_slabs);
   pb_cache_deinit(&ws->bo_cache);
   simple_mtx_destroy(&ws->global_bo_list_lock);
   do_winsys_deinit(ws);
   FREE(rws);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = ((struct amdgpu_winsys *)rws)->info;
}

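/* There are no per-context features to request on amdgpu, so this always
 * reports failure. */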
static bool amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

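/* Return a statistic or sensor value. Some counters are tracked by the
 * winsys itself; the rest are queried from the kernel through
 * amdgpu_query_info, amdgpu_query_heap_info and amdgpu_query_sensor_info. */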
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return ws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return ws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_GPU_RESET_COUNTER:
      assert(0);
      return 0;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}

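/* Read MM registers via the kernel. reg_offset is in bytes, while the ioctl
 * takes a dword offset, hence the division by 4. */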
static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

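/* Hash and compare callbacks for dev_tab; keys are amdgpu_device_handle
 * pointers. On 64-bit, fold the upper half of the pointer into the hash. */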
static unsigned hash_dev(void *key)
{
#if defined(PIPE_ARCH_X86_64)
   return pointer_to_intptr(key) ^ (pointer_to_intptr(key) >> 32);
#else
   return pointer_to_intptr(key);
#endif
}

static int compare_dev(void *key1, void *key2)
{
   return key1 != key2;
}

static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0. */
   simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab)
      util_hash_table_remove(dev_tab, ws->dev);

   simple_mtx_unlock(&dev_tab_mutex);
   return destroy;
}

static const char* amdgpu_get_chip_name(struct radeon_winsys *ws)
{
   amdgpu_device_handle dev = ((struct amdgpu_winsys *)ws)->dev;
   return amdgpu_get_marketing_name(dev);
}


PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create)
{
   struct amdgpu_winsys *ws;
   drmVersionPtr version = drmGetVersion(fd);
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor, r;

   /* The DRM driver version of amdgpu is 3.x.x. */
   if (version->version_major != 3) {
      drmFreeVersion(version);
      return NULL;
   }
   drmFreeVersion(version);

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create(hash_dev, compare_dev);

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
   if (r) {
      simple_mtx_unlock(&dev_tab_mutex);
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      return NULL;
   }

   /* Look up a winsys if we have already created one for this device. */
   ws = util_hash_table_get(dev_tab, dev);
   if (ws) {
      pipe_reference(NULL, &ws->reference);
      simple_mtx_unlock(&dev_tab_mutex);
      return &ws->base;
   }

   /* Create a new winsys. */
   ws = CALLOC_STRUCT(amdgpu_winsys);
   if (!ws)
      goto fail;

   ws->dev = dev;
   ws->info.drm_major = drm_major;
   ws->info.drm_minor = drm_minor;

   if (!do_winsys_init(ws, fd))
      goto fail_alloc;

   /* Create managers. */
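   /* Keep freed buffers cached for reuse; pb_cache expires entries after
    * 500000 usecs and the cache is capped at 1/8 of VRAM + GTT. */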
   pb_cache_init(&ws->bo_cache, 500000, ws->check_vm ? 1.0f : 2.0f, 0,
                 (ws->info.vram_size + ws->info.gart_size) / 8,
                 amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

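   /* Slab allocator for small buffers: allocations between
    * 2^AMDGPU_SLAB_MIN_SIZE_LOG2 and 2^AMDGPU_SLAB_MAX_SIZE_LOG2 bytes are
    * suballocated from larger slab BOs. */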
   if (!pb_slabs_init(&ws->bo_slabs,
                      AMDGPU_SLAB_MIN_SIZE_LOG2, AMDGPU_SLAB_MAX_SIZE_LOG2,
                      RADEON_MAX_SLAB_HEAPS,
                      ws,
                      amdgpu_bo_can_reclaim_slab,
                      amdgpu_bo_slab_alloc,
                      amdgpu_bo_slab_free))
      goto fail_cache;

   ws->info.min_alloc_size = 1 << AMDGPU_SLAB_MIN_SIZE_LOG2;

   /* init reference */
   pipe_reference_init(&ws->reference, 1);

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;
   ws->base.get_chip_name = amdgpu_get_chip_name;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   LIST_INITHEAD(&ws->global_bo_list);
   (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
   (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);

   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1,
                        UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
      amdgpu_winsys_destroy(&ws->base);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base, config);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   util_hash_table_set(dev_tab, dev, ws);

   if (ws->reserve_vmid) {
      r = amdgpu_vm_reserve_vmid(dev, 0);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_vm_reserve_vmid failed. (%i)\n", r);
         goto fail_cache;
      }
   }

   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &ws->base;

fail_cache:
   pb_cache_deinit(&ws->bo_cache);
   do_winsys_deinit(ws);
fail_alloc:
   FREE(ws);
fail:
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}