/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
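
/*
 * A minimal submission sketch built from the helpers below (hypothetical
 * caller; error handling abbreviated and the packet contents made up):
 *
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ring->funcs->nop);
 *	...
 *	amdgpu_ring_commit(ring);
 *
 * On a mid-stream error, amdgpu_ring_undo() restores the old wptr so the
 * partially written commands are never fetched.
 */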

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align the requested size so amdgpu_ring_commit() can pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
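 *
 * For example, with an align_mask of 7, a 61 dword IB is padded with three
 * NOPs to 64 dwords.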
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
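	/* e.g. with align_mask == 7: (wptr & 7) == 5 yields
	 * count = (8 - 5) % 8 = 3, while an already aligned
	 * wptr yields count = 0
	 */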
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
						  sched_score);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) ring trail_fence_offs wb alloc failed\n", r);
		return r;
	}
	ring->trail_fence_gpu_addr =
		adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
	ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
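	/* e.g. a 64KB ring holds 16384 dwords, so buf_mask == 0x3fff */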
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

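/*
 * A minimal caller sketch (hypothetical IP block; real callers supply
 * their own ring funcs, irq source/type and priority):
 *
 *	ring->funcs = &my_ring_funcs;
 *	sprintf(ring->name, "my_ring");
 *	r = amdgpu_ring_init(adev, ring, 1024, &adev->my_irq, MY_IRQ_TYPE,
 *			     AMDGPU_RING_PRIO_DEFAULT, NULL);
 */
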
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Don't tear down a ring which was never initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	ring->sched.ready = false;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
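 *
 * Typically wired up as the ->emit_reg_write_reg_wait callback in the
 * amdgpu_ring_funcs of such rings.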
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: fence of the timed out job
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
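 *
 * e.g. dumping the gfx ring (hypothetical ring name and DRM minor 0):
 *	hexdump -C /sys/kernel/debug/dri/0/amdgpu_ring_gfx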
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}

/**
 * amdgpu_ring_test_helper - test the ring and set sched readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}