Lines matching refs: rdev in drivers/gpu/drm/radeon/radeon_fence.c
Each entry below gives the file's line number, the matching source line, the enclosing function, and, where rdev is introduced on that line, whether it enters as a function argument or a local variable.
62 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) in radeon_fence_write() argument
64 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; in radeon_fence_write()
65 if (likely(rdev->wb.enabled || !drv->scratch_reg)) { in radeon_fence_write()
83 static u32 radeon_fence_read(struct radeon_device *rdev, int ring) in radeon_fence_read() argument
85 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; in radeon_fence_read()
88 if (likely(rdev->wb.enabled || !drv->scratch_reg)) { in radeon_fence_read()
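The radeon_fence_write()/radeon_fence_read() pair above picks between a CPU-visible writeback slot and an MMIO scratch register, preferring the cheap CPU access whenever writeback is enabled or no scratch register was allocated. A minimal userspace sketch of that selection logic (the struct layout, mmio_regs array, and fence_* names are simplified stand-ins, not the kernel's definitions):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified model: either a CPU-visible writeback slot or an
 * MMIO scratch register backs the fence value. */
struct fence_drv_model {
	volatile uint32_t *cpu_addr;   /* writeback slot, if enabled */
	uint32_t scratch_reg;          /* register index, 0 if unused */
	int wb_enabled;
};

static uint32_t mmio_regs[64];     /* stand-in for the register file */

static void fence_write(struct fence_drv_model *drv, uint32_t seq)
{
	if (drv->wb_enabled || !drv->scratch_reg)
		*drv->cpu_addr = seq;              /* cheap CPU store */
	else
		mmio_regs[drv->scratch_reg] = seq; /* stand-in for WREG32() */
}

static uint32_t fence_read(struct fence_drv_model *drv)
{
	if (drv->wb_enabled || !drv->scratch_reg)
		return *drv->cpu_addr;
	return mmio_regs[drv->scratch_reg];        /* stand-in for RREG32() */
}

int main(void)
{
	uint32_t slot = 0;
	struct fence_drv_model drv = { .cpu_addr = &slot, .wb_enabled = 1 };

	fence_write(&drv, 42);
	printf("fence seq = %u\n", fence_read(&drv)); /* prints 42 */
	return 0;
}
```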
108 static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring) in radeon_fence_schedule_check() argument
115 &rdev->fence_drv[ring].lockup_work, in radeon_fence_schedule_check()
129 int radeon_fence_emit(struct radeon_device *rdev, in radeon_fence_emit() argument
140 (*fence)->rdev = rdev; in radeon_fence_emit()
141 (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; in radeon_fence_emit()
145 &rdev->fence_queue.lock, in radeon_fence_emit()
146 rdev->fence_context + ring, in radeon_fence_emit()
148 radeon_fence_ring_emit(rdev, ring, *fence); in radeon_fence_emit()
149 trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); in radeon_fence_emit()
150 radeon_fence_schedule_check(rdev, ring); in radeon_fence_emit()
172 seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); in radeon_fence_check_signaled()
181 radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); in radeon_fence_check_signaled()
182 __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); in radeon_fence_check_signaled()
199 static bool radeon_fence_activity(struct radeon_device *rdev, int ring) in radeon_fence_activity() argument
226 last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq); in radeon_fence_activity()
228 last_emitted = rdev->fence_drv[ring].sync_seq[ring]; in radeon_fence_activity()
229 seq = radeon_fence_read(rdev, ring); in radeon_fence_activity()
253 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); in radeon_fence_activity()
256 radeon_fence_schedule_check(rdev, ring); in radeon_fence_activity()
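The interesting part of radeon_fence_activity() is how it extends the 32-bit value the GPU writes back into the driver's 64-bit sequence space, handling counter wraparound and racing updaters so last_seq never moves backwards. A hedged userspace reconstruction of that technique, assuming a 32-bit hardware readback hw_seq and using C11 atomic_exchange where the kernel uses atomic64_xchg (names are illustrative):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Extend a 32-bit hardware sequence into 64 bits, mirroring the
 * wraparound handling in radeon_fence_activity(). */
static bool fence_activity(_Atomic uint64_t *last_seq_p,
			   uint64_t last_emitted, uint32_t hw_seq)
{
	bool wake = false;
	uint64_t seq, last_seq;

	do {
		last_seq = atomic_load(last_seq_p);

		/* Splice the 32-bit readback onto the last known upper half. */
		seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

		/* If that went backwards, the 32-bit counter wrapped: borrow
		 * the upper half from the last *emitted* sequence instead. */
		if (seq < last_seq) {
			seq &= 0xffffffffULL;
			seq |= last_emitted & 0xffffffff00000000ULL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;		/* nothing new, or a bogus readback */
		wake = true;		/* at least one fence signaled */

		/* Publish seq unless someone raced us to a higher value;
		 * if they did, retry so last_seq never goes backwards. */
	} while (atomic_exchange(last_seq_p, seq) > seq);

	return wake;
}

int main(void)
{
	_Atomic uint64_t last_seq = 0xfffffffeULL; /* just below 32-bit wrap */
	bool wake = fence_activity(&last_seq, 0x100000002ULL, 1);

	/* hw_seq=1 wrapped past 2^32 and is reconstructed as 0x100000001 */
	printf("wake=%d last_seq=0x%llx\n", wake,
	       (unsigned long long)atomic_load(&last_seq));
	return 0;
}
```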
272 struct radeon_device *rdev; in radeon_fence_check_lockup() local
277 rdev = fence_drv->rdev; in radeon_fence_check_lockup()
278 ring = fence_drv - &rdev->fence_drv[0]; in radeon_fence_check_lockup()
280 if (!down_read_trylock(&rdev->exclusive_lock)) { in radeon_fence_check_lockup()
282 radeon_fence_schedule_check(rdev, ring); in radeon_fence_check_lockup()
286 if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) { in radeon_fence_check_lockup()
290 spin_lock_irqsave(&rdev->irq.lock, irqflags); in radeon_fence_check_lockup()
291 radeon_irq_set(rdev); in radeon_fence_check_lockup()
292 spin_unlock_irqrestore(&rdev->irq.lock, irqflags); in radeon_fence_check_lockup()
295 if (radeon_fence_activity(rdev, ring)) in radeon_fence_check_lockup()
296 wake_up_all(&rdev->fence_queue); in radeon_fence_check_lockup()
298 else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { in radeon_fence_check_lockup()
301 dev_warn(rdev->dev, "GPU lockup (current fence id " in radeon_fence_check_lockup()
307 rdev->needs_reset = true; in radeon_fence_check_lockup()
308 wake_up_all(&rdev->fence_queue); in radeon_fence_check_lockup()
310 up_read(&rdev->exclusive_lock); in radeon_fence_check_lockup()
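radeon_fence_check_lockup() runs as delayed work, so it has to recover both its device and its ring index from nothing but the work pointer: container_of() walks back to the enclosing radeon_fence_driver, and subtracting &rdev->fence_drv[0] yields the ring number. A small standalone illustration of that recovery pattern, with deliberately simplified structures (work, fence_driver, device_model are invented for the sketch):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device_model;

struct work { int pending; };

struct fence_driver {
	struct work lockup_work;	/* embedded work item */
	struct device_model *dev;	/* back-pointer to the device */
};

struct device_model {
	struct fence_driver fence_drv[4]; /* one driver per ring */
};

static void lockup_check(struct work *w)
{
	/* Recover the enclosing fence driver from the embedded member... */
	struct fence_driver *drv =
		container_of(w, struct fence_driver, lockup_work);
	/* ...then the ring index by pointer subtraction within the array. */
	struct device_model *dev = drv->dev;
	long ring = drv - &dev->fence_drv[0];

	printf("lockup check on ring %ld\n", ring);
}

int main(void)
{
	struct device_model dev;

	for (int i = 0; i < 4; i++)
		dev.fence_drv[i].dev = &dev;

	lockup_check(&dev.fence_drv[2].lockup_work); /* prints ring 2 */
	return 0;
}
```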
322 void radeon_fence_process(struct radeon_device *rdev, int ring) in radeon_fence_process() argument
324 if (radeon_fence_activity(rdev, ring)) in radeon_fence_process()
325 wake_up_all(&rdev->fence_queue); in radeon_fence_process()
342 static bool radeon_fence_seq_signaled(struct radeon_device *rdev, in radeon_fence_seq_signaled() argument
345 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { in radeon_fence_seq_signaled()
349 radeon_fence_process(rdev, ring); in radeon_fence_seq_signaled()
350 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { in radeon_fence_seq_signaled()
359 struct radeon_device *rdev = fence->rdev; in radeon_fence_is_signaled() local
363 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { in radeon_fence_is_signaled()
367 if (down_read_trylock(&rdev->exclusive_lock)) { in radeon_fence_is_signaled()
368 radeon_fence_process(rdev, ring); in radeon_fence_is_signaled()
369 up_read(&rdev->exclusive_lock); in radeon_fence_is_signaled()
371 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { in radeon_fence_is_signaled()
389 struct radeon_device *rdev = fence->rdev; in radeon_fence_enable_signaling() local
391 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) in radeon_fence_enable_signaling()
394 if (down_read_trylock(&rdev->exclusive_lock)) { in radeon_fence_enable_signaling()
395 radeon_irq_kms_sw_irq_get(rdev, fence->ring); in radeon_fence_enable_signaling()
397 if (radeon_fence_activity(rdev, fence->ring)) in radeon_fence_enable_signaling()
398 wake_up_all_locked(&rdev->fence_queue); in radeon_fence_enable_signaling()
401 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) { in radeon_fence_enable_signaling()
402 radeon_irq_kms_sw_irq_put(rdev, fence->ring); in radeon_fence_enable_signaling()
403 up_read(&rdev->exclusive_lock); in radeon_fence_enable_signaling()
407 up_read(&rdev->exclusive_lock); in radeon_fence_enable_signaling()
410 if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring)) in radeon_fence_enable_signaling()
411 rdev->fence_drv[fence->ring].delayed_irq = true; in radeon_fence_enable_signaling()
412 radeon_fence_schedule_check(rdev, fence->ring); in radeon_fence_enable_signaling()
418 __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); in radeon_fence_enable_signaling()
438 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { in radeon_fence_signaled()
460 static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) in radeon_fence_any_seq_signaled() argument
465 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) in radeon_fence_any_seq_signaled()
488 static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev, in radeon_fence_wait_seq_timeout() argument
495 if (radeon_fence_any_seq_signaled(rdev, target_seq)) in radeon_fence_wait_seq_timeout()
503 trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); in radeon_fence_wait_seq_timeout()
504 radeon_irq_kms_sw_irq_get(rdev, i); in radeon_fence_wait_seq_timeout()
508 r = wait_event_interruptible_timeout(rdev->fence_queue, ( in radeon_fence_wait_seq_timeout()
509 radeon_fence_any_seq_signaled(rdev, target_seq) in radeon_fence_wait_seq_timeout()
510 || rdev->needs_reset), timeout); in radeon_fence_wait_seq_timeout()
512 r = wait_event_timeout(rdev->fence_queue, ( in radeon_fence_wait_seq_timeout()
513 radeon_fence_any_seq_signaled(rdev, target_seq) in radeon_fence_wait_seq_timeout()
514 || rdev->needs_reset), timeout); in radeon_fence_wait_seq_timeout()
517 if (rdev->needs_reset) in radeon_fence_wait_seq_timeout()
524 radeon_irq_kms_sw_irq_put(rdev, i); in radeon_fence_wait_seq_timeout()
525 trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); in radeon_fence_wait_seq_timeout()
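radeon_fence_wait_seq_timeout() sleeps on the single shared fence_queue until any of up to RADEON_NUM_RINGS target sequences is reached, with a zero target meaning "ignore this ring"; the kernel version also bails out early when rdev->needs_reset is set, which this sketch drops. A hedged pthread analogue of the same any-of-N wait shape (fence_queue, wait_any_seq, and signal_seq are invented names for illustration):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define NUM_RINGS 8

struct fence_queue {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	uint64_t        last_seq[NUM_RINGS]; /* last signaled per ring */
};

static bool any_seq_signaled(struct fence_queue *q, const uint64_t *target)
{
	for (int i = 0; i < NUM_RINGS; i++)
		if (target[i] && q->last_seq[i] >= target[i])
			return true;
	return false;
}

/* Block until any targeted ring signals, or timeout_ms elapses.
 * Returns true if signaled, false on timeout. */
bool wait_any_seq(struct fence_queue *q, const uint64_t *target,
		  unsigned int timeout_ms)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec  += timeout_ms / 1000;
	deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&q->lock);
	while (!any_seq_signaled(q, target)) {
		if (pthread_cond_timedwait(&q->cond, &q->lock, &deadline)) {
			pthread_mutex_unlock(&q->lock);
			return false;	/* ETIMEDOUT */
		}
	}
	pthread_mutex_unlock(&q->lock);
	return true;
}

/* A signaler bumps last_seq under the lock and broadcasts, much as
 * radeon_fence_process() ends in wake_up_all(&rdev->fence_queue). */
void signal_seq(struct fence_queue *q, int ring, uint64_t seq)
{
	pthread_mutex_lock(&q->lock);
	q->last_seq[ring] = seq;
	pthread_cond_broadcast(&q->cond);
	pthread_mutex_unlock(&q->lock);
}
```

A queue would be initialized with PTHREAD_MUTEX_INITIALIZER and PTHREAD_COND_INITIALIZER; broadcasting rather than signaling matches the kernel's wake_up_all(), since several waiters may be watching different rings on the same queue.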
560 r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); in radeon_fence_wait_timeout()
605 int radeon_fence_wait_any(struct radeon_device *rdev, in radeon_fence_wait_any() argument
628 r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); in radeon_fence_wait_any()
645 int radeon_fence_wait_next(struct radeon_device *rdev, int ring) in radeon_fence_wait_next() argument
650 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; in radeon_fence_wait_next()
651 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { in radeon_fence_wait_next()
656 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT); in radeon_fence_wait_next()
672 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) in radeon_fence_wait_empty() argument
677 seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; in radeon_fence_wait_empty()
681 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT); in radeon_fence_wait_empty()
686 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n", in radeon_fence_wait_empty()
733 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) in radeon_fence_count_emitted() argument
740 radeon_fence_process(rdev, ring); in radeon_fence_count_emitted()
741 emitted = rdev->fence_drv[ring].sync_seq[ring] in radeon_fence_count_emitted()
742 - atomic64_read(&rdev->fence_drv[ring].last_seq); in radeon_fence_count_emitted()
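radeon_fence_count_emitted() boils down to sync_seq[ring] - last_seq: both counters only grow in 64-bit space, so the difference is exactly the number of fences emitted but not yet signaled. For example, sync_seq = 0x100000005 with last_seq = 0x100000002 means three fences are still in flight, even though the 32-bit hardware counter has long since wrapped.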
774 fdrv = &fence->rdev->fence_drv[dst_ring]; in radeon_fence_need_sync()
805 src = &fence->rdev->fence_drv[fence->ring]; in radeon_fence_note_sync()
806 dst = &fence->rdev->fence_drv[dst_ring]; in radeon_fence_note_sync()
827 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) in radeon_fence_driver_start_ring() argument
832 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); in radeon_fence_driver_start_ring()
833 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { in radeon_fence_driver_start_ring()
834 rdev->fence_drv[ring].scratch_reg = 0; in radeon_fence_driver_start_ring()
837 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; in radeon_fence_driver_start_ring()
838 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + in radeon_fence_driver_start_ring()
843 index = ALIGN(rdev->uvd_fw->size, 8); in radeon_fence_driver_start_ring()
844 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; in radeon_fence_driver_start_ring()
845 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; in radeon_fence_driver_start_ring()
849 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); in radeon_fence_driver_start_ring()
851 dev_err(rdev->dev, "fence failed to get scratch register\n"); in radeon_fence_driver_start_ring()
855 rdev->fence_drv[ring].scratch_reg - in radeon_fence_driver_start_ring()
856 rdev->scratch.reg_base; in radeon_fence_driver_start_ring()
857 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; in radeon_fence_driver_start_ring()
858 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; in radeon_fence_driver_start_ring()
860 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); in radeon_fence_driver_start_ring()
861 rdev->fence_drv[ring].initialized = true; in radeon_fence_driver_start_ring()
862 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n", in radeon_fence_driver_start_ring()
863 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); in radeon_fence_driver_start_ring()
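In the scratch-register fallback path of radeon_fence_driver_start_ring() above, the register's offset from scratch.reg_base doubles as a byte index into the writeback page, so the CPU slot is wb[index/4] and the GPU address is the page base plus the same byte offset. A toy model of that addressing (the base and register values here are illustrative, not real radeon register offsets):

```c
#include <stdint.h>
#include <stdio.h>

struct wb_model {
	uint32_t wb[1024];	/* CPU mapping of the writeback page */
	uint64_t gpu_addr;	/* GPU address of the same page */
};

int main(void)
{
	struct wb_model wb = { .gpu_addr = 0x100000 };
	uint32_t reg_base = 0x15e0;	/* assumed scratch base */
	uint32_t scratch_reg = 0x15e8;	/* register handed to this ring */

	/* The register offset from the base is reused as a byte index
	 * into the writeback page, shared by the CPU and GPU views. */
	uint32_t index = scratch_reg - reg_base;	 /* byte offset: 8 */
	volatile uint32_t *cpu_addr = &wb.wb[index / 4]; /* u32 slot: 2 */
	uint64_t gpu_addr = wb.gpu_addr + index;	 /* 0x100008 */

	*cpu_addr = 0;	/* fence storage starts out cleared */
	printf("slot %u, gpu 0x%llx\n", index / 4,
	       (unsigned long long)gpu_addr);
	return 0;
}
```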
877 static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) in radeon_fence_driver_init_ring() argument
881 rdev->fence_drv[ring].scratch_reg = -1; in radeon_fence_driver_init_ring()
882 rdev->fence_drv[ring].cpu_addr = NULL; in radeon_fence_driver_init_ring()
883 rdev->fence_drv[ring].gpu_addr = 0; in radeon_fence_driver_init_ring()
885 rdev->fence_drv[ring].sync_seq[i] = 0; in radeon_fence_driver_init_ring()
886 atomic64_set(&rdev->fence_drv[ring].last_seq, 0); in radeon_fence_driver_init_ring()
887 rdev->fence_drv[ring].initialized = false; in radeon_fence_driver_init_ring()
888 INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work, in radeon_fence_driver_init_ring()
890 rdev->fence_drv[ring].rdev = rdev; in radeon_fence_driver_init_ring()
905 int radeon_fence_driver_init(struct radeon_device *rdev) in radeon_fence_driver_init() argument
909 init_waitqueue_head(&rdev->fence_queue); in radeon_fence_driver_init()
911 radeon_fence_driver_init_ring(rdev, ring); in radeon_fence_driver_init()
913 if (radeon_debugfs_fence_init(rdev)) { in radeon_fence_driver_init()
914 dev_err(rdev->dev, "fence debugfs file creation failed\n"); in radeon_fence_driver_init()
927 void radeon_fence_driver_fini(struct radeon_device *rdev) in radeon_fence_driver_fini() argument
931 mutex_lock(&rdev->ring_lock); in radeon_fence_driver_fini()
933 if (!rdev->fence_drv[ring].initialized) in radeon_fence_driver_fini()
935 r = radeon_fence_wait_empty(rdev, ring); in radeon_fence_driver_fini()
938 radeon_fence_driver_force_completion(rdev, ring); in radeon_fence_driver_fini()
940 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work); in radeon_fence_driver_fini()
941 wake_up_all(&rdev->fence_queue); in radeon_fence_driver_fini()
942 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); in radeon_fence_driver_fini()
943 rdev->fence_drv[ring].initialized = false; in radeon_fence_driver_fini()
945 mutex_unlock(&rdev->ring_lock); in radeon_fence_driver_fini()
957 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring) in radeon_fence_driver_force_completion() argument
959 if (rdev->fence_drv[ring].initialized) { in radeon_fence_driver_force_completion()
960 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); in radeon_fence_driver_force_completion()
961 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work); in radeon_fence_driver_force_completion()
974 struct radeon_device *rdev = dev->dev_private; in radeon_debugfs_fence_info() local
978 if (!rdev->fence_drv[i].initialized) in radeon_debugfs_fence_info()
981 radeon_fence_process(rdev, i); in radeon_debugfs_fence_info()
985 (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); in radeon_debugfs_fence_info()
987 rdev->fence_drv[i].sync_seq[i]); in radeon_debugfs_fence_info()
990 if (i != j && rdev->fence_drv[j].initialized) in radeon_debugfs_fence_info()
992 j, rdev->fence_drv[i].sync_seq[j]); in radeon_debugfs_fence_info()
1007 struct radeon_device *rdev = dev->dev_private; in radeon_debugfs_gpu_reset() local
1009 down_read(&rdev->exclusive_lock); in radeon_debugfs_gpu_reset()
1010 seq_printf(m, "%d\n", rdev->needs_reset); in radeon_debugfs_gpu_reset()
1011 rdev->needs_reset = true; in radeon_debugfs_gpu_reset()
1012 wake_up_all(&rdev->fence_queue); in radeon_debugfs_gpu_reset()
1013 up_read(&rdev->exclusive_lock); in radeon_debugfs_gpu_reset()
1024 int radeon_debugfs_fence_init(struct radeon_device *rdev) in radeon_debugfs_fence_init() argument
1027 return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2); in radeon_debugfs_fence_init()
1077 struct radeon_device *rdev = fence->rdev; in radeon_fence_default_wait() local
1098 if (rdev->needs_reset) { in radeon_fence_default_wait()