Lines matching refs:gpu in drivers/gpu/drm/msm/msm_gpu.c

23 static int enable_pwrrail(struct msm_gpu *gpu)  in enable_pwrrail()  argument
25 struct drm_device *dev = gpu->dev; in enable_pwrrail()
28 if (gpu->gpu_reg) { in enable_pwrrail()
29 ret = regulator_enable(gpu->gpu_reg); in enable_pwrrail()
36 if (gpu->gpu_cx) { in enable_pwrrail()
37 ret = regulator_enable(gpu->gpu_cx); in enable_pwrrail()
47 static int disable_pwrrail(struct msm_gpu *gpu) in disable_pwrrail() argument
49 if (gpu->gpu_cx) in disable_pwrrail()
50 regulator_disable(gpu->gpu_cx); in disable_pwrrail()
51 if (gpu->gpu_reg) in disable_pwrrail()
52 regulator_disable(gpu->gpu_reg); in disable_pwrrail()
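The two helpers above are a matched pair: rails come up in order (the graphics rail before the shared CX rail) and go down in strict reverse, and each handle is optional. A minimal sketch of that pattern, with an ad-hoc struct and hypothetical helper names; regulator_enable()/regulator_disable() are the real consumer API:

#include <linux/regulator/consumer.h>

struct gpu_rails {
	struct regulator *gx;	/* graphics rail ("vdd"), may be NULL */
	struct regulator *cx;	/* shared CX rail ("vddcx"), may be NULL */
};

static int rails_on(struct gpu_rails *r)
{
	int ret;

	if (r->gx) {
		ret = regulator_enable(r->gx);
		if (ret)
			return ret;
	}

	if (r->cx) {
		ret = regulator_enable(r->cx);
		if (ret) {
			if (r->gx)
				regulator_disable(r->gx);	/* unwind */
			return ret;
		}
	}

	return 0;
}

static void rails_off(struct gpu_rails *r)
{
	/* strict reverse of rails_on() */
	if (r->cx)
		regulator_disable(r->cx);
	if (r->gx)
		regulator_disable(r->gx);
}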
56 static int enable_clk(struct msm_gpu *gpu) in enable_clk() argument
58 if (gpu->core_clk && gpu->fast_rate) in enable_clk()
59 clk_set_rate(gpu->core_clk, gpu->fast_rate); in enable_clk()
62 if (gpu->rbbmtimer_clk) in enable_clk()
63 clk_set_rate(gpu->rbbmtimer_clk, 19200000); in enable_clk()
65 return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in enable_clk()
68 static int disable_clk(struct msm_gpu *gpu) in disable_clk() argument
70 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in disable_clk()
77 if (gpu->core_clk) in disable_clk()
78 clk_set_rate(gpu->core_clk, 27000000); in disable_clk()
80 if (gpu->rbbmtimer_clk) in disable_clk()
81 clk_set_rate(gpu->rbbmtimer_clk, 0); in disable_clk()
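enable_clk()/disable_clk() pair clk_set_rate() with the bulk prepare/enable helpers. Note that on the way down the core clock is parked at a low rate (27 MHz here) rather than left at the fast rate, so the next bring-up starts from a sane operating point. A sketch of the pattern, with an ad-hoc struct; the rates mirror the listing (19.2 MHz RBBM timer, 27 MHz idle):

#include <linux/clk.h>

struct gpu_clks {
	struct clk_bulk_data *grp;	/* all clocks from the DT node */
	int nr;
	struct clk *core;		/* may be NULL */
	struct clk *rbbmtimer;		/* may be NULL */
	unsigned long fast_rate;
};

static int clks_on(struct gpu_clks *c)
{
	if (c->core && c->fast_rate)
		clk_set_rate(c->core, c->fast_rate);
	if (c->rbbmtimer)
		clk_set_rate(c->rbbmtimer, 19200000);

	return clk_bulk_prepare_enable(c->nr, c->grp);
}

static void clks_off(struct gpu_clks *c)
{
	clk_bulk_disable_unprepare(c->nr, c->grp);

	/* park rates low while the GPU is off */
	if (c->core)
		clk_set_rate(c->core, 27000000);
	if (c->rbbmtimer)
		clk_set_rate(c->rbbmtimer, 0);
}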
86 static int enable_axi(struct msm_gpu *gpu) in enable_axi() argument
88 return clk_prepare_enable(gpu->ebi1_clk); in enable_axi()
91 static int disable_axi(struct msm_gpu *gpu) in disable_axi() argument
93 clk_disable_unprepare(gpu->ebi1_clk); in disable_axi()
97 int msm_gpu_pm_resume(struct msm_gpu *gpu) in msm_gpu_pm_resume() argument
101 DBG("%s", gpu->name); in msm_gpu_pm_resume()
104 ret = enable_pwrrail(gpu); in msm_gpu_pm_resume()
108 ret = enable_clk(gpu); in msm_gpu_pm_resume()
112 ret = enable_axi(gpu); in msm_gpu_pm_resume()
116 msm_devfreq_resume(gpu); in msm_gpu_pm_resume()
118 gpu->needs_hw_init = true; in msm_gpu_pm_resume()
123 int msm_gpu_pm_suspend(struct msm_gpu *gpu) in msm_gpu_pm_suspend() argument
127 DBG("%s", gpu->name); in msm_gpu_pm_suspend()
130 msm_devfreq_suspend(gpu); in msm_gpu_pm_suspend()
132 ret = disable_axi(gpu); in msm_gpu_pm_suspend()
136 ret = disable_clk(gpu); in msm_gpu_pm_suspend()
140 ret = disable_pwrrail(gpu); in msm_gpu_pm_suspend()
144 gpu->suspend_count++; in msm_gpu_pm_suspend()
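Resume and suspend are exact mirrors: rails, then clocks, then the AXI bus clock, then devfreq on the way up; devfreq first and rails last on the way down. Resume also flags the hardware as needing re-init, since register state was lost. A condensed, illustrative pairing of the two paths (error handling trimmed; the helpers are the file's own statics):

#include "msm_gpu.h"	/* msm driver-internal header */

static int gpu_power_up(struct msm_gpu *gpu)
{
	int ret;

	ret = enable_pwrrail(gpu);	/* 1. power rails        */
	if (ret)
		return ret;

	ret = enable_clk(gpu);		/* 2. core/rbbm clocks   */
	if (ret)
		return ret;

	ret = enable_axi(gpu);		/* 3. bus clock          */
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);	/* 4. frequency governor */
	gpu->needs_hw_init = true;	/* register state is gone */
	return 0;
}

static void gpu_power_down(struct msm_gpu *gpu)
{
	msm_devfreq_suspend(gpu);	/* exact reverse: 4..1 */
	disable_axi(gpu);
	disable_clk(gpu);
	disable_pwrrail(gpu);
	gpu->suspend_count++;
}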
149 int msm_gpu_hw_init(struct msm_gpu *gpu) in msm_gpu_hw_init() argument
153 WARN_ON(!mutex_is_locked(&gpu->lock)); in msm_gpu_hw_init()
155 if (!gpu->needs_hw_init) in msm_gpu_hw_init()
158 disable_irq(gpu->irq); in msm_gpu_hw_init()
159 ret = gpu->funcs->hw_init(gpu); in msm_gpu_hw_init()
161 gpu->needs_hw_init = false; in msm_gpu_hw_init()
162 enable_irq(gpu->irq); in msm_gpu_hw_init()
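msm_gpu_hw_init() is the lazy-init hook that pairs with the needs_hw_init flag set on resume: it asserts the GPU lock is held, bails if nothing is pending, and masks the IRQ line across funcs->hw_init() so a half-programmed GPU cannot interrupt. The same shape in isolation (names mirror the listing, error path condensed):

#include <linux/interrupt.h>
#include "msm_gpu.h"	/* msm driver-internal header */

static int lazy_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;		/* already initialized */

	disable_irq(gpu->irq);		/* quiesce while reprogramming */
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}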
167 static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, in update_fences() argument
189 struct msm_gpu *gpu = data; in msm_gpu_devcoredump_read() local
194 state = msm_gpu_crashstate_get(gpu); in msm_gpu_devcoredump_read()
215 gpu->funcs->show(gpu, state, &p); in msm_gpu_devcoredump_read()
217 msm_gpu_crashstate_put(gpu); in msm_gpu_devcoredump_read()
224 struct msm_gpu *gpu = data; in msm_gpu_devcoredump_free() local
226 msm_gpu_crashstate_put(gpu); in msm_gpu_devcoredump_free()
262 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, in msm_gpu_crashstate_capture() argument
268 if (!gpu->funcs->gpu_state_get) in msm_gpu_crashstate_capture()
272 if (gpu->crashstate) in msm_gpu_crashstate_capture()
275 state = gpu->funcs->gpu_state_get(gpu); in msm_gpu_crashstate_capture()
282 state->fault_info = gpu->fault_info; in msm_gpu_crashstate_capture()
317 gpu->crashstate = state; in msm_gpu_crashstate_capture()
320 dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, in msm_gpu_crashstate_capture()
324 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, in msm_gpu_crashstate_capture() argument
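Under CONFIG_DEV_COREDUMP the captured state is published through dev_coredumpm(), which takes read and free callbacks plus an opaque pointer (here the gpu itself, with datalen 0 because the read callback sizes the dump on its own). A toy registration showing that API, using a plain string where the driver stores a full crash snapshot:

#include <linux/devcoredump.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t crash_read(char *buffer, loff_t offset, size_t count,
			  void *data, size_t datalen)
{
	const char *state = data;
	size_t len = strlen(state);

	if (offset >= len)
		return 0;
	count = min_t(size_t, count, len - offset);
	memcpy(buffer, state + offset, count);
	return count;
}

static void crash_free(void *data)
{
	kfree(data);	/* drop the reference the dump held */
}

static void register_dump(struct device *dev)
{
	char *state = kstrdup("gpu crash state...", GFP_KERNEL);

	if (state)
		dev_coredumpm(dev, THIS_MODULE, state, 0, GFP_KERNEL,
			      crash_read, crash_free);
}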
352 static void retire_submits(struct msm_gpu *gpu);
356 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); in recover_worker() local
357 struct drm_device *dev = gpu->dev; in recover_worker()
360 struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); in recover_worker()
364 mutex_lock(&gpu->lock); in recover_worker()
366 DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); in recover_worker()
373 gpu->global_faults++; in recover_worker()
385 gpu->name, comm, cmd); in recover_worker()
395 pm_runtime_get_sync(&gpu->pdev->dev); in recover_worker()
396 msm_gpu_crashstate_capture(gpu, submit, comm, cmd); in recover_worker()
397 pm_runtime_put_sync(&gpu->pdev->dev); in recover_worker()
407 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
408 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
419 update_fences(gpu, ring, fence); in recover_worker()
422 if (msm_gpu_active(gpu)) { in recover_worker()
424 retire_submits(gpu); in recover_worker()
426 pm_runtime_get_sync(&gpu->pdev->dev); in recover_worker()
427 gpu->funcs->recover(gpu); in recover_worker()
428 pm_runtime_put_sync(&gpu->pdev->dev); in recover_worker()
434 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
435 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
440 gpu->funcs->submit(gpu, submit); in recover_worker()
445 mutex_unlock(&gpu->lock); in recover_worker()
447 msm_gpu_retire(gpu); in recover_worker()
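Recovery runs entirely under gpu->lock: it counts a global fault, captures a crash state inside a runtime-PM section, fast-forwards fences so CPU-side waiters on the dead submits unblock, then resets the GPU and replays the still-unprocessed submits. The fence fast-forward is the subtle part; a sketch mirroring the loop above (update_fences() is the file's own helper, fast_forward_fences() an ad-hoc name):

#include "msm_gpu.h"	/* msm driver-internal header */

static void fast_forward_fences(struct msm_gpu *gpu,
				struct msm_ringbuffer *cur_ring)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		uint32_t fence = ring->memptrs->fence;

		/* on the hung ring, step one past the faulting submit */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}
}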
452 struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work); in fault_worker() local
454 struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); in fault_worker()
457 mutex_lock(&gpu->lock); in fault_worker()
481 pm_runtime_get_sync(&gpu->pdev->dev); in fault_worker()
482 msm_gpu_crashstate_capture(gpu, submit, comm, cmd); in fault_worker()
483 pm_runtime_put_sync(&gpu->pdev->dev); in fault_worker()
489 memset(&gpu->fault_info, 0, sizeof(gpu->fault_info)); in fault_worker()
490 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in fault_worker()
492 mutex_unlock(&gpu->lock); in fault_worker()
495 static void hangcheck_timer_reset(struct msm_gpu *gpu) in hangcheck_timer_reset() argument
497 struct msm_drm_private *priv = gpu->dev->dev_private; in hangcheck_timer_reset()
498 mod_timer(&gpu->hangcheck_timer, in hangcheck_timer_reset()
504 struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer); in hangcheck_handler() local
505 struct drm_device *dev = gpu->dev; in hangcheck_handler()
506 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in hangcheck_handler()
516 gpu->name, ring->id); in hangcheck_handler()
518 gpu->name, fence); in hangcheck_handler()
520 gpu->name, ring->seqno); in hangcheck_handler()
522 kthread_queue_work(gpu->worker, &gpu->recover_work); in hangcheck_handler()
527 hangcheck_timer_reset(gpu); in hangcheck_handler()
530 msm_gpu_retire(gpu); in hangcheck_handler()
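The hang detector is a classic watchdog: every submit re-arms hangcheck_timer, and when it fires the handler compares the ring's last completed fence with the last seqno handed out. Progress re-arms the timer; no progress with work still outstanding queues recover_work, because a timer runs in atomic context and recovery needs to sleep. A sketch of the timer-to-worker handoff (HANGCHECK_JIFFIES is an illustrative constant standing in for the driver's period):

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/timer.h>
#include "msm_gpu.h"	/* msm driver-internal header */

#define HANGCHECK_JIFFIES msecs_to_jiffies(500)	/* illustrative period */

static void hang_check(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		ring->hangcheck_fence = fence;	/* progress: re-arm below */
	} else if (fence < ring->seqno) {
		/* no progress, work still queued: declare a hang and
		 * hand off to process context */
		kthread_queue_work(gpu->worker, &gpu->recover_work);
		return;
	}

	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + HANGCHECK_JIFFIES));
}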
538 static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs) in update_hw_cntrs() argument
540 uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)]; in update_hw_cntrs()
541 int i, n = min(ncntrs, gpu->num_perfcntrs); in update_hw_cntrs()
544 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
545 current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg); in update_hw_cntrs()
549 cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i]; in update_hw_cntrs()
552 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
553 gpu->last_cntrs[i] = current_cntrs[i]; in update_hw_cntrs()
558 static void update_sw_cntrs(struct msm_gpu *gpu) in update_sw_cntrs() argument
564 spin_lock_irqsave(&gpu->perf_lock, flags); in update_sw_cntrs()
565 if (!gpu->perfcntr_active) in update_sw_cntrs()
569 elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time)); in update_sw_cntrs()
571 gpu->totaltime += elapsed; in update_sw_cntrs()
572 if (gpu->last_sample.active) in update_sw_cntrs()
573 gpu->activetime += elapsed; in update_sw_cntrs()
575 gpu->last_sample.active = msm_gpu_active(gpu); in update_sw_cntrs()
576 gpu->last_sample.time = time; in update_sw_cntrs()
579 spin_unlock_irqrestore(&gpu->perf_lock, flags); in update_sw_cntrs()
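update_sw_cntrs() does time-slice accounting under perf_lock: each call attributes the microseconds since the previous sample to totaltime, and also to activetime if the GPU was busy when the slice began, then resamples the busy flag. The same idea in isolation (busy_now stands in for msm_gpu_active(), the struct is ad hoc):

#include <linux/ktime.h>
#include <linux/spinlock.h>

struct sw_cntrs {
	spinlock_t lock;
	ktime_t last_time;
	bool last_active;
	u64 activetime;	/* µs the GPU was busy */
	u64 totaltime;	/* µs since sampling started */
};

static void sw_cntrs_update(struct sw_cntrs *c, bool busy_now)
{
	unsigned long flags;
	ktime_t now = ktime_get();
	s64 elapsed;

	spin_lock_irqsave(&c->lock, flags);
	elapsed = ktime_to_us(ktime_sub(now, c->last_time));
	c->totaltime += elapsed;
	if (c->last_active)
		c->activetime += elapsed;	/* slice began busy */
	c->last_active = busy_now;
	c->last_time = now;
	spin_unlock_irqrestore(&c->lock, flags);
}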
582 void msm_gpu_perfcntr_start(struct msm_gpu *gpu) in msm_gpu_perfcntr_start() argument
586 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_start()
588 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
590 gpu->last_sample.active = msm_gpu_active(gpu); in msm_gpu_perfcntr_start()
591 gpu->last_sample.time = ktime_get(); in msm_gpu_perfcntr_start()
592 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_start()
593 gpu->perfcntr_active = true; in msm_gpu_perfcntr_start()
594 update_hw_cntrs(gpu, 0, NULL); in msm_gpu_perfcntr_start()
595 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
598 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu) in msm_gpu_perfcntr_stop() argument
600 gpu->perfcntr_active = false; in msm_gpu_perfcntr_stop()
601 pm_runtime_put_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_stop()
605 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, in msm_gpu_perfcntr_sample() argument
611 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
613 if (!gpu->perfcntr_active) { in msm_gpu_perfcntr_sample()
618 *activetime = gpu->activetime; in msm_gpu_perfcntr_sample()
619 *totaltime = gpu->totaltime; in msm_gpu_perfcntr_sample()
621 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_sample()
623 ret = update_hw_cntrs(gpu, ncntrs, cntrs); in msm_gpu_perfcntr_sample()
626 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
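Sampling is read-and-reset: activetime and totaltime are returned and then zeroed, so back-to-back calls yield per-interval utilization, and the call fails while profiling is inactive. A hypothetical consumer (poll_utilization() and the counter array size are illustrative, not part of the driver):

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/printk.h>
#include "msm_gpu.h"	/* msm driver-internal header */

static void poll_utilization(struct msm_gpu *gpu)
{
	uint32_t active, total, cntrs[4];
	u32 pct;
	int ret;

	ret = msm_gpu_perfcntr_sample(gpu, &active, &total,
				      ARRAY_SIZE(cntrs), cntrs);
	if (ret < 0)
		return;		/* profiling not active */

	/* counters are read-and-reset, so this is per-interval */
	pct = total ? (u32)div_u64((u64)active * 100, total) : 0;
	pr_info("gpu busy: %u%%\n", pct);
}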
635 static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, in retire_submit() argument
659 pm_runtime_mark_last_busy(&gpu->pdev->dev); in retire_submit()
666 mutex_lock(&gpu->active_lock); in retire_submit()
667 gpu->active_submits--; in retire_submit()
668 WARN_ON(gpu->active_submits < 0); in retire_submit()
669 if (!gpu->active_submits) in retire_submit()
670 msm_devfreq_idle(gpu); in retire_submit()
671 mutex_unlock(&gpu->active_lock); in retire_submit()
673 pm_runtime_put_autosuspend(&gpu->pdev->dev); in retire_submit()
678 static void retire_submits(struct msm_gpu *gpu) in retire_submits() argument
683 for (i = 0; i < gpu->nr_rings; i++) { in retire_submits()
684 struct msm_ringbuffer *ring = gpu->rb[i]; in retire_submits()
701 retire_submit(gpu, ring, submit); in retire_submits()
711 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); in retire_worker() local
713 retire_submits(gpu); in retire_worker()
717 void msm_gpu_retire(struct msm_gpu *gpu) in msm_gpu_retire() argument
721 for (i = 0; i < gpu->nr_rings; i++) in msm_gpu_retire()
722 update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence); in msm_gpu_retire()
724 kthread_queue_work(gpu->worker, &gpu->retire_work); in msm_gpu_retire()
725 update_sw_cntrs(gpu); in msm_gpu_retire()
729 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in msm_gpu_submit() argument
731 struct drm_device *dev = gpu->dev; in msm_gpu_submit()
736 WARN_ON(!mutex_is_locked(&gpu->lock)); in msm_gpu_submit()
738 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_submit()
740 msm_gpu_hw_init(gpu); in msm_gpu_submit()
746 update_sw_cntrs(gpu); in msm_gpu_submit()
759 mutex_lock(&gpu->active_lock); in msm_gpu_submit()
760 if (!gpu->active_submits) in msm_gpu_submit()
761 msm_devfreq_active(gpu); in msm_gpu_submit()
762 gpu->active_submits++; in msm_gpu_submit()
763 mutex_unlock(&gpu->active_lock); in msm_gpu_submit()
765 gpu->funcs->submit(gpu, submit); in msm_gpu_submit()
768 hangcheck_timer_reset(gpu); in msm_gpu_submit()
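Submission and retirement bracket the active_submits count under active_lock: the 0-to-1 edge tells devfreq the GPU went busy, the 1-to-0 edge that it went idle, and each retirement drops its runtime-PM reference via autosuspend so power-off only happens after the autosuspend delay. The pairing in isolation (helper names are ad hoc; msm_devfreq_active/idle are the driver's real hooks):

#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include "msm_gpu.h"	/* msm driver-internal header */

static void submit_begin(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits)
		msm_devfreq_active(gpu);	/* 0 -> 1: clock up */
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);
}

static void submit_end(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits)
		msm_devfreq_idle(gpu);		/* 1 -> 0: clock down */
	mutex_unlock(&gpu->active_lock);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
}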
777 struct msm_gpu *gpu = data; in irq_handler() local
778 return gpu->funcs->irq(gpu); in irq_handler()
781 static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) in get_clocks() argument
783 int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks); in get_clocks()
786 gpu->nr_clocks = 0; in get_clocks()
790 gpu->nr_clocks = ret; in get_clocks()
792 gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
793 gpu->nr_clocks, "core"); in get_clocks()
795 gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
796 gpu->nr_clocks, "rbbmtimer"); in get_clocks()
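get_clocks() grabs every clock in the device's DT node with devm_clk_bulk_get_all(), which returns the number of clocks found (or -errno) and allocates the clk_bulk_data array; named handles are then fished out of that array. A sketch of the pattern; find_clock() is a simplified stand-in for msm_clk_bulk_get_clock() (the real helper also tolerates a "_clk" suffix on names):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static struct clk *find_clock(struct clk_bulk_data *clks, int nr,
			      const char *name)
{
	int i;

	for (i = 0; i < nr; i++)
		if (clks[i].id && !strcmp(clks[i].id, name))
			return clks[i].clk;

	return NULL;	/* named clock is optional */
}

static int grab_clocks(struct platform_device *pdev,
		       struct clk_bulk_data **clks, int *nr)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, clks);

	if (ret < 1) {
		*nr = 0;	/* "no clocks" is tolerated */
		return ret;
	}

	*nr = ret;
	return 0;
}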
803 msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task) in msm_gpu_create_private_address_space() argument
806 if (!gpu) in msm_gpu_create_private_address_space()
813 if (gpu->funcs->create_private_address_space) { in msm_gpu_create_private_address_space()
814 aspace = gpu->funcs->create_private_address_space(gpu); in msm_gpu_create_private_address_space()
820 aspace = msm_gem_address_space_get(gpu->aspace); in msm_gpu_create_private_address_space()
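Per-process address spaces are optional: if the GPU backend provides create_private_address_space() (i.e. it supports per-process pagetables), each task gets its own; otherwise everyone shares the global aspace by taking a reference on it. The decision in isolation (pick_aspace() is an ad-hoc name):

#include <linux/err.h>
#include "msm_gpu.h"	/* msm driver-internal header */

static struct msm_gem_address_space *pick_aspace(struct msm_gpu *gpu)
{
	if (!gpu)
		return NULL;

	if (gpu->funcs->create_private_address_space) {
		struct msm_gem_address_space *aspace =
			gpu->funcs->create_private_address_space(gpu);

		if (!IS_ERR_OR_NULL(aspace))
			return aspace;	/* private pagetables */
	}

	/* fall back to a reference on the shared global aspace */
	return msm_gem_address_space_get(gpu->aspace);
}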
826 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, in msm_gpu_init() argument
833 if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) in msm_gpu_init()
834 gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); in msm_gpu_init()
836 gpu->dev = drm; in msm_gpu_init()
837 gpu->funcs = funcs; in msm_gpu_init()
838 gpu->name = name; in msm_gpu_init()
840 gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name); in msm_gpu_init()
841 if (IS_ERR(gpu->worker)) { in msm_gpu_init()
842 ret = PTR_ERR(gpu->worker); in msm_gpu_init()
843 gpu->worker = NULL; in msm_gpu_init()
847 sched_set_fifo_low(gpu->worker->task); in msm_gpu_init()
849 INIT_LIST_HEAD(&gpu->active_list); in msm_gpu_init()
850 mutex_init(&gpu->active_lock); in msm_gpu_init()
851 mutex_init(&gpu->lock); in msm_gpu_init()
852 kthread_init_work(&gpu->retire_work, retire_worker); in msm_gpu_init()
853 kthread_init_work(&gpu->recover_work, recover_worker); in msm_gpu_init()
854 kthread_init_work(&gpu->fault_work, fault_worker); in msm_gpu_init()
856 timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0); in msm_gpu_init()
858 spin_lock_init(&gpu->perf_lock); in msm_gpu_init()
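All deferred work funnels through a single kthread worker, so retire, recover, and fault handling serialize against one another; the worker gets a low FIFO priority so retirement isn't starved by userspace load. A sketch of that setup (setup_worker() is an ad-hoc wrapper around the calls above):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "msm_gpu.h"	/* msm driver-internal header */

static int setup_worker(struct msm_gpu *gpu)
{
	gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
	if (IS_ERR(gpu->worker)) {
		int ret = PTR_ERR(gpu->worker);

		gpu->worker = NULL;
		return ret;
	}

	/* modest RT priority so retirement keeps up under load */
	sched_set_fifo_low(gpu->worker->task);

	/* one worker, three serialized jobs */
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);
	kthread_init_work(&gpu->fault_work, fault_worker);
	return 0;
}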
862 gpu->mmio = msm_ioremap(pdev, config->ioname, name); in msm_gpu_init()
863 if (IS_ERR(gpu->mmio)) { in msm_gpu_init()
864 ret = PTR_ERR(gpu->mmio); in msm_gpu_init()
869 gpu->irq = platform_get_irq(pdev, 0); in msm_gpu_init()
870 if (gpu->irq < 0) { in msm_gpu_init()
871 ret = gpu->irq; in msm_gpu_init()
876 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, in msm_gpu_init()
877 IRQF_TRIGGER_HIGH, gpu->name, gpu); in msm_gpu_init()
879 DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); in msm_gpu_init()
883 ret = get_clocks(pdev, gpu); in msm_gpu_init()
887 gpu->ebi1_clk = msm_clk_get(pdev, "bus"); in msm_gpu_init()
888 DBG("ebi1_clk: %p", gpu->ebi1_clk); in msm_gpu_init()
889 if (IS_ERR(gpu->ebi1_clk)) in msm_gpu_init()
890 gpu->ebi1_clk = NULL; in msm_gpu_init()
893 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd"); in msm_gpu_init()
894 DBG("gpu_reg: %p", gpu->gpu_reg); in msm_gpu_init()
895 if (IS_ERR(gpu->gpu_reg)) in msm_gpu_init()
896 gpu->gpu_reg = NULL; in msm_gpu_init()
898 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx"); in msm_gpu_init()
899 DBG("gpu_cx: %p", gpu->gpu_cx); in msm_gpu_init()
900 if (IS_ERR(gpu->gpu_cx)) in msm_gpu_init()
901 gpu->gpu_cx = NULL; in msm_gpu_init()
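The ebi1/vdd/vddcx lookups above all use the same "optional resource" idiom: an IS_ERR() result is flattened to NULL and every later consumer NULL-checks, so a missing clock or supply degrades gracefully instead of failing the probe. The idiom in isolation (get_optional_rail() is illustrative):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static struct regulator *get_optional_rail(struct device *dev,
					   const char *id)
{
	struct regulator *reg = devm_regulator_get(dev, id);

	/* treat any lookup error as "rail not present" */
	return IS_ERR(reg) ? NULL : reg;
}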
903 gpu->pdev = pdev; in msm_gpu_init()
904 platform_set_drvdata(pdev, &gpu->adreno_smmu); in msm_gpu_init()
906 msm_devfreq_init(gpu); in msm_gpu_init()
909 gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); in msm_gpu_init()
911 if (gpu->aspace == NULL) in msm_gpu_init()
913 else if (IS_ERR(gpu->aspace)) { in msm_gpu_init()
914 ret = PTR_ERR(gpu->aspace); in msm_gpu_init()
920 check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo, in msm_gpu_init()
929 msm_gem_object_set_name(gpu->memptrs_bo, "memptrs"); in msm_gpu_init()
931 if (nr_rings > ARRAY_SIZE(gpu->rb)) { in msm_gpu_init()
933 ARRAY_SIZE(gpu->rb)); in msm_gpu_init()
934 nr_rings = ARRAY_SIZE(gpu->rb); in msm_gpu_init()
939 gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova); in msm_gpu_init()
941 if (IS_ERR(gpu->rb[i])) { in msm_gpu_init()
942 ret = PTR_ERR(gpu->rb[i]); in msm_gpu_init()
952 gpu->nr_rings = nr_rings; in msm_gpu_init()
957 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_init()
958 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_init()
959 gpu->rb[i] = NULL; in msm_gpu_init()
962 msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace); in msm_gpu_init()
968 void msm_gpu_cleanup(struct msm_gpu *gpu) in msm_gpu_cleanup() argument
972 DBG("%s", gpu->name); in msm_gpu_cleanup()
974 WARN_ON(!list_empty(&gpu->active_list)); in msm_gpu_cleanup()
976 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_cleanup()
977 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_cleanup()
978 gpu->rb[i] = NULL; in msm_gpu_cleanup()
981 msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace); in msm_gpu_cleanup()
983 if (!IS_ERR_OR_NULL(gpu->aspace)) { in msm_gpu_cleanup()
984 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); in msm_gpu_cleanup()
985 msm_gem_address_space_put(gpu->aspace); in msm_gpu_cleanup()
988 if (gpu->worker) { in msm_gpu_cleanup()
989 kthread_destroy_worker(gpu->worker); in msm_gpu_cleanup()
992 msm_devfreq_cleanup(gpu); in msm_gpu_cleanup()