Lines Matching full:gpu

All hits below appear to come from the msm DRM driver's GPU core (drivers/gpu/drm/msm/msm_gpu.c); each entry gives the source line number, the matching line, and the enclosing function.

37 struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); in msm_devfreq_target() local
45 clk_set_rate(gpu->core_clk, *freq); in msm_devfreq_target()
54 struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); in msm_devfreq_get_dev_status() local
59 status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk); in msm_devfreq_get_dev_status()
60 gpu->funcs->gpu_busy(gpu, &cycles); in msm_devfreq_get_dev_status()
62 status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq; in msm_devfreq_get_dev_status()
64 gpu->devfreq.busy_cycles = cycles; in msm_devfreq_get_dev_status()
67 status->total_time = ktime_us_delta(time, gpu->devfreq.time); in msm_devfreq_get_dev_status()
68 gpu->devfreq.time = time; in msm_devfreq_get_dev_status()
75 struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); in msm_devfreq_get_cur_freq() local
77 *freq = (unsigned long) clk_get_rate(gpu->core_clk); in msm_devfreq_get_cur_freq()
89 static void msm_devfreq_init(struct msm_gpu *gpu) in msm_devfreq_init() argument
92 if (!gpu->funcs->gpu_busy || !gpu->core_clk) in msm_devfreq_init()
95 msm_devfreq_profile.initial_freq = gpu->fast_rate; in msm_devfreq_init()
102 gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev, in msm_devfreq_init()
105 if (IS_ERR(gpu->devfreq.devfreq)) { in msm_devfreq_init()
106 dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); in msm_devfreq_init()
107 gpu->devfreq.devfreq = NULL; in msm_devfreq_init()
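The devfreq hits above form one unit: msm_devfreq_target() applies a frequency by setting the core clock rate (45), msm_devfreq_get_dev_status() reports busy vs. total time from the gpu_busy cycle counter (59-68), and msm_devfreq_init() registers both with the framework, treating devfreq as optional. A minimal sketch of that registration, assuming the callbacks above and the "simple_ondemand" governor; the polling interval and my_devfreq_init() name are illustrative:

	#include <linux/devfreq.h>
	#include <linux/err.h>

	/* Prototypes of the callbacks matched above, for self-containment. */
	static int msm_devfreq_target(struct device *dev, unsigned long *freq, u32 flags);
	static int msm_devfreq_get_dev_status(struct device *dev,
					      struct devfreq_dev_status *status);
	static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq);

	static struct devfreq_dev_profile msm_devfreq_profile = {
		.polling_ms	= 10,			/* illustrative sampling period */
		.target		= msm_devfreq_target,	/* line 37: clk_set_rate() */
		.get_dev_status	= msm_devfreq_get_dev_status, /* line 54: busy/total time */
		.get_cur_freq	= msm_devfreq_get_cur_freq,   /* line 75 */
	};

	static void my_devfreq_init(struct msm_gpu *gpu)
	{
		struct devfreq *df;

		msm_devfreq_profile.initial_freq = gpu->fast_rate;	/* line 95 */

		df = devm_devfreq_add_device(&gpu->pdev->dev, &msm_devfreq_profile,
					     "simple_ondemand", NULL);
		if (IS_ERR(df))
			df = NULL;	/* devfreq is optional; the GPU runs without it */
		gpu->devfreq.devfreq = df;
	}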
111 static int enable_pwrrail(struct msm_gpu *gpu) in enable_pwrrail() argument
113 struct drm_device *dev = gpu->dev; in enable_pwrrail()
116 if (gpu->gpu_reg) { in enable_pwrrail()
117 ret = regulator_enable(gpu->gpu_reg); in enable_pwrrail()
124 if (gpu->gpu_cx) { in enable_pwrrail()
125 ret = regulator_enable(gpu->gpu_cx); in enable_pwrrail()
135 static int disable_pwrrail(struct msm_gpu *gpu) in disable_pwrrail() argument
137 if (gpu->gpu_cx) in disable_pwrrail()
138 regulator_disable(gpu->gpu_cx); in disable_pwrrail()
139 if (gpu->gpu_reg) in disable_pwrrail()
140 regulator_disable(gpu->gpu_reg); in disable_pwrrail()
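enable_pwrrail()/disable_pwrrail() bracket two optional regulators; note that disable (137-140) releases them in the reverse order of enable (116-125). A plausible reconstruction of the enable path with the elided error handling filled in (the dev_err() strings are assumptions):

	static int enable_pwrrail(struct msm_gpu *gpu)
	{
		struct drm_device *dev = gpu->dev;
		int ret;

		if (gpu->gpu_reg) {
			ret = regulator_enable(gpu->gpu_reg);
			if (ret) {
				dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
				return ret;
			}
		}

		if (gpu->gpu_cx) {
			ret = regulator_enable(gpu->gpu_cx);
			if (ret) {
				dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
				return ret;
			}
		}

		return 0;
	}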
144 static int enable_clk(struct msm_gpu *gpu) in enable_clk() argument
146 if (gpu->core_clk && gpu->fast_rate) in enable_clk()
147 clk_set_rate(gpu->core_clk, gpu->fast_rate); in enable_clk()
150 if (gpu->rbbmtimer_clk) in enable_clk()
151 clk_set_rate(gpu->rbbmtimer_clk, 19200000); in enable_clk()
153 return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in enable_clk()
156 static int disable_clk(struct msm_gpu *gpu) in disable_clk() argument
158 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in disable_clk()
165 if (gpu->core_clk) in disable_clk()
166 clk_set_rate(gpu->core_clk, 27000000); in disable_clk()
168 if (gpu->rbbmtimer_clk) in disable_clk()
169 clk_set_rate(gpu->rbbmtimer_clk, 0); in disable_clk()
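The clock pair uses the clk bulk API: rates are programmed on the two named clocks, then the whole group is prepared and enabled in one call (153); on the way down the core clock is parked at 27 MHz (166), presumably so the next enable ramps from a safe low rate, and the 19.2 MHz RBBM timer clock has its rate released (169). For drivers without a bespoke lookup helper like this driver's msm_clk_bulk_get(), the generic equivalent is roughly (my_gpu_clocks_on() is an illustrative name):

	#include <linux/clk.h>

	static int my_gpu_clocks_on(struct device *dev,
				    struct clk_bulk_data **clks, int *nr)
	{
		int ret;

		*nr = clk_bulk_get_all(dev, clks);	/* every clock in the DT node */
		if (*nr <= 0)
			return *nr ? *nr : -ENOENT;

		ret = clk_bulk_prepare_enable(*nr, *clks);
		if (ret)
			clk_bulk_put_all(*nr, *clks);	/* unwind on failure */
		return ret;
	}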
174 static int enable_axi(struct msm_gpu *gpu) in enable_axi() argument
176 if (gpu->ebi1_clk) in enable_axi()
177 clk_prepare_enable(gpu->ebi1_clk); in enable_axi()
181 static int disable_axi(struct msm_gpu *gpu) in disable_axi() argument
183 if (gpu->ebi1_clk) in disable_axi()
184 clk_disable_unprepare(gpu->ebi1_clk); in disable_axi()
188 int msm_gpu_pm_resume(struct msm_gpu *gpu) in msm_gpu_pm_resume() argument
192 DBG("%s", gpu->name); in msm_gpu_pm_resume()
194 ret = enable_pwrrail(gpu); in msm_gpu_pm_resume()
198 ret = enable_clk(gpu); in msm_gpu_pm_resume()
202 ret = enable_axi(gpu); in msm_gpu_pm_resume()
206 if (gpu->devfreq.devfreq) { in msm_gpu_pm_resume()
207 gpu->devfreq.busy_cycles = 0; in msm_gpu_pm_resume()
208 gpu->devfreq.time = ktime_get(); in msm_gpu_pm_resume()
210 devfreq_resume_device(gpu->devfreq.devfreq); in msm_gpu_pm_resume()
213 gpu->needs_hw_init = true; in msm_gpu_pm_resume()
218 int msm_gpu_pm_suspend(struct msm_gpu *gpu) in msm_gpu_pm_suspend() argument
222 DBG("%s", gpu->name); in msm_gpu_pm_suspend()
224 if (gpu->devfreq.devfreq) in msm_gpu_pm_suspend()
225 devfreq_suspend_device(gpu->devfreq.devfreq); in msm_gpu_pm_suspend()
227 ret = disable_axi(gpu); in msm_gpu_pm_suspend()
231 ret = disable_clk(gpu); in msm_gpu_pm_suspend()
235 ret = disable_pwrrail(gpu); in msm_gpu_pm_suspend()
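Resume powers the blocks up in dependency order (power rail → clocks → AXI bus → devfreq) and suspend tears them down in exact reverse; resume also zeroes the devfreq busy counters (207-208) so the first sample after wakeup is sane, and sets needs_hw_init (213) so the next submission reinitializes the hardware. These two functions are natural runtime-PM callbacks; a sketch of that wiring (the my_ names are illustrative, not from the driver):

	#include <linux/pm_runtime.h>
	#include <linux/platform_device.h>

	static int my_runtime_suspend(struct device *dev)
	{
		struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

		return msm_gpu_pm_suspend(gpu);
	}

	static int my_runtime_resume(struct device *dev)
	{
		struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

		return msm_gpu_pm_resume(gpu);
	}

	static const struct dev_pm_ops my_gpu_pm_ops = {
		SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	};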
242 int msm_gpu_hw_init(struct msm_gpu *gpu) in msm_gpu_hw_init() argument
246 WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex)); in msm_gpu_hw_init()
248 if (!gpu->needs_hw_init) in msm_gpu_hw_init()
251 disable_irq(gpu->irq); in msm_gpu_hw_init()
252 ret = gpu->funcs->hw_init(gpu); in msm_gpu_hw_init()
254 gpu->needs_hw_init = false; in msm_gpu_hw_init()
255 enable_irq(gpu->irq); in msm_gpu_hw_init()
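msm_gpu_hw_init() is idempotent: it bails unless needs_hw_init is set (248), and it masks the GPU IRQ across the reinit (251-255) so a half-programmed GPU cannot interrupt. Callers must hold struct_mutex, per the WARN_ON at 246; a sketch of the expected call pattern (my_use_gpu() is an illustrative caller):

	static int my_use_gpu(struct msm_gpu *gpu)
	{
		int ret;

		mutex_lock(&gpu->dev->struct_mutex);
		ret = msm_gpu_hw_init(gpu);	/* no-op unless needs_hw_init */
		mutex_unlock(&gpu->dev->struct_mutex);

		return ret;
	}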
264 struct msm_gpu *gpu = data; in msm_gpu_devcoredump_read() local
269 state = msm_gpu_crashstate_get(gpu); in msm_gpu_devcoredump_read()
290 gpu->funcs->show(gpu, state, &p); in msm_gpu_devcoredump_read()
292 msm_gpu_crashstate_put(gpu); in msm_gpu_devcoredump_read()
299 struct msm_gpu *gpu = data; in msm_gpu_devcoredump_free() local
301 msm_gpu_crashstate_put(gpu); in msm_gpu_devcoredump_free()
335 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, in msm_gpu_crashstate_capture() argument
341 if (gpu->crashstate) in msm_gpu_crashstate_capture()
344 state = gpu->funcs->gpu_state_get(gpu); in msm_gpu_crashstate_capture()
364 gpu->crashstate = state; in msm_gpu_crashstate_capture()
367 dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, in msm_gpu_crashstate_capture()
371 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, in msm_gpu_crashstate_capture() argument
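The capture path snapshots the GPU state once (the gpu->crashstate check at 341 keeps only the first crash) and hands it to the devcoredump framework; the second, empty definition at 371 is the stub used when devcoredump is not configured. The dev_coredumpm() call at 367 continues with the two callbacks matched at 264 and 299; a sketch of its shape (my_report_crash() is illustrative):

	#include <linux/devcoredump.h>

	static void my_report_crash(struct msm_gpu *gpu)
	{
		/* devcoredump invokes msm_gpu_devcoredump_read() when userspace
		 * reads the dump node, and msm_gpu_devcoredump_free() when the
		 * dump is released or times out. */
		dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
			      msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
	}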
378 * Hangcheck detection for locked gpu:
381 static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, in update_fences() argument
400 WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex)); in find_submit()
409 static void retire_submits(struct msm_gpu *gpu);
413 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); in recover_worker() local
414 struct drm_device *dev = gpu->dev; in recover_worker()
417 struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); in recover_worker()
423 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); in recover_worker()
451 gpu->name, comm, cmd); in recover_worker()
460 pm_runtime_get_sync(&gpu->pdev->dev); in recover_worker()
461 msm_gpu_crashstate_capture(gpu, submit, comm, cmd); in recover_worker()
462 pm_runtime_put_sync(&gpu->pdev->dev); in recover_worker()
472 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
473 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
484 update_fences(gpu, ring, fence); in recover_worker()
487 if (msm_gpu_active(gpu)) { in recover_worker()
489 retire_submits(gpu); in recover_worker()
491 pm_runtime_get_sync(&gpu->pdev->dev); in recover_worker()
492 gpu->funcs->recover(gpu); in recover_worker()
493 pm_runtime_put_sync(&gpu->pdev->dev); in recover_worker()
499 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
500 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
503 gpu->funcs->submit(gpu, submit, NULL); in recover_worker()
509 msm_gpu_retire(gpu); in recover_worker()
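Recovery runs in process context: it captures a crash dump (461), bumps the completed fence past the hung submit (484) so it retires, resets the hardware (492), and then replays every submit that was queued but never retired. The replay loop at 499-503 elides its list walk; reconstructed, it is essentially (my_replay_submits() is an illustrative wrapper):

	static void my_replay_submits(struct msm_gpu *gpu)
	{
		int i;

		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			struct msm_gem_submit *submit;

			/* everything still on the ring's submit list was queued
			 * but not retired: push it to the freshly reset HW */
			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}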
512 static void hangcheck_timer_reset(struct msm_gpu *gpu) in hangcheck_timer_reset() argument
514 DBG("%s", gpu->name); in hangcheck_timer_reset()
515 mod_timer(&gpu->hangcheck_timer, in hangcheck_timer_reset()
521 struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer); in hangcheck_handler() local
522 struct drm_device *dev = gpu->dev; in hangcheck_handler()
524 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in hangcheck_handler()
533 dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", in hangcheck_handler()
534 gpu->name, ring->id); in hangcheck_handler()
536 gpu->name, fence); in hangcheck_handler()
538 gpu->name, ring->seqno); in hangcheck_handler()
540 queue_work(priv->wq, &gpu->recover_work); in hangcheck_handler()
545 hangcheck_timer_reset(gpu); in hangcheck_handler()
548 queue_work(priv->wq, &gpu->retire_work); in hangcheck_handler()
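hangcheck_handler() is a software watchdog: a timer re-armed after every submission (757) compares the fence the ring had last tick with the fence now; no movement while submits are still outstanding means a lockup, and recovery is queued to a workqueue (540) because a reset cannot run in atomic timer context. The pattern reduced to its skeleton, with my_ types and the 500 ms period as assumptions:

	#include <linux/timer.h>
	#include <linux/workqueue.h>

	#define MY_HANGCHECK_PERIOD	msecs_to_jiffies(500)	/* assumed period */

	struct my_ring {
		struct timer_list hangcheck_timer;
		struct work_struct recover_work;
		u32 completed;	/* updated by the IRQ handler */
		u32 submitted;	/* last seqno handed to the HW */
		u32 last_seen;	/* completed fence at the previous tick */
	};

	static void my_hangcheck(struct timer_list *t)
	{
		struct my_ring *ring = from_timer(ring, t, hangcheck_timer);
		u32 fence = READ_ONCE(ring->completed);

		if (fence != ring->last_seen) {
			ring->last_seen = fence;	/* progress: re-arm */
			mod_timer(&ring->hangcheck_timer,
				  jiffies + MY_HANGCHECK_PERIOD);
		} else if (fence != ring->submitted) {
			/* no progress, work pending: recover in process context */
			queue_work(system_wq, &ring->recover_work);
		}
	}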
556 static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs) in update_hw_cntrs() argument
558 uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)]; in update_hw_cntrs()
559 int i, n = min(ncntrs, gpu->num_perfcntrs); in update_hw_cntrs()
562 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
563 current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg); in update_hw_cntrs()
567 cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i]; in update_hw_cntrs()
570 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
571 gpu->last_cntrs[i] = current_cntrs[i]; in update_hw_cntrs()
576 static void update_sw_cntrs(struct msm_gpu *gpu) in update_sw_cntrs() argument
582 spin_lock_irqsave(&gpu->perf_lock, flags); in update_sw_cntrs()
583 if (!gpu->perfcntr_active) in update_sw_cntrs()
587 elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time)); in update_sw_cntrs()
589 gpu->totaltime += elapsed; in update_sw_cntrs()
590 if (gpu->last_sample.active) in update_sw_cntrs()
591 gpu->activetime += elapsed; in update_sw_cntrs()
593 gpu->last_sample.active = msm_gpu_active(gpu); in update_sw_cntrs()
594 gpu->last_sample.time = time; in update_sw_cntrs()
597 spin_unlock_irqrestore(&gpu->perf_lock, flags); in update_sw_cntrs()
600 void msm_gpu_perfcntr_start(struct msm_gpu *gpu) in msm_gpu_perfcntr_start() argument
604 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_start()
606 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
608 gpu->last_sample.active = msm_gpu_active(gpu); in msm_gpu_perfcntr_start()
609 gpu->last_sample.time = ktime_get(); in msm_gpu_perfcntr_start()
610 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_start()
611 gpu->perfcntr_active = true; in msm_gpu_perfcntr_start()
612 update_hw_cntrs(gpu, 0, NULL); in msm_gpu_perfcntr_start()
613 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
616 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu) in msm_gpu_perfcntr_stop() argument
618 gpu->perfcntr_active = false; in msm_gpu_perfcntr_stop()
619 pm_runtime_put_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_stop()
623 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, in msm_gpu_perfcntr_sample() argument
629 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
631 if (!gpu->perfcntr_active) { in msm_gpu_perfcntr_sample()
636 *activetime = gpu->activetime; in msm_gpu_perfcntr_sample()
637 *totaltime = gpu->totaltime; in msm_gpu_perfcntr_sample()
639 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_sample()
641 ret = update_hw_cntrs(gpu, ncntrs, cntrs); in msm_gpu_perfcntr_sample()
644 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
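Both counter flavors report intervals, not totals, under perf_lock: the hardware path subtracts last_cntrs from freshly read registers and stores the raw values as the next baseline (562-571), while the software path accumulates active/total microseconds and zeroes them on every sample (636-639). The core idiom, isolated (my_sample_delta() is an illustrative name):

	/* Delta sampling: report the change since the previous read, then make
	 * the current raw value the new baseline. Unsigned subtraction gives
	 * the right answer across counter wraparound, in the same spirit as
	 * the (u32) cast at line 62. */
	static u32 my_sample_delta(u32 *baseline, u32 raw)
	{
		u32 delta = raw - *baseline;

		*baseline = raw;
		return delta;
	}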
653 static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in retire_submit() argument
661 msm_gem_put_iova(&msm_obj->base, gpu->aspace); in retire_submit()
665 pm_runtime_mark_last_busy(&gpu->pdev->dev); in retire_submit()
666 pm_runtime_put_autosuspend(&gpu->pdev->dev); in retire_submit()
670 static void retire_submits(struct msm_gpu *gpu) in retire_submits() argument
672 struct drm_device *dev = gpu->dev; in retire_submits()
679 for (i = 0; i < gpu->nr_rings; i++) { in retire_submits()
680 struct msm_ringbuffer *ring = gpu->rb[i]; in retire_submits()
684 retire_submit(gpu, submit); in retire_submits()
691 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); in retire_worker() local
692 struct drm_device *dev = gpu->dev; in retire_worker()
695 for (i = 0; i < gpu->nr_rings; i++) in retire_worker()
696 update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence); in retire_worker()
699 retire_submits(gpu); in retire_worker()
704 void msm_gpu_retire(struct msm_gpu *gpu) in msm_gpu_retire() argument
706 struct msm_drm_private *priv = gpu->dev->dev_private; in msm_gpu_retire()
707 queue_work(priv->wq, &gpu->retire_work); in msm_gpu_retire()
708 update_sw_cntrs(gpu); in msm_gpu_retire()
711 /* add bo's to gpu's ring, and kick gpu: */
712 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, in msm_gpu_submit() argument
715 struct drm_device *dev = gpu->dev; in msm_gpu_submit()
722 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_submit()
724 msm_gpu_hw_init(gpu); in msm_gpu_submit()
732 update_sw_cntrs(gpu); in msm_gpu_submit()
741 WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu)); in msm_gpu_submit()
746 submit->gpu->aspace, &iova); in msm_gpu_submit()
749 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); in msm_gpu_submit()
751 msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); in msm_gpu_submit()
754 gpu->funcs->submit(gpu, submit, ctx); in msm_gpu_submit()
757 hangcheck_timer_reset(gpu); in msm_gpu_submit()
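msm_gpu_submit() takes a runtime-PM reference before touching the hardware (722), ensures the HW is initialized (724), pins and activates every BO with the submit fence (746-751), hands the ring to the hardware (754), and re-arms hangcheck (757). That reference is dropped per submit in retire_submit() via autosuspend (665-666), so the GPU stays powered exactly while work is in flight. The pairing in isolation (my_submit()/my_retire() are illustrative):

	#include <linux/pm_runtime.h>

	static void my_submit(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* power up before queuing to HW */
		/* ... write commands to the ring, kick the GPU ... */
	}

	static void my_retire(struct device *dev)
	{
		/* ... fence signaled: unpin buffers, free the submit ... */
		pm_runtime_mark_last_busy(dev);		/* restart autosuspend timer */
		pm_runtime_put_autosuspend(dev);	/* may power off after delay */
	}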
766 struct msm_gpu *gpu = data; in irq_handler() local
767 return gpu->funcs->irq(gpu); in irq_handler()
770 static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) in get_clocks() argument
772 int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks); in get_clocks()
775 gpu->nr_clocks = 0; in get_clocks()
779 gpu->nr_clocks = ret; in get_clocks()
781 gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
782 gpu->nr_clocks, "core"); in get_clocks()
784 gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
785 gpu->nr_clocks, "rbbmtimer"); in get_clocks()
791 msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, in msm_gpu_create_address_space() argument
810 dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); in msm_gpu_create_address_space()
812 aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu"); in msm_gpu_create_address_space()
814 dev_err(gpu->dev->dev, "failed to init iommu: %ld\n", in msm_gpu_create_address_space()
830 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, in msm_gpu_init() argument
837 if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) in msm_gpu_init()
838 gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); in msm_gpu_init()
840 gpu->dev = drm; in msm_gpu_init()
841 gpu->funcs = funcs; in msm_gpu_init()
842 gpu->name = name; in msm_gpu_init()
844 INIT_LIST_HEAD(&gpu->active_list); in msm_gpu_init()
845 INIT_WORK(&gpu->retire_work, retire_worker); in msm_gpu_init()
846 INIT_WORK(&gpu->recover_work, recover_worker); in msm_gpu_init()
849 timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0); in msm_gpu_init()
851 spin_lock_init(&gpu->perf_lock); in msm_gpu_init()
855 gpu->mmio = msm_ioremap(pdev, config->ioname, name); in msm_gpu_init()
856 if (IS_ERR(gpu->mmio)) { in msm_gpu_init()
857 ret = PTR_ERR(gpu->mmio); in msm_gpu_init()
862 gpu->irq = platform_get_irq_byname(pdev, config->irqname); in msm_gpu_init()
863 if (gpu->irq < 0) { in msm_gpu_init()
864 ret = gpu->irq; in msm_gpu_init()
869 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, in msm_gpu_init()
870 IRQF_TRIGGER_HIGH, gpu->name, gpu); in msm_gpu_init()
872 dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); in msm_gpu_init()
876 ret = get_clocks(pdev, gpu); in msm_gpu_init()
880 gpu->ebi1_clk = msm_clk_get(pdev, "bus"); in msm_gpu_init()
881 DBG("ebi1_clk: %p", gpu->ebi1_clk); in msm_gpu_init()
882 if (IS_ERR(gpu->ebi1_clk)) in msm_gpu_init()
883 gpu->ebi1_clk = NULL; in msm_gpu_init()
886 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd"); in msm_gpu_init()
887 DBG("gpu_reg: %p", gpu->gpu_reg); in msm_gpu_init()
888 if (IS_ERR(gpu->gpu_reg)) in msm_gpu_init()
889 gpu->gpu_reg = NULL; in msm_gpu_init()
891 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx"); in msm_gpu_init()
892 DBG("gpu_cx: %p", gpu->gpu_cx); in msm_gpu_init()
893 if (IS_ERR(gpu->gpu_cx)) in msm_gpu_init()
894 gpu->gpu_cx = NULL; in msm_gpu_init()
896 gpu->pdev = pdev; in msm_gpu_init()
897 platform_set_drvdata(pdev, gpu); in msm_gpu_init()
899 msm_devfreq_init(gpu); in msm_gpu_init()
901 gpu->aspace = msm_gpu_create_address_space(gpu, pdev, in msm_gpu_init()
904 if (gpu->aspace == NULL) in msm_gpu_init()
906 else if (IS_ERR(gpu->aspace)) { in msm_gpu_init()
907 ret = PTR_ERR(gpu->aspace); in msm_gpu_init()
911 memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo), in msm_gpu_init()
912 MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, in msm_gpu_init()
921 if (nr_rings > ARRAY_SIZE(gpu->rb)) { in msm_gpu_init()
923 ARRAY_SIZE(gpu->rb)); in msm_gpu_init()
924 nr_rings = ARRAY_SIZE(gpu->rb); in msm_gpu_init()
929 gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova); in msm_gpu_init()
931 if (IS_ERR(gpu->rb[i])) { in msm_gpu_init()
932 ret = PTR_ERR(gpu->rb[i]); in msm_gpu_init()
942 gpu->nr_rings = nr_rings; in msm_gpu_init()
947 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_init()
948 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_init()
949 gpu->rb[i] = NULL; in msm_gpu_init()
952 if (gpu->memptrs_bo) { in msm_gpu_init()
953 msm_gem_put_vaddr(gpu->memptrs_bo); in msm_gpu_init()
954 msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); in msm_gpu_init()
955 drm_gem_object_put_unlocked(gpu->memptrs_bo); in msm_gpu_init()
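msm_gpu_init() treats its regulators and bus clock as optional: any error from the getter, including probe deferral, degrades to NULL (880-894), and every later use is guarded; the fail path (947-955) mirrors msm_gpu_cleanup() below. The optional-resource idiom in isolation (my_get_rail() is an illustrative name):

	#include <linux/regulator/consumer.h>

	static struct regulator *my_get_rail(struct device *dev, const char *id)
	{
		struct regulator *reg = devm_regulator_get(dev, id);

		/* On any failure the rail is simply treated as absent; callers
		 * guard each regulator_enable()/disable() with a NULL check. */
		return IS_ERR(reg) ? NULL : reg;
	}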
962 void msm_gpu_cleanup(struct msm_gpu *gpu) in msm_gpu_cleanup() argument
966 DBG("%s", gpu->name); in msm_gpu_cleanup()
968 WARN_ON(!list_empty(&gpu->active_list)); in msm_gpu_cleanup()
970 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_cleanup()
971 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_cleanup()
972 gpu->rb[i] = NULL; in msm_gpu_cleanup()
975 if (gpu->memptrs_bo) { in msm_gpu_cleanup()
976 msm_gem_put_vaddr(gpu->memptrs_bo); in msm_gpu_cleanup()
977 msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); in msm_gpu_cleanup()
978 drm_gem_object_put_unlocked(gpu->memptrs_bo); in msm_gpu_cleanup()
981 if (!IS_ERR_OR_NULL(gpu->aspace)) { in msm_gpu_cleanup()
982 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, in msm_gpu_cleanup()
984 msm_gem_address_space_put(gpu->aspace); in msm_gpu_cleanup()