
Lines matching refs:gpu in drivers/gpu/drm/msm/msm_gpu.c

/*
 * Power Management:
 */

#ifdef CONFIG_MSM_BUS_SCALING
/* Vote for GPU bus bandwidth through the MSM bus-scaling driver. */
static void bs_init(struct msm_gpu *gpu)
{
        if (gpu->bus_scale_table) {
                gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
                DBG("bus scale client: %08x", gpu->bsc);
        }
}

static void bs_fini(struct msm_gpu *gpu)
{
        if (gpu->bsc) {
                msm_bus_scale_unregister_client(gpu->bsc);
                gpu->bsc = 0;
        }
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
        if (gpu->bsc) {
                DBG("set bus scaling: %d", idx);
                msm_bus_scale_client_update_request(gpu->bsc, idx);
        }
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
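The #else branch provides empty stubs so callers such as enable_axi() never need conditional compilation of their own. A minimal sketch of the same idiom with hypothetical names (CONFIG_FOO_FEATURE and foo_bus_vote() are illustrations, not part of this driver):

        #ifdef CONFIG_FOO_FEATURE
        void foo_bus_vote(int level);                  /* real implementation, built only when configured */
        #else
        static inline void foo_bus_vote(int level) {}  /* stub: compiles away entirely */
        #endif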
static int enable_pwrrail(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret;

        if (gpu->gpu_reg) {
                ret = regulator_enable(gpu->gpu_reg);
                /* ... on failure: dev_err() and return ret ... */
        }

        if (gpu->gpu_cx) {
                ret = regulator_enable(gpu->gpu_cx);
                /* ... */
        }

        return 0;
}
static int disable_pwrrail(struct msm_gpu *gpu)
{
        if (gpu->gpu_cx)
                regulator_disable(gpu->gpu_cx);
        if (gpu->gpu_reg)
                regulator_disable(gpu->gpu_reg);
        return 0;
}
static int enable_clk(struct msm_gpu *gpu)
{
        struct clk *rate_clk = NULL;
        int i;

        /* prepare all clocks (grp_clks[0] is deliberately skipped): */
        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
                if (gpu->grp_clks[i]) {
                        clk_prepare(gpu->grp_clks[i]);
                        rate_clk = gpu->grp_clks[i];
                }
        }

        if (rate_clk && gpu->fast_rate)
                clk_set_rate(rate_clk, gpu->fast_rate);

        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
                if (gpu->grp_clks[i])
                        clk_enable(gpu->grp_clks[i]);

        return 0;
}
static int disable_clk(struct msm_gpu *gpu)
{
        struct clk *rate_clk = NULL;
        int i;

        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
                if (gpu->grp_clks[i]) {
                        clk_disable(gpu->grp_clks[i]);
                        rate_clk = gpu->grp_clks[i];
                }
        }

        /* drop to the slow rate while idle: */
        if (rate_clk && gpu->slow_rate)
                clk_set_rate(rate_clk, gpu->slow_rate);

        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
                if (gpu->grp_clks[i])
                        clk_unprepare(gpu->grp_clks[i]);

        return 0;
}
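Both paths keep the two-phase clk API correctly paired: clk_prepare() (which may sleep) before clk_enable(), and clk_disable() before clk_unprepare(). A self-contained sketch of that pairing for a single clock, assuming only the standard <linux/clk.h> API:

        #include <linux/clk.h>

        /* Sketch: bring one clock up and back down with correctly paired calls. */
        static int toggle_one_clk(struct clk *clk)
        {
                int ret;

                ret = clk_prepare(clk);         /* may sleep */
                if (ret)
                        return ret;

                ret = clk_enable(clk);          /* safe in atomic context */
                if (ret) {
                        clk_unprepare(clk);
                        return ret;
                }

                clk_disable(clk);               /* tear down in reverse order */
                clk_unprepare(clk);
                return 0;
        }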
static int enable_axi(struct msm_gpu *gpu)
{
        if (gpu->ebi1_clk)
                clk_prepare_enable(gpu->ebi1_clk);
        if (gpu->bus_freq)
                bs_set(gpu, gpu->bus_freq);
        return 0;
}
static int disable_axi(struct msm_gpu *gpu)
{
        if (gpu->ebi1_clk)
                clk_disable_unprepare(gpu->ebi1_clk);
        if (gpu->bus_freq)
                bs_set(gpu, 0);
        return 0;
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret;

        DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* reference counted: only the first user actually powers up */
        if (gpu->active_cnt++ > 0)
                return 0;

        if (WARN_ON(gpu->active_cnt <= 0))
                return -EINVAL;

        ret = enable_pwrrail(gpu);
        if (ret)
                return ret;
        ret = enable_clk(gpu);
        if (ret)
                return ret;
        ret = enable_axi(gpu);
        return ret;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret;

        DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* reference counted: only the last user actually powers down */
        if (--gpu->active_cnt > 0)
                return 0;

        if (WARN_ON(gpu->active_cnt < 0))
                return -EINVAL;

        ret = disable_axi(gpu);
        if (ret)
                return ret;
        ret = disable_clk(gpu);
        if (ret)
                return ret;
        ret = disable_pwrrail(gpu);
        return ret;
}
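The resume/suspend pair is reference counted through gpu->active_cnt, and the WARN_ON()s above imply both are meant to be called with dev->struct_mutex held. A sketch of how a caller might bracket hardware access (example_touch_hw() is a hypothetical name, not from the driver):

        static int example_touch_hw(struct msm_gpu *gpu)
        {
                struct drm_device *dev = gpu->dev;
                int ret;

                mutex_lock(&dev->struct_mutex);

                ret = msm_gpu_pm_resume(gpu);   /* 0 -> 1: actually powers up */
                if (ret)
                        goto out;

                /* ... access registers here ... */

                ret = msm_gpu_pm_suspend(gpu);  /* 1 -> 0: actually powers down */
        out:
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }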
static void inactive_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
        struct drm_device *dev = gpu->dev;

        if (gpu->inactive)
                return;

        DBG("%s: inactive!\n", gpu->name);

        mutex_lock(&dev->struct_mutex);
        if (!(msm_gpu_active(gpu) || gpu->inactive)) {
                /* still idle and not yet powered down: drop bus and clocks */
                disable_axi(gpu);
                disable_clk(gpu);
                gpu->inactive = true;
        }
        mutex_unlock(&dev->struct_mutex);
}
static void inactive_handler(unsigned long data)
{
        struct msm_gpu *gpu = (struct msm_gpu *)data;
        struct msm_drm_private *priv = gpu->dev->dev_private;

        /* timer context is atomic; defer the power-down to a worker */
        queue_work(priv->wq, &gpu->inactive_work);
}
static void inactive_cancel(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);
        del_timer(&gpu->inactive_timer);
        if (gpu->inactive) {
                /* coming out of idle: bring clocks and bus back up */
                enable_clk(gpu);
                enable_axi(gpu);
                gpu->inactive = false;
        }
}
static void inactive_start(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);
        mod_timer(&gpu->inactive_timer,
                        round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
}
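inactive_start() and inactive_cancel() bound an idle window: the timer is armed when the GPU retires its last work and disarmed when new work arrives, and the handler defers the actual power-down to a workqueue because clock and bus calls can sleep. A generic, self-contained sketch of that timer-to-worker pattern (struct mydev and the 66 ms period are assumptions for illustration):

        #include <linux/jiffies.h>
        #include <linux/timer.h>
        #include <linux/workqueue.h>

        struct mydev {
                struct timer_list idle_timer;
                struct work_struct idle_work;
                struct workqueue_struct *wq;
        };

        static void idle_timer_fn(unsigned long data)
        {
                struct mydev *d = (struct mydev *)data;

                /* timer callbacks run in atomic context: defer sleeping work */
                queue_work(d->wq, &d->idle_work);
        }

        static void mark_busy(struct mydev *d)
        {
                del_timer(&d->idle_timer);      /* device is in use again */
        }

        static void mark_maybe_idle(struct mydev *d)
        {
                mod_timer(&d->idle_timer, jiffies + msecs_to_jiffies(66));
        }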
/*
 * Hangcheck detection for locked gpu:
 */

static void recover_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
        struct drm_device *dev = gpu->dev;

        dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

        mutex_lock(&dev->struct_mutex);
        if (msm_gpu_active(gpu)) {
                inactive_cancel(gpu);
                gpu->funcs->recover(gpu);
        }
        mutex_unlock(&dev->struct_mutex);

        msm_gpu_retire(gpu);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);
        mod_timer(&gpu->hangcheck_timer,
                        round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
static void hangcheck_handler(unsigned long data)
{
        struct msm_gpu *gpu = (struct msm_gpu *)data;
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        if (fence != gpu->hangcheck_fence) {
                /* some progress has been made.. ya! */
                gpu->hangcheck_fence = fence;
        } else if (fence < gpu->submitted_fence) {
                /* no progress and not done.. hung! */
                gpu->hangcheck_fence = fence;
                dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
                                gpu->name);
                dev_err(dev->dev, "%s:     completed fence: %u\n",
                                gpu->name, fence);
                dev_err(dev->dev, "%s:     submitted fence: %u\n",
                                gpu->name, gpu->submitted_fence);
                queue_work(priv->wq, &gpu->recover_work);
        }

        /* if still more pending work, reset the hangcheck timer: */
        if (gpu->submitted_fence > gpu->hangcheck_fence)
                hangcheck_timer_reset(gpu);

        /* workaround for a possibly missed irq: also flush retires */
        queue_work(priv->wq, &gpu->retire_work);
}
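The hang decision reduces to two comparisons on 32-bit fence values. A distilled helper, purely illustrative (not in the original file):

        #include <linux/types.h>

        static bool fence_stalled(uint32_t fence, uint32_t last_seen,
                        uint32_t submitted)
        {
                /* no retires since the previous check, yet work is outstanding */
                return (fence == last_seen) && (fence < submitted);
        }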
/*
 * Performance Counters:
 */

static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
        uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
        int i, n = min(ncntrs, gpu->num_perfcntrs);

        /* read current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

        /* return deltas to the caller: */
        for (i = 0; i < n; i++)
                cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

        /* save current values for the next sample: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                gpu->last_cntrs[i] = current_cntrs[i];

        return n;
}
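Because both the sampled registers and last_cntrs are uint32_t, the delta in the middle loop stays correct even if a hardware counter wraps between samples; unsigned subtraction is modular. A standalone illustration of that arithmetic:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t last = 0xfffffff0u;    /* sample taken just before wrap */
                uint32_t now  = 0x00000010u;    /* next sample, after wrapping past 0 */

                /* modular arithmetic still yields the true elapsed count (32): */
                printf("delta = %u\n", now - last);
                return 0;
        }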
static void update_sw_cntrs(struct msm_gpu *gpu)
{
        ktime_t time;
        uint32_t elapsed;
        unsigned long flags;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        if (!gpu->perfcntr_active)
                goto out;

        time = ktime_get();
        elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

        gpu->totaltime += elapsed;
        if (gpu->last_sample.active)
                gpu->activetime += elapsed;

        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = time;

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
        unsigned long flags;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = ktime_get();
        gpu->activetime = gpu->totaltime = 0;
        gpu->perfcntr_active = true;
        update_hw_cntrs(gpu, 0, NULL);  /* reset the hw counter baseline */
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
        gpu->perfcntr_active = false;
}
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&gpu->perf_lock, flags);

        if (!gpu->perfcntr_active) {
                ret = -EINVAL;
                goto out;
        }

        *activetime = gpu->activetime;
        *totaltime = gpu->totaltime;

        gpu->activetime = gpu->totaltime = 0;

        ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);

        return ret;
}
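A profiler would pair these three entry points as start, periodic sample, stop. A hypothetical caller (example_profile() and the counter array size are assumptions for illustration):

        static void example_profile(struct msm_gpu *gpu)
        {
                uint32_t activetime, totaltime, cntrs[4];
                int n;

                msm_gpu_perfcntr_start(gpu);

                /* ... let some workload run ... */

                n = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
                                ARRAY_SIZE(cntrs), cntrs);
                if (n >= 0)
                        pr_info("busy %u of %u us, %d hw counters read\n",
                                        activetime, totaltime, n);

                msm_gpu_perfcntr_stop(gpu);
        }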
/*
 * Cmdstream submission/retirement:
 */

static void retire_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
        struct drm_device *dev = gpu->dev;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        msm_update_fence(gpu->dev, fence);

        mutex_lock(&dev->struct_mutex);

        /* retire completed bo's, oldest first: */
        while (!list_empty(&gpu->active_list)) {
                struct msm_gem_object *obj;

                obj = list_first_entry(&gpu->active_list,
                                struct msm_gem_object, mm_list);

                if (obj->fence > fence)
                        break;

                /* ... move bo to inactive and drop the ring's reference ... */
                msm_gem_put_iova(&obj->base, gpu->id);
                /* ... */
        }

        mutex_unlock(&dev->struct_mutex);

        if (!msm_gpu_active(gpu))
                inactive_start(gpu);
}
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
        struct msm_drm_private *priv = gpu->dev->dev_private;
        queue_work(priv->wq, &gpu->retire_work);
        update_sw_cntrs(gpu);
}
/* add bo's to gpu's ring, and kick gpu: */
int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        int i, ret;

        /* ... */
        gpu->submitted_fence = submit->fence;

        inactive_cancel(gpu);           /* new work: leave the idle state */

        /* ... */
        gpu->submitted_fence = submit->fence;

        update_sw_cntrs(gpu);

        ret = gpu->funcs->submit(gpu, submit, ctx);
        /* ... */

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;

                /* no cross-ring synchronization yet: */
                WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

                if (!is_active(msm_obj)) {
                        uint32_t iova;

                        /* ring takes a reference to the bo and its iova: */
                        msm_gem_get_iova_locked(&msm_obj->base,
                                        submit->gpu->id, &iova);
                }

                if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
                        msm_gem_move_to_active(&msm_obj->base, gpu, false,
                                        submit->fence);
                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
                        msm_gem_move_to_active(&msm_obj->base, gpu, true,
                                        submit->fence);
        }

        hangcheck_timer_reset(gpu);

        return ret;
}
/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
        struct msm_gpu *gpu = data;
        return gpu->funcs->irq(gpu);
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, const char *ioname, const char *irqname,
                int ringsz)
{
        struct iommu_domain *iommu;
        int i, ret;

        if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
                gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

        gpu->dev = drm;
        gpu->funcs = funcs;
        gpu->name = name;
        gpu->inactive = true;

        INIT_LIST_HEAD(&gpu->active_list);
        INIT_WORK(&gpu->retire_work, retire_worker);
        INIT_WORK(&gpu->inactive_work, inactive_worker);
        INIT_WORK(&gpu->recover_work, recover_worker);

        setup_timer(&gpu->inactive_timer, inactive_handler,
                        (unsigned long)gpu);
        setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
                        (unsigned long)gpu);

        spin_lock_init(&gpu->perf_lock);

        BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

        /* Map registers: */
        gpu->mmio = msm_ioremap(pdev, ioname, name);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }

        /* Get interrupt: */
        gpu->irq = platform_get_irq_byname(pdev, irqname);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                /* ... */
                goto fail;
        }

        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, gpu->name, gpu);
        if (ret) {
                dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }

        /* Acquire clocks; missing ones are simply left NULL: */
        for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
                gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
                DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
                if (IS_ERR(gpu->grp_clks[i]))
                        gpu->grp_clks[i] = NULL;
        }

        gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
        if (IS_ERR(gpu->ebi1_clk))
                gpu->ebi1_clk = NULL;

        /* Acquire regulators; also optional: */
        gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
        DBG("gpu_reg: %p", gpu->gpu_reg);
        if (IS_ERR(gpu->gpu_reg))
                gpu->gpu_reg = NULL;

        gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
        DBG("gpu_cx: %p", gpu->gpu_cx);
        if (IS_ERR(gpu->gpu_cx))
                gpu->gpu_cx = NULL;

        /* Set up the IOMMU (a single address space for now): */
        iommu = iommu_domain_alloc(&platform_bus_type);
        if (iommu)
                gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
        /* ... */
        gpu->id = msm_register_mmu(drm, gpu->mmu);

        /* Create ringbuffer: */
        mutex_lock(&drm->struct_mutex);
        gpu->rb = msm_ringbuffer_new(gpu, ringsz);
        mutex_unlock(&drm->struct_mutex);
        if (IS_ERR(gpu->rb)) {
                ret = PTR_ERR(gpu->rb);
                gpu->rb = NULL;
                /* ... */
                goto fail;
        }

        bs_init(gpu);

        return 0;

fail:
        return ret;
}
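A GPU backend would call msm_gpu_init() from its own probe path, passing its vfuncs and resource names. A sketch under the signature reconstructed above (the string names and SZ_16K ring size are assumptions for illustration):

        static int example_backend_probe(struct drm_device *drm,
                        struct platform_device *pdev, struct msm_gpu *gpu,
                        const struct msm_gpu_funcs *funcs)
        {
                return msm_gpu_init(drm, pdev, gpu, funcs, "gpu0",
                                "gpu0_reg_memory", "gpu0_irq", SZ_16K);
        }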
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);

        WARN_ON(!list_empty(&gpu->active_list));

        bs_fini(gpu);

        if (gpu->rb) {
                if (gpu->rb_iova)
                        msm_gem_put_iova(gpu->rb->bo, gpu->id);
                msm_ringbuffer_destroy(gpu->rb);
        }

        if (gpu->mmu)
                gpu->mmu->funcs->destroy(gpu->mmu);
}