// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret = 0;

        if (gpu->gpu_reg) {
                ret = regulator_enable(gpu->gpu_reg);
                if (ret) {
                        DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
                        return ret;
                }
        }

        if (gpu->gpu_cx) {
                ret = regulator_enable(gpu->gpu_cx);
                if (ret) {
                        DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
                        return ret;
                }
        }

        return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
        if (gpu->gpu_cx)
                regulator_disable(gpu->gpu_cx);
        if (gpu->gpu_reg)
                regulator_disable(gpu->gpu_reg);
        return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
        if (gpu->core_clk && gpu->fast_rate)
                clk_set_rate(gpu->core_clk, gpu->fast_rate);

        /* Set the RBBM timer rate to 19.2 MHz */
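        /* (19.2 MHz is typically the always-on crystal (XO) rate on Qualcomm SoCs) */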
        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 19200000);

        return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
        clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

        /*
         * Set the clock to a deliberately low rate. On older targets the clock
         * speed had to be non-zero to avoid problems. On newer targets this
         * will be rounded down to zero anyway so it all works out.
         */
        if (gpu->core_clk)
                clk_set_rate(gpu->core_clk, 27000000);

        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 0);

        return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
        return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
        clk_disable_unprepare(gpu->ebi1_clk);
        return 0;
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);
        trace_msm_gpu_resume(0);

        ret = enable_pwrrail(gpu);
        if (ret)
                return ret;

        ret = enable_clk(gpu);
        if (ret)
                return ret;

        ret = enable_axi(gpu);
        if (ret)
                return ret;

        msm_devfreq_resume(gpu);

        gpu->needs_hw_init = true;

        return 0;
}

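/* Tear down in the reverse order of msm_gpu_pm_resume() */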
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);
        trace_msm_gpu_suspend(0);

        msm_devfreq_suspend(gpu);

        ret = disable_axi(gpu);
        if (ret)
                return ret;

        ret = disable_clk(gpu);
        if (ret)
                return ret;

        ret = disable_pwrrail(gpu);
        if (ret)
                return ret;

        gpu->suspend_count++;

        return 0;
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
        int ret;

        WARN_ON(!mutex_is_locked(&gpu->lock));

        if (!gpu->needs_hw_init)
                return 0;

        disable_irq(gpu->irq);
        ret = gpu->funcs->hw_init(gpu);
        if (!ret)
                gpu->needs_hw_init = false;
        enable_irq(gpu->irq);

        return ret;
}

static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                uint32_t fence)
{
        struct msm_gem_submit *submit;
        unsigned long flags;

        spin_lock_irqsave(&ring->submit_lock, flags);
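        /*
         * ring->submits is ordered by seqno (submits are appended in
         * msm_gpu_submit()), so we can stop at the first submit that has
         * not yet completed.
         */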
        list_for_each_entry(submit, &ring->submits, node) {
                if (submit->seqno > fence)
                        break;

                msm_update_fence(submit->ring->fctx,
                        submit->hw_fence->seqno);
                dma_fence_signal(submit->hw_fence);
        }
        spin_unlock_irqrestore(&ring->submit_lock, flags);
}

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
                size_t count, void *data, size_t datalen)
{
        struct msm_gpu *gpu = data;
        struct drm_print_iterator iter;
        struct drm_printer p;
        struct msm_gpu_state *state;

        state = msm_gpu_crashstate_get(gpu);
        if (!state)
                return 0;

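        /*
         * The coredump printer below only materializes the byte range
         * [offset, offset + count) of the full dump into 'buffer';
         * iter.remain tracks how much of that window is still unused.
         */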
        iter.data = buffer;
        iter.offset = 0;
        iter.start = offset;
        iter.remain = count;

        p = drm_coredump_printer(&iter);

        drm_printf(&p, "---\n");
        drm_printf(&p, "kernel: " UTS_RELEASE "\n");
        drm_printf(&p, "module: " KBUILD_MODNAME "\n");
        drm_printf(&p, "time: %lld.%09ld\n",
                state->time.tv_sec, state->time.tv_nsec);
        if (state->comm)
                drm_printf(&p, "comm: %s\n", state->comm);
        if (state->cmd)
                drm_printf(&p, "cmdline: %s\n", state->cmd);

        gpu->funcs->show(gpu, state, &p);

        msm_gpu_crashstate_put(gpu);

        return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
        struct msm_gpu *gpu = data;

        msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
                struct msm_gem_object *obj, u64 iova, u32 flags)
{
        struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

        /* Don't record write-only objects */
        state_bo->size = obj->base.size;
        state_bo->iova = iova;

        /* Only store data for non-imported buffer objects marked for read */
        if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
                void *ptr;

                state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
                if (!state_bo->data)
                        goto out;

                msm_gem_lock(&obj->base);
                ptr = msm_gem_get_vaddr_active(&obj->base);
                msm_gem_unlock(&obj->base);
                if (IS_ERR(ptr)) {
                        kvfree(state_bo->data);
                        state_bo->data = NULL;
                        goto out;
                }

                memcpy(state_bo->data, ptr, obj->base.size);
                msm_gem_put_vaddr(&obj->base);
        }
out:
        state->nr_bos++;
}

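/*
 * Sketch of the should_dump() helper used below, assuming the behavior of
 * the upstream msm helper for this kernel vintage: a BO is dumped when
 * full dumping is enabled via the msm_rd 'rd_full' option, or when
 * userspace flagged it with MSM_SUBMIT_BO_DUMP.
 */
static bool
should_dump(struct msm_gem_submit *submit, int idx)
{
        extern bool rd_full;
        return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}
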
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
                struct msm_gem_submit *submit, char *comm, char *cmd)
{
        struct msm_gpu_state *state;

        /* Check if the target supports capturing crash state */
        if (!gpu->funcs->gpu_state_get)
                return;

        /* Only save one crash state at a time */
        if (gpu->crashstate)
                return;

        state = gpu->funcs->gpu_state_get(gpu);
        if (IS_ERR_OR_NULL(state))
                return;

        /* Fill in the additional crash state information */
        state->comm = kstrdup(comm, GFP_KERNEL);
        state->cmd = kstrdup(cmd, GFP_KERNEL);
        state->fault_info = gpu->fault_info;

        if (submit) {
                int i, nr = 0;

                /* count # of buffers to dump: */
                for (i = 0; i < submit->nr_bos; i++)
                        if (should_dump(submit, i))
                                nr++;
                /* always dump cmd bo's, but don't double count them: */
                for (i = 0; i < submit->nr_cmds; i++)
                        if (!should_dump(submit, submit->cmd[i].idx))
                                nr++;

                state->bos = kcalloc(nr,
                        sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

                for (i = 0; state->bos && i < submit->nr_bos; i++) {
                        if (should_dump(submit, i)) {
                                msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
                                        submit->bos[i].iova, submit->bos[i].flags);
                        }
                }

                for (i = 0; state->bos && i < submit->nr_cmds; i++) {
                        int idx = submit->cmd[i].idx;

                        if (!should_dump(submit, submit->cmd[i].idx)) {
                                msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
                                        submit->bos[idx].iova, submit->bos[idx].flags);
                        }
                }
        }

        /* Set the active crash state to be dumped on failure */
        gpu->crashstate = state;

        /* FIXME: Release the crashstate if this errors out? */
        dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
                msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
                struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
        struct msm_gem_submit *submit;
        unsigned long flags;

        spin_lock_irqsave(&ring->submit_lock, flags);
        list_for_each_entry(submit, &ring->submits, node) {
                if (submit->seqno == fence) {
                        spin_unlock_irqrestore(&ring->submit_lock, flags);
                        return submit;
                }
        }
        spin_unlock_irqrestore(&ring->submit_lock, flags);

        return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_submit *submit;
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
        char *comm = NULL, *cmd = NULL;
        int i;

        mutex_lock(&gpu->lock);

        DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

        submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
        if (submit) {
                struct task_struct *task;

                /* Increment the fault counts */
                gpu->global_faults++;
                submit->queue->faults++;

                task = get_pid_task(submit->pid, PIDTYPE_PID);
                if (task) {
                        comm = kstrdup(task->comm, GFP_KERNEL);
                        cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
                        put_task_struct(task);
                }

                if (comm && cmd) {
                        DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
                                gpu->name, comm, cmd);

                        msm_rd_dump_submit(priv->hangrd, submit,
                                "offending task: %s (%s)", comm, cmd);
                } else {
                        msm_rd_dump_submit(priv->hangrd, submit, NULL);
                }
        }

        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
        msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
        pm_runtime_put_sync(&gpu->pdev->dev);

        kfree(cmd);
        kfree(comm);

        /*
         * Update all the rings with the latest and greatest fence.. this
         * needs to happen after msm_rd_dump_submit() to ensure that the
         * bo's referenced by the offending submit are still around.
         */
        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];

                uint32_t fence = ring->memptrs->fence;

                /*
                 * For the current (faulting?) ring/submit advance the fence by
                 * one more to clear the faulting submit
                 */
                if (ring == cur_ring)
                        fence++;

                update_fences(gpu, ring, fence);
        }

        if (msm_gpu_active(gpu)) {
                /* retire completed submits, plus the one that hung: */
                retire_submits(gpu);

                pm_runtime_get_sync(&gpu->pdev->dev);
                gpu->funcs->recover(gpu);
                pm_runtime_put_sync(&gpu->pdev->dev);

                /*
                 * Replay all remaining submits starting with highest priority
                 * ring
                 */
                for (i = 0; i < gpu->nr_rings; i++) {
                        struct msm_ringbuffer *ring = gpu->rb[i];
                        unsigned long flags;

                        spin_lock_irqsave(&ring->submit_lock, flags);
                        list_for_each_entry(submit, &ring->submits, node)
                                gpu->funcs->submit(gpu, submit);
                        spin_unlock_irqrestore(&ring->submit_lock, flags);
                }
        }

        mutex_unlock(&gpu->lock);

        msm_gpu_retire(gpu);
}

static void fault_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
        struct msm_gem_submit *submit;
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
        char *comm = NULL, *cmd = NULL;

        mutex_lock(&gpu->lock);

        submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
        if (submit && submit->fault_dumped)
                goto resume_smmu;

        if (submit) {
                struct task_struct *task;

                task = get_pid_task(submit->pid, PIDTYPE_PID);
                if (task) {
                        comm = kstrdup(task->comm, GFP_KERNEL);
                        cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
                        put_task_struct(task);
                }

                /*
                 * When we get GPU iova faults, we can get 1000s of them,
                 * but we really only want to log the first one.
                 */
                submit->fault_dumped = true;
        }

        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
        msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
        pm_runtime_put_sync(&gpu->pdev->dev);

        kfree(cmd);
        kfree(comm);

resume_smmu:
        memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
        gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);

        mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
        struct msm_drm_private *priv = gpu->dev->dev_private;

        mod_timer(&gpu->hangcheck_timer,
                        round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

static void hangcheck_handler(struct timer_list *t)
{
        struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
        struct drm_device *dev = gpu->dev;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
        uint32_t fence = ring->memptrs->fence;

        if (fence != ring->hangcheck_fence) {
                /* some progress has been made.. ya! */
                ring->hangcheck_fence = fence;
        } else if (fence < ring->seqno) {
                /* no progress and not done.. hung! */
                ring->hangcheck_fence = fence;
                DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
                        gpu->name, ring->id);
                DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
                        gpu->name, fence);
                DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
                        gpu->name, ring->seqno);

                kthread_queue_work(gpu->worker, &gpu->recover_work);
        }

        /* if still more pending work, reset the hangcheck timer: */
        if (ring->seqno > ring->hangcheck_fence)
                hangcheck_timer_reset(gpu);

        /* workaround for missing irq: */
        msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
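/*
 * Note that calling this with ncntrs == 0 (as msm_gpu_perfcntr_start()
 * does) just latches the current HW counter values as the baseline
 * without reporting any deltas.
 */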
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
        uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
        int i, n = min(ncntrs, gpu->num_perfcntrs);

        /* read current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

        /* update cntrs: */
        for (i = 0; i < n; i++)
                cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

        /* save current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                gpu->last_cntrs[i] = current_cntrs[i];

        return n;
}

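/*
 * Accumulate active vs. total wall-clock time (in microseconds, per
 * ktime_to_us() below) so a sampler can derive GPU utilization as
 * activetime/totaltime between calls to msm_gpu_perfcntr_sample().
 */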
static void update_sw_cntrs(struct msm_gpu *gpu)
{
        ktime_t time;
        uint32_t elapsed;
        unsigned long flags;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        if (!gpu->perfcntr_active)
                goto out;

        time = ktime_get();
        elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

        gpu->totaltime += elapsed;
        if (gpu->last_sample.active)
                gpu->activetime += elapsed;

        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = time;

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
        unsigned long flags;

        pm_runtime_get_sync(&gpu->pdev->dev);

        spin_lock_irqsave(&gpu->perf_lock, flags);
        /* we could dynamically enable/disable perfcntr registers too.. */
        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = ktime_get();
        gpu->activetime = gpu->totaltime = 0;
        gpu->perfcntr_active = true;
        update_hw_cntrs(gpu, 0, NULL);
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
        gpu->perfcntr_active = false;
        pm_runtime_put_sync(&gpu->pdev->dev);
}

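/*
 * Illustrative usage of this API (in the tree, the msm_perf debugfs code
 * is the consumer); 'activetime', 'totaltime' and 'cntrs' here are
 * hypothetical caller-owned variables:
 *
 *	msm_gpu_perfcntr_start(gpu);
 *	...
 *	ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
 *			ARRAY_SIZE(cntrs), cntrs);
 *	...
 *	msm_gpu_perfcntr_stop(gpu);
 */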
/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&gpu->perf_lock, flags);

        if (!gpu->perfcntr_active) {
                ret = -EINVAL;
                goto out;
        }

        *activetime = gpu->activetime;
        *totaltime = gpu->totaltime;

        gpu->activetime = gpu->totaltime = 0;

        ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);

        return ret;
}

/*
 * Cmdstream submission/retirement:
 */

static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                struct msm_gem_submit *submit)
{
        int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
        volatile struct msm_gpu_submit_stats *stats;
        u64 elapsed, clock = 0;
        unsigned long flags;

        stats = &ring->memptrs->stats[index];
        /* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
        elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
        do_div(elapsed, 192);
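        /* (one 19.2 MHz tick is 1e9/19.2e6 = 10000/192 ns, hence the scaling) */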

        /* Calculate the clock frequency from the number of CP cycles */
        if (elapsed) {
                clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
                do_div(clock, elapsed);
        }
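        /* (cycles * 1000 / ns is cycles per microsecond, so 'clock' is in MHz) */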

        trace_msm_gpu_submit_retired(submit, elapsed, clock,
                stats->alwayson_start, stats->alwayson_end);

        msm_submit_retire(submit);

        pm_runtime_mark_last_busy(&gpu->pdev->dev);

        spin_lock_irqsave(&ring->submit_lock, flags);
        list_del(&submit->node);
        spin_unlock_irqrestore(&ring->submit_lock, flags);

        /* Update devfreq on transition from active->idle: */
        mutex_lock(&gpu->active_lock);
        gpu->active_submits--;
        WARN_ON(gpu->active_submits < 0);
        if (!gpu->active_submits)
                msm_devfreq_idle(gpu);
        mutex_unlock(&gpu->active_lock);

        pm_runtime_put_autosuspend(&gpu->pdev->dev);

        msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
        int i;

        /* Retire the commits starting with highest priority */
        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];

                while (true) {
                        struct msm_gem_submit *submit = NULL;
                        unsigned long flags;

                        spin_lock_irqsave(&ring->submit_lock, flags);
                        submit = list_first_entry_or_null(&ring->submits,
                                struct msm_gem_submit, node);
                        spin_unlock_irqrestore(&ring->submit_lock, flags);

                        /*
                         * If no submit, we are done. If submit->fence hasn't
                         * been signalled, then later submits are not signalled
                         * either, so we are also done.
                         */
                        if (submit && dma_fence_is_signaled(submit->hw_fence)) {
                                retire_submit(gpu, ring, submit);
                        } else {
                                break;
                        }
                }
        }
}

static void retire_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

        retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
        int i;

        for (i = 0; i < gpu->nr_rings; i++)
                update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

        kthread_queue_work(gpu->worker, &gpu->retire_work);
        update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
        unsigned long flags;

        WARN_ON(!mutex_is_locked(&gpu->lock));

        pm_runtime_get_sync(&gpu->pdev->dev);

        msm_gpu_hw_init(gpu);

        submit->seqno = ++ring->seqno;

        msm_rd_dump_submit(priv->rd, submit, NULL);

        update_sw_cntrs(gpu);

        /*
         * ring->submits holds a ref to the submit, to deal with the case
         * that a submit completes before msm_ioctl_gem_submit() returns.
         */
        msm_gem_submit_get(submit);

        spin_lock_irqsave(&ring->submit_lock, flags);
        list_add_tail(&submit->node, &ring->submits);
        spin_unlock_irqrestore(&ring->submit_lock, flags);

        /* Update devfreq on transition from idle->active: */
        mutex_lock(&gpu->active_lock);
        if (!gpu->active_submits)
                msm_devfreq_active(gpu);
        gpu->active_submits++;
        mutex_unlock(&gpu->active_lock);

        gpu->funcs->submit(gpu, submit);
        priv->lastctx = submit->queue->ctx;

        hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
        struct msm_gpu *gpu = data;

        return gpu->funcs->irq(gpu);
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
        int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

        if (ret < 1) {
                gpu->nr_clocks = 0;
                return ret;
        }

        gpu->nr_clocks = ret;

        gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
                gpu->nr_clocks, "core");

        gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
                gpu->nr_clocks, "rbbmtimer");

        return 0;
}

/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
        struct msm_gem_address_space *aspace = NULL;

        if (!gpu)
                return NULL;

        /*
         * If the target doesn't support private address spaces then return
         * the global one
         */
        if (gpu->funcs->create_private_address_space) {
                aspace = gpu->funcs->create_private_address_space(gpu);
                if (!IS_ERR(aspace))
                        aspace->pid = get_pid(task_pid(task));
        }

        if (IS_ERR_OR_NULL(aspace))
                aspace = msm_gem_address_space_get(gpu->aspace);

        return aspace;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config)
{
        int i, ret, nr_rings = config->nr_rings;
        void *memptrs;
        uint64_t memptrs_iova;

        if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
                gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

        gpu->dev = drm;
        gpu->funcs = funcs;
        gpu->name = name;

        gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
        if (IS_ERR(gpu->worker)) {
                ret = PTR_ERR(gpu->worker);
                gpu->worker = NULL;
                goto fail;
        }

        sched_set_fifo_low(gpu->worker->task);

        INIT_LIST_HEAD(&gpu->active_list);
        mutex_init(&gpu->active_lock);
        mutex_init(&gpu->lock);
        kthread_init_work(&gpu->retire_work, retire_worker);
        kthread_init_work(&gpu->recover_work, recover_worker);
        kthread_init_work(&gpu->fault_work, fault_worker);

        timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

        spin_lock_init(&gpu->perf_lock);

        /* Map registers: */
        gpu->mmio = msm_ioremap(pdev, config->ioname, name);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }

        /* Get Interrupt: */
        gpu->irq = platform_get_irq(pdev, 0);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }

        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, gpu->name, gpu);
        if (ret) {
                DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }

        ret = get_clocks(pdev, gpu);
        if (ret)
                goto fail;

        gpu->ebi1_clk = msm_clk_get(pdev, "bus");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
        if (IS_ERR(gpu->ebi1_clk))
                gpu->ebi1_clk = NULL;

        /* Acquire regulators: */
        gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
        DBG("gpu_reg: %p", gpu->gpu_reg);
        if (IS_ERR(gpu->gpu_reg))
                gpu->gpu_reg = NULL;

        gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
        DBG("gpu_cx: %p", gpu->gpu_cx);
        if (IS_ERR(gpu->gpu_cx))
                gpu->gpu_cx = NULL;

        gpu->pdev = pdev;
        platform_set_drvdata(pdev, &gpu->adreno_smmu);

        msm_devfreq_init(gpu);

        gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

        if (gpu->aspace == NULL)
                DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
        else if (IS_ERR(gpu->aspace)) {
                ret = PTR_ERR(gpu->aspace);
                goto fail;
        }

        memptrs = msm_gem_kernel_new(drm,
                sizeof(struct msm_rbmemptrs) * nr_rings,
                check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
                &memptrs_iova);

        if (IS_ERR(memptrs)) {
                ret = PTR_ERR(memptrs);
                DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
                goto fail;
        }

        msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

        if (nr_rings > ARRAY_SIZE(gpu->rb)) {
                DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
                        ARRAY_SIZE(gpu->rb));
                nr_rings = ARRAY_SIZE(gpu->rb);
        }

        /* Create ringbuffer(s): */
        for (i = 0; i < nr_rings; i++) {
                gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

                if (IS_ERR(gpu->rb[i])) {
                        ret = PTR_ERR(gpu->rb[i]);
                        DRM_DEV_ERROR(drm->dev,
                                "could not create ringbuffer %d: %d\n", i, ret);
                        goto fail;
                }

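                /* advance to the next ring's slice of the shared memptrs BO */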
                memptrs += sizeof(struct msm_rbmemptrs);
                memptrs_iova += sizeof(struct msm_rbmemptrs);
        }

        gpu->nr_rings = nr_rings;

        return 0;

fail:
        for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
                msm_ringbuffer_destroy(gpu->rb[i]);
                gpu->rb[i] = NULL;
        }

        msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

        platform_set_drvdata(pdev, NULL);
        return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
        int i;

        DBG("%s", gpu->name);

        WARN_ON(!list_empty(&gpu->active_list));

        for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
                msm_ringbuffer_destroy(gpu->rb[i]);
                gpu->rb[i] = NULL;
        }

        msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

        if (!IS_ERR_OR_NULL(gpu->aspace)) {
                gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
                msm_gem_address_space_put(gpu->aspace);
        }

        if (gpu->worker)
                kthread_destroy_worker(gpu->worker);

        msm_devfreq_cleanup(gpu);
}