// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/dma-buf-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
#include "lima_trace.h"

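/*
 * A lima fence wraps the common dma_fence and keeps a pointer back to
 * the pipe that emitted it, so the fence ops below can resolve the
 * timeline name from the scheduler instance.
 */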
struct lima_fence {
	struct dma_fence base;
	struct lima_sched_pipe *pipe;
};

static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;

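/*
 * The fence slab is shared by all lima devices, so it is refcounted:
 * only the first lima_sched_slab_init() creates it and only the last
 * lima_sched_slab_fini() destroys it.
 */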
int lima_sched_slab_init(void)
{
	if (!lima_fence_slab) {
		lima_fence_slab = kmem_cache_create(
			"lima_fence", sizeof(struct lima_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!lima_fence_slab)
			return -ENOMEM;
	}

	lima_fence_slab_refcnt++;
	return 0;
}

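/* Paired with lima_sched_slab_init(); the slab is destroyed on the last put. */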
void lima_sched_slab_fini(void)
{
	if (!--lima_fence_slab_refcnt) {
		kmem_cache_destroy(lima_fence_slab);
		lima_fence_slab = NULL;
	}
}

static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
	return container_of(fence, struct lima_fence, base);
}

static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
	return "lima";
}

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	return f->pipe->base.name;
}

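/*
 * dma_fence objects may still be accessed by RCU readers after release,
 * so the actual slab free is deferred to an RCU grace period.
 */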
static void lima_fence_release_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct lima_fence *fence = to_lima_fence(f);

	kmem_cache_free(lima_fence_slab, fence);
}

static void lima_fence_release(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	call_rcu(&f->base.rcu, lima_fence_release_rcu);
}

static const struct dma_fence_ops lima_fence_ops = {
	.get_driver_name = lima_fence_get_driver_name,
	.get_timeline_name = lima_fence_get_timeline_name,
	.release = lima_fence_release,
};

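/*
 * Allocate a fence on the pipe's fence context. Bumping fence_seqno
 * without a lock looks safe here since fences are only created from
 * run_job, which the scheduler serializes per pipe.
 */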
static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
	struct lima_fence *fence;

	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->pipe = pipe;
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);

	return fence;
}

static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
	return container_of(job, struct lima_sched_task, base);
}

static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
	return container_of(sched, struct lima_sched_pipe, base);
}

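/*
 * Set up a task: take a reference on each BO and on the VM for the
 * lifetime of the task (dropped again in lima_sched_task_fini()), and
 * prepare the deps xarray that holds the fences this task must wait on.
 */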
int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm)
{
	int err, i;

	task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
	if (!task->bos)
		return -ENOMEM;

	for (i = 0; i < num_bos; i++)
		drm_gem_object_get(&bos[i]->base.base);

	err = drm_sched_job_init(&task->base, &context->base, vm);
	if (err) {
		kfree(task->bos);
		return err;
	}

	task->num_bos = num_bos;
	task->vm = lima_vm_get(vm);

	xa_init_flags(&task->deps, XA_FLAGS_ALLOC);

	return 0;
}

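/* Release everything lima_sched_task_init() acquired, plus any
 * dependency fences that were never consumed by the scheduler.
 */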
void lima_sched_task_fini(struct lima_sched_task *task)
{
	struct dma_fence *fence;
	unsigned long index;
	int i;

	drm_sched_job_cleanup(&task->base);

	xa_for_each(&task->deps, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&task->deps);

	if (task->bos) {
		for (i = 0; i < task->num_bos; i++)
			drm_gem_object_put(&task->bos[i]->base.base);
		kfree(task->bos);
	}

	lima_vm_put(task->vm);
}

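/*
 * A lima sched context is a thin wrapper around a scheduler entity;
 * userspace gets one per pipe for each lima_ctx it creates.
 */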
int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context,
			    atomic_t *guilty)
{
	struct drm_gpu_scheduler *sched = &pipe->base;

	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, guilty);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context)
{
	drm_sched_entity_fini(&context->base);
}

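/*
 * Push a task to the entity and return a reference to its "finished"
 * fence for the caller. The reference is taken before the push because
 * the job may complete, and drop its own reference, at any time after
 * drm_sched_entity_push_job().
 */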
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
						struct lima_sched_task *task)
{
	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);

	trace_lima_task_submit(task);
	drm_sched_entity_push_job(&task->base, &context->base);
	return fence;
}

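/*
 * Scheduler callback that hands out this job's dependencies one fence
 * at a time. xa_erase() transfers ownership of the fence reference to
 * the scheduler, which puts it once the dependency is resolved.
 */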
static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
					       struct drm_sched_entity *entity)
{
	struct lima_sched_task *task = to_lima_task(job);

	if (!xa_empty(&task->deps))
		return xa_erase(&task->deps, task->last_dep++);

	return NULL;
}

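/*
 * Runtime PM and devfreq bracketing for job execution: lima_pm_busy()
 * before the hardware starts a task, lima_pm_idle() once the pipe is
 * idle again (task done or timeout handled).
 */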
static int lima_pm_busy(struct lima_device *ldev)
{
	int ret;

	/* resume GPU if it has been suspended by runtime PM */
	ret = pm_runtime_resume_and_get(ldev->dev);
	if (ret < 0)
		return ret;

	lima_devfreq_record_busy(&ldev->devfreq);
	return 0;
}

static void lima_pm_idle(struct lima_device *ldev)
{
	lima_devfreq_record_idle(&ldev->devfreq);

	/* GPU can do auto runtime suspend */
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
}

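/*
 * Scheduler callback, invoked once all dependencies of a job have
 * signaled: flush the L2 caches, switch the MMUs to the task's VM and
 * start the task via pipe->task_run(). The returned hardware fence is
 * signaled from lima_sched_pipe_task_done() when the task completes.
 */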
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_device *ldev = pipe->ldev;
	struct lima_fence *fence;
	int i, err;

	/* skip the job if its finished fence already carries an error,
	 * e.g. because it was marked guilty after a GPU reset
	 */
	if (job->s_fence->finished.error < 0)
		return NULL;

	fence = lima_fence_create(pipe);
	if (!fence)
		return NULL;

	err = lima_pm_busy(ldev);
	if (err < 0) {
		dma_fence_put(&fence->base);
		return NULL;
	}

	task->fence = &fence->base;

	/* take an extra reference for the caller; otherwise the irq
	 * handler may signal and drop the fence before the caller
	 * gets to use it
	 */
	dma_fence_get(task->fence);

	pipe->current_task = task;

	/* this is needed for the MMU to work correctly; otherwise GP/PP
	 * will hang or page fault for unknown reasons after running for
	 * a while
	 *
	 * Need to investigate:
	 * 1. is it related to the TLB?
	 * 2. how much performance is lost to the L2 cache flush?
	 * 3. can we reduce the number of calls, since all GP/PP share
	 *    the same L2 cache on mali400?
	 *
	 * TODO:
	 * 1. move this to task fini to save some wait time?
	 * 2. when GP and PP use different L2 caches, does PP need to
	 *    wait for the GP L2 cache flush?
	 */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu)
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
	}

	trace_lima_task_run(task);

	pipe->error = false;
	pipe->task_run(pipe, task);

	return task->fence;
}

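/*
 * Snapshot the state of a hung task (frame descriptor, process name and
 * pid, plus the contents of every attached BO) onto the device's error
 * task list, so it can later be read back by userspace for debugging.
 */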
static void lima_sched_build_error_task_list(struct lima_sched_task *task)
{
	struct lima_sched_error_task *et;
	struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
	struct lima_ip *ip = pipe->processor[0];
	int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
	struct lima_device *dev = ip->dev;
	struct lima_sched_context *sched_ctx =
		container_of(task->base.entity,
			     struct lima_sched_context, base);
	struct lima_ctx *ctx =
		container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
	struct lima_dump_task *dt;
	struct lima_dump_chunk *chunk;
	struct lima_dump_chunk_pid *pid_chunk;
	struct lima_dump_chunk_buffer *buffer_chunk;
	u32 size, task_size, mem_size;
	int i;
	struct dma_buf_map map;
	int ret;

	mutex_lock(&dev->error_task_list_lock);

	if (dev->dump.num_tasks >= lima_max_error_tasks) {
		dev_info(dev->dev, "fail to save task state from %s pid %d: "
			 "error task list is full\n", ctx->pname, ctx->pid);
		goto out;
	}

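	/*
	 * The dump consists of a lima_dump_task header followed by
	 * variable-size chunks: one frame chunk, a process-name chunk,
	 * a pid chunk, then one buffer chunk per BO. The total size is
	 * computed up front so the whole snapshot fits in a single
	 * kvmalloc allocation.
	 */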
	/* frame chunk */
	size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	/* process name chunk */
	size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
	/* pid chunk */
	size += sizeof(struct lima_dump_chunk);
	/* buffer chunks */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		size += sizeof(struct lima_dump_chunk);
		size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
	}

	task_size = size + sizeof(struct lima_dump_task);
	mem_size = task_size + sizeof(*et);
	et = kvmalloc(mem_size, GFP_KERNEL);
	if (!et) {
		dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
			mem_size);
		goto out;
	}

	et->data = et + 1;
	et->size = task_size;

	dt = et->data;
	memset(dt, 0, sizeof(*dt));
	dt->id = pipe_id;
	dt->size = size;

	chunk = (struct lima_dump_chunk *)(dt + 1);
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_FRAME;
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);
	dt->num_chunks++;

	chunk = (void *)(chunk + 1) + chunk->size;
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
	chunk->size = sizeof(ctx->pname);
	memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
	dt->num_chunks++;

	pid_chunk = (void *)(chunk + 1) + chunk->size;
	memset(pid_chunk, 0, sizeof(*pid_chunk));
	pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
	pid_chunk->pid = ctx->pid;
	dt->num_chunks++;

	buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];
		void *data;

		memset(buffer_chunk, 0, sizeof(*buffer_chunk));
		buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
		buffer_chunk->va = lima_vm_get_va(task->vm, bo);

		if (bo->heap_size) {
			buffer_chunk->size = bo->heap_size;

			data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
			if (!data) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, data, buffer_chunk->size);

			vunmap(data);
		} else {
			buffer_chunk->size = lima_bo_size(bo);

			ret = drm_gem_shmem_vmap(&bo->base, &map);
			if (ret) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);

			drm_gem_shmem_vunmap(&bo->base, &map);
		}

		buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
		dt->num_chunks++;
	}

	list_add(&et->list, &dev->error_task_list);
	dev->dump.size += et->size;
	dev->dump.num_tasks++;

	dev_info(dev->dev, "save error task state success\n");

out:
	mutex_unlock(&dev->error_task_list_lock);
}

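/*
 * Scheduler timeout handling: stop the scheduler, blame the hung task
 * and snapshot its state, reset the backend and MMU, drop the PM
 * reference taken in run_job, then resubmit the remaining jobs and
 * restart the scheduler.
 */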
static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_device *ldev = pipe->ldev;

	if (!pipe->error)
		DRM_ERROR("lima job timeout\n");

	drm_sched_stop(&pipe->base, &task->base);

	drm_sched_increase_karma(&task->base);

	lima_sched_build_error_task_list(task);

	pipe->task_error(pipe);

	if (pipe->bcast_mmu)
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	else {
		int i;

		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);
	}

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	lima_pm_idle(ldev);

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

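/*
 * Final cleanup once the job's fences are no longer needed: drop the
 * hardware fence, unmap the BOs from the task's VM and return the task
 * to the pipe's slab.
 */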
static void lima_sched_free_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_vm *vm = task->vm;
	struct lima_bo **bos = task->bos;
	int i;

	dma_fence_put(task->fence);

	for (i = 0; i < task->num_bos; i++)
		lima_vm_bo_del(vm, bos[i]);

	lima_sched_task_fini(task);
	kmem_cache_free(pipe->task_slab, task);
}

static const struct drm_sched_backend_ops lima_sched_ops = {
	.dependency = lima_sched_dependency,
	.run_job = lima_sched_run_job,
	.timedout_job = lima_sched_timedout_job,
	.free_job = lima_sched_free_job,
};

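/*
 * Recovery worker for recoverable errors (e.g. a GP task that ran out
 * of heap memory): flush caches and TLBs, then let the backend retry
 * the task; if pipe->task_recover() fails, escalate to a scheduler
 * fault and take the full timeout/reset path instead.
 */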
static void lima_sched_recover_work(struct work_struct *work)
{
	struct lima_sched_pipe *pipe =
		container_of(work, struct lima_sched_pipe, recover_work);
	int i;

	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
}

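/*
 * One drm_gpu_scheduler per pipe with a single hardware submission
 * slot; the job timeout comes from the lima_sched_timeout_ms module
 * parameter, falling back to 500 ms when it is unset.
 */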
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
	unsigned int timeout = lima_sched_timeout_ms > 0 ?
			       lima_sched_timeout_ms : 500;

	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);

	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
			      lima_job_hang_limit,
			      msecs_to_jiffies(timeout), NULL,
			      NULL, name);
}

void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
	drm_sched_fini(&pipe->base);
}

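/*
 * Called from the IRQ path once the hardware is done with the current
 * task: on error either schedule recovery (if the task supports it) or
 * trigger a scheduler fault; on success signal the task fence and drop
 * the PM reference.
 */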
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;
	struct lima_device *ldev = pipe->ldev;

	if (pipe->error) {
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);

		lima_pm_idle(ldev);
	}
}