1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/gpu_scheduler.h>
9 #include <drm/panthor_drm.h>
10
11 #include <linux/build_bug.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dma-resv.h>
16 #include <linux/firmware.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/iosys-map.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24
25 #include "panthor_devfreq.h"
26 #include "panthor_device.h"
27 #include "panthor_fw.h"
28 #include "panthor_gem.h"
29 #include "panthor_gpu.h"
30 #include "panthor_heap.h"
31 #include "panthor_mmu.h"
32 #include "panthor_regs.h"
33 #include "panthor_sched.h"
34
35 /**
36 * DOC: Scheduler
37 *
38 * Mali CSF hardware adopts a firmware-assisted scheduling model, where
39 * the firmware takes care of scheduling aspects, to some extent.
40 *
41 * The scheduling happens at the scheduling group level, each group
42 * contains 1 to N queues (N is FW/hardware dependent, and exposed
43 * through the firmware interface). Each queue is assigned a command
44 * stream ring buffer, which serves as a way to get jobs submitted to
45 * the GPU, among other things.
46 *
 * The firmware can schedule a maximum of M groups (M is FW/hardware
 * dependent, and exposed through the firmware interface). Past
 * this maximum number of groups, the kernel must take care of
 * rotating the groups passed to the firmware so every group gets
 * a chance to have its queues scheduled for execution.
52 *
 * The current implementation only supports kernel-mode queues.
 * In other terms, userspace doesn't have access to the ring-buffer.
55 * Instead, userspace passes indirect command stream buffers that are
56 * called from the queue ring-buffer by the kernel using a pre-defined
57 * sequence of command stream instructions to ensure the userspace driver
58 * always gets consistent results (cache maintenance,
59 * synchronization, ...).
60 *
61 * We rely on the drm_gpu_scheduler framework to deal with job
62 * dependencies and submission. As any other driver dealing with a
63 * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
64 * entity has its own job scheduler. When a job is ready to be executed
65 * (all its dependencies are met), it is pushed to the appropriate
66 * queue ring-buffer, and the group is scheduled for execution if it
67 * wasn't already active.
68 *
 * Kernel-side group scheduling is timeslice-based. When we have fewer
 * groups than there are slots, the periodic tick is disabled and we
 * just let the FW schedule the active groups. When there are more
 * groups than slots, we give each group a chance to execute stuff for
 * a given amount of time, and then re-evaluate and pick new groups
 * to schedule. The group selection algorithm is based on
 * priority+round-robin.
76 *
 * Even though user-mode queues are out of scope right now, the
 * current design takes them into account by avoiding any guess on the
 * group/queue state that would be based on information we wouldn't have
 * if userspace was in charge of the ring-buffer. That's also one of the
 * reasons we don't do 'cooperative' scheduling (encoding FW group slot
82 * reservation as dma_fence that would be returned from the
83 * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
84 * a queue of waiters, ordered by job submission order). This approach
85 * would work for kernel-mode queues, but would make user-mode queues a
86 * lot more complicated to retrofit.
87 */
88
89 #define JOB_TIMEOUT_MS 5000
90
91 #define MIN_CS_PER_CSG 8
92
93 #define MIN_CSGS 3
94 #define MAX_CSG_PRIO 0xf
95
96 #define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64))
97 #define MAX_INSTRS_PER_JOB 24
98
99 struct panthor_group;
100
/**
 * struct panthor_csg_slot - Command stream group slot
 *
 * This represents a FW slot for a scheduling group.
 */
struct panthor_csg_slot {
	/** @group: Scheduling group bound to this slot, or NULL if the slot is free. */
	struct panthor_group *group;

	/** @priority: Group priority programmed on this slot. */
	u8 priority;

	/**
	 * @idle: True if the group bound to this slot is idle.
	 *
	 * A group is idle when it has nothing waiting for execution on
	 * all its queues, or when queues are blocked waiting for something
	 * to happen (synchronization object).
	 */
	bool idle;
};
122
/**
 * enum panthor_csg_priority - Group priority
 */
enum panthor_csg_priority {
	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
	PANTHOR_CSG_PRIORITY_LOW = 0,

	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_CSG_PRIORITY_MEDIUM,

	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
	PANTHOR_CSG_PRIORITY_HIGH,

	/**
	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
	 *
	 * Real-time priority allows one to preempt scheduling of other
	 * non-real-time groups. When such a group becomes executable,
	 * it will evict the group with the lowest non-rt priority if
	 * there's no free group slot available.
	 *
	 * Currently not exposed to userspace.
	 */
	PANTHOR_CSG_PRIORITY_RT,

	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. Not a valid priority, keep last. */
	PANTHOR_CSG_PRIORITY_COUNT,
};
151
/**
 * struct panthor_scheduler - Object used to manage the scheduler
 */
struct panthor_scheduler {
	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/**
	 * @wq: Workqueue used by our internal scheduler logic and
	 * drm_gpu_scheduler.
	 *
	 * Used for the scheduler tick, group update or other kind of FW
	 * event processing that can't be handled in the threaded interrupt
	 * path. Also passed to the drm_gpu_scheduler instances embedded
	 * in panthor_queue.
	 */
	struct workqueue_struct *wq;

	/**
	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
	 *
	 * We have a queue dedicated to heap chunk allocation works to avoid
	 * blocking the rest of the scheduler if the allocation tries to
	 * reclaim memory.
	 */
	struct workqueue_struct *heap_alloc_wq;

	/** @tick_work: Work executed on a scheduling tick. */
	struct delayed_work tick_work;

	/**
	 * @sync_upd_work: Work used to process synchronization object updates.
	 *
	 * We use this work to unblock queues/groups that were waiting on a
	 * synchronization object.
	 */
	struct work_struct sync_upd_work;

	/**
	 * @fw_events_work: Work used to process FW events outside the interrupt path.
	 *
	 * Even if the interrupt is threaded, we need any event processing
	 * that require taking the panthor_scheduler::lock to be processed
	 * outside the interrupt path so we don't block the tick logic when
	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
	 * event processing requires taking this lock, we just delegate all
	 * FW event processing to the scheduler workqueue.
	 */
	struct work_struct fw_events_work;

	/**
	 * @fw_events: Bitmask encoding pending FW events.
	 */
	atomic_t fw_events;

	/**
	 * @resched_target: When the next tick should occur.
	 *
	 * Expressed in jiffies.
	 */
	u64 resched_target;

	/**
	 * @last_tick: When the last tick occurred.
	 *
	 * Expressed in jiffies.
	 */
	u64 last_tick;

	/** @tick_period: Tick period in jiffies. */
	u64 tick_period;

	/**
	 * @lock: Lock protecting access to all the scheduler fields.
	 *
	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
	 * fields are touched.
	 */
	struct mutex lock;

	/** @groups: Various lists used to classify groups. */
	struct {
		/**
		 * @runnable: Runnable group lists.
		 *
		 * When a group has queues that want to execute something,
		 * its panthor_group::run_node should be inserted here.
		 *
		 * One list per-priority.
		 */
		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];

		/**
		 * @idle: Idle group lists.
		 *
		 * When all queues of a group are idle (either because they
		 * have nothing to execute, or because they are blocked), the
		 * panthor_group::run_node field should be inserted here.
		 *
		 * One list per-priority.
		 */
		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];

		/**
		 * @waiting: List of groups whose queues are blocked on a
		 * synchronization object.
		 *
		 * Insert panthor_group::wait_node here when a group is waiting
		 * for synchronization objects to be signaled.
		 *
		 * This list is evaluated in the @sync_upd_work work.
		 */
		struct list_head waiting;
	} groups;

	/**
	 * @csg_slots: FW command stream group slots.
	 */
	struct panthor_csg_slot csg_slots[MAX_CSGS];

	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
	u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
	u32 cs_slot_count;

	/** @as_slot_count: Number of address space slots supported by the MMU. */
	u32 as_slot_count;

	/** @used_csg_slot_count: Number of command stream group slots currently used. */
	u32 used_csg_slot_count;

	/** @sb_slot_count: Number of scoreboard slots. */
	u32 sb_slot_count;

	/**
	 * @might_have_idle_groups: True if an active group might have become idle.
	 *
	 * This will force a tick, so other runnable groups can be scheduled if one
	 * or more active groups became idle.
	 */
	bool might_have_idle_groups;

	/** @pm: Power management related fields. */
	struct {
		/** @has_ref: True if the scheduler owns a runtime PM reference. */
		bool has_ref;
	} pm;

	/** @reset: Reset related fields. */
	struct {
		/** @lock: Lock protecting the other reset fields. */
		struct mutex lock;

		/**
		 * @in_progress: True if a reset is in progress.
		 *
		 * Set to true in panthor_sched_pre_reset() and back to false in
		 * panthor_sched_post_reset().
		 */
		atomic_t in_progress;

		/**
		 * @stopped_groups: List containing all groups that were stopped
		 * before a reset.
		 *
		 * Insert panthor_group::run_node in the pre_reset path.
		 */
		struct list_head stopped_groups;
	} reset;
};
323
/**
 * struct panthor_syncobj_32b - 32-bit FW synchronization object
 */
struct panthor_syncobj_32b {
	/** @seqno: Sequence number. */
	u32 seqno;

	/**
	 * @status: Status.
	 *
	 * Non-zero on failure.
	 */
	u32 status;
};
338
/**
 * struct panthor_syncobj_64b - 64-bit FW synchronization object
 */
struct panthor_syncobj_64b {
	/** @seqno: Sequence number. */
	u64 seqno;

	/**
	 * @status: Status.
	 *
	 * Non-zero on failure.
	 */
	u32 status;

	/** @pad: MBZ (must be zero). */
	u32 pad;
};
356
/**
 * struct panthor_queue - Execution queue
 */
struct panthor_queue {
	/** @scheduler: DRM scheduler used for this queue. */
	struct drm_gpu_scheduler scheduler;

	/** @entity: DRM scheduling entity used for this queue. */
	struct drm_sched_entity entity;

	/**
	 * @remaining_time: Time remaining before the job timeout expires.
	 *
	 * The job timeout is suspended when the queue is not scheduled by the
	 * FW. Every time we suspend the timer, we need to save the remaining
	 * time so we can restore it later on.
	 */
	unsigned long remaining_time;

	/** @timeout_suspended: True if the job timeout was suspended. */
	bool timeout_suspended;

	/**
	 * @doorbell_id: Doorbell assigned to this queue.
	 *
	 * Right now, all groups share the same doorbell, and the doorbell ID
	 * is assigned to group_slot + 1 when the group is assigned a slot. But
	 * we might decide to provide fine grained doorbell assignment at some
	 * point, so we don't have to wake up all queues in a group every time
	 * one of them is updated.
	 */
	u8 doorbell_id;

	/**
	 * @priority: Priority of the queue inside the group.
	 *
	 * Must be less than 16 (only 4 bits available).
	 */
	u8 priority;
#define CSF_MAX_QUEUE_PRIO GENMASK(3, 0)

	/** @ringbuf: Command stream ring-buffer. */
	struct panthor_kernel_bo *ringbuf;

	/** @iface: Firmware interface. */
	struct {
		/** @mem: FW memory allocated for this interface. */
		struct panthor_kernel_bo *mem;

		/** @input: Input interface. */
		struct panthor_fw_ringbuf_input_iface *input;

		/** @output: Output interface. */
		const struct panthor_fw_ringbuf_output_iface *output;

		/** @input_fw_va: FW virtual address of the input interface buffer. */
		u32 input_fw_va;

		/** @output_fw_va: FW virtual address of the output interface buffer. */
		u32 output_fw_va;
	} iface;

	/**
	 * @syncwait: Stores information about the synchronization object this
	 * queue is waiting on.
	 */
	struct {
		/** @gpu_va: GPU address of the synchronization object. */
		u64 gpu_va;

		/** @ref: Reference value to compare against. */
		u64 ref;

		/** @gt: True if this is a greater-than test. */
		bool gt;

		/** @sync64: True if this is a 64-bit sync object. */
		bool sync64;

		/** @obj: Buffer object holding the synchronization object. */
		struct drm_gem_object *obj;

		/** @offset: Offset of the synchronization object inside @obj. */
		u64 offset;

		/**
		 * @kmap: Kernel mapping of the buffer object holding the
		 * synchronization object.
		 */
		void *kmap;
	} syncwait;

	/** @fence_ctx: Fence context fields. */
	struct {
		/** @lock: Used to protect access to all fences allocated by this context. */
		spinlock_t lock;

		/**
		 * @id: Fence context ID.
		 *
		 * Allocated with dma_fence_context_alloc().
		 */
		u64 id;

		/** @seqno: Sequence number of the last initialized fence. */
		atomic64_t seqno;

		/**
		 * @last_fence: Fence of the last submitted job.
		 *
		 * We return this fence when we get an empty command stream.
		 * This way, we are guaranteed that all earlier jobs have
		 * completed when drm_sched_job::s_fence::finished signals,
		 * without having to feed the CS ring buffer with a dummy job
		 * that only signals the fence.
		 */
		struct dma_fence *last_fence;

		/**
		 * @in_flight_jobs: List containing all in-flight jobs.
		 *
		 * Used to keep track and signal panthor_job::done_fence when the
		 * synchronization object attached to the queue is signaled.
		 */
		struct list_head in_flight_jobs;
	} fence_ctx;

	/** @profiling: Job profiling data slots and access information. */
	struct {
		/** @slots: Kernel BO holding the slots. */
		struct panthor_kernel_bo *slots;

		/** @slot_count: Number of jobs the ringbuffer can hold at once. */
		u32 slot_count;

		/** @seqno: Index of the next available profiling information slot. */
		u32 seqno;
	} profiling;
};
495
/**
 * enum panthor_group_state - Scheduling group state.
 */
enum panthor_group_state {
	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
	PANTHOR_CS_GROUP_CREATED,

	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
	PANTHOR_CS_GROUP_ACTIVE,

	/**
	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
	 * inactive/suspended right now.
	 */
	PANTHOR_CS_GROUP_SUSPENDED,

	/**
	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
	 *
	 * Can no longer be scheduled. The only allowed action is a destruction.
	 */
	PANTHOR_CS_GROUP_TERMINATED,

	/**
	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
	 *
	 * The FW returned an inconsistent state. The group is flagged unusable
	 * and can no longer be scheduled. The only allowed action is a
	 * destruction.
	 *
	 * When that happens, we also schedule a FW reset, to start from a fresh
	 * state.
	 */
	PANTHOR_CS_GROUP_UNKNOWN_STATE,
};
531
/**
 * struct panthor_group - Scheduling group object
 */
struct panthor_group {
	/** @refcount: Reference count */
	struct kref refcount;

	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/** @vm: VM bound to the group. */
	struct panthor_vm *vm;

	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
	u64 compute_core_mask;

	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
	u64 fragment_core_mask;

	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
	u64 tiler_core_mask;

	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
	u8 max_compute_cores;

	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
	u8 max_fragment_cores;

	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
	u8 max_tiler_cores;

	/** @priority: Group priority (check panthor_csg_priority). */
	u8 priority;

	/** @blocked_queues: Bitmask reflecting the blocked queues. */
	u32 blocked_queues;

	/** @idle_queues: Bitmask reflecting the idle queues. */
	u32 idle_queues;

	/** @fatal_lock: Lock used to protect access to fatal fields. */
	spinlock_t fatal_lock;

	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
	u32 fatal_queues;

	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
	atomic_t tiler_oom;

	/** @queue_count: Number of queues in this group. */
	u32 queue_count;

	/** @queues: Queues owned by this group. */
	struct panthor_queue *queues[MAX_CS_PER_CSG];

	/**
	 * @csg_id: ID of the FW group slot.
	 *
	 * -1 when the group is not scheduled/active.
	 */
	int csg_id;

	/**
	 * @destroyed: True when the group has been destroyed.
	 *
	 * If a group is destroyed it becomes useless: no further jobs can be submitted
	 * to its queues. We simply wait for all references to be dropped so we can
	 * release the group object.
	 */
	bool destroyed;

	/**
	 * @timedout: True when a timeout occurred on any of the queues owned by
	 * this group.
	 *
	 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
	 * and the group can't be suspended, this also leads to a timeout. In any case,
	 * any timeout situation is unrecoverable, and the group becomes useless. We
	 * simply wait for all references to be dropped so we can release the group
	 * object.
	 */
	bool timedout;

	/**
	 * @syncobjs: Pool of per-queue synchronization objects.
	 *
	 * One sync object per queue. The position of the sync object is
	 * determined by the queue index.
	 */
	struct panthor_kernel_bo *syncobjs;

	/** @state: Group state. */
	enum panthor_group_state state;

	/**
	 * @suspend_buf: Suspend buffer.
	 *
	 * Stores the state of the group and its queues when a group is suspended.
	 * Used at resume time to restore the group in its previous state.
	 *
	 * The size of the suspend buffer is exposed through the FW interface.
	 */
	struct panthor_kernel_bo *suspend_buf;

	/**
	 * @protm_suspend_buf: Protection mode suspend buffer.
	 *
	 * Stores the state of the group and its queues when a group that's in
	 * protection mode is suspended.
	 *
	 * Used at resume time to restore the group in its previous state.
	 *
	 * The size of the protection mode suspend buffer is exposed through the
	 * FW interface.
	 */
	struct panthor_kernel_bo *protm_suspend_buf;

	/** @sync_upd_work: Work used to check/signal job fences. */
	struct work_struct sync_upd_work;

	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
	struct work_struct tiler_oom_work;

	/** @term_work: Work used to finish the group termination procedure. */
	struct work_struct term_work;

	/**
	 * @release_work: Work used to release group resources.
	 *
	 * We need to postpone the group release to avoid a deadlock when
	 * the last ref is released in the tick work.
	 */
	struct work_struct release_work;

	/**
	 * @run_node: Node used to insert the group in the
	 * panthor_scheduler::groups::{runnable,idle} and
	 * panthor_scheduler::reset.stopped_groups lists.
	 */
	struct list_head run_node;

	/**
	 * @wait_node: Node used to insert the group in the
	 * panthor_scheduler::groups::waiting list.
	 */
	struct list_head wait_node;
};
679
/**
 * struct panthor_job_profiling_data - Per-job profiling sample
 *
 * One slot of this layout is written per profiled job (see
 * panthor_queue::profiling::slots).
 */
struct panthor_job_profiling_data {
	/** @cycles: Cycle counter values sampled before/after job execution. */
	struct {
		u64 before;
		u64 after;
	} cycles;

	/** @time: Timestamp values sampled before/after job execution. */
	struct {
		u64 before;
		u64 after;
	} time;
};
691
/**
 * group_queue_work() - Queue a group work
 * @group: Group to queue the work for.
 * @wname: Work name.
 *
 * Grabs a ref and queues a work item to the scheduler workqueue. If
 * the work was already queued, we release the reference we grabbed.
 *
 * Work callbacks must release the reference we grabbed here.
 */
#define group_queue_work(group, wname) \
	do { \
		group_get(group); \
		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
			group_put(group); \
	} while (0)
708
/**
 * sched_queue_work() - Queue a scheduler work.
 * @sched: Scheduler object.
 * @wname: Work name.
 *
 * Conditionally queues a scheduler work if no reset is pending/in-progress.
 */
#define sched_queue_work(sched, wname) \
	do { \
		if (!atomic_read(&(sched)->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			queue_work((sched)->wq, &(sched)->wname ## _work); \
	} while (0)
722
/**
 * sched_queue_delayed_work() - Queue a scheduler delayed work.
 * @sched: Scheduler object.
 * @wname: Work name.
 * @delay: Work delay in jiffies.
 *
 * Conditionally queues a scheduler delayed work if no reset is
 * pending/in-progress.
 */
#define sched_queue_delayed_work(sched, wname, delay) \
	do { \
		if (!atomic_read(&(sched)->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
	} while (0)
738
739 /*
740 * We currently set the maximum of groups per file to an arbitrary low value.
741 * But this can be updated if we need more.
742 */
743 #define MAX_GROUPS_PER_POOL 128
744
/**
 * struct panthor_group_pool - Group pool
 *
 * Each file gets assigned a group pool.
 */
struct panthor_group_pool {
	/** @xa: Xarray used to manage group handles. */
	struct xarray xa;
};
754
/**
 * struct panthor_job - Used to manage GPU job
 */
struct panthor_job {
	/** @base: Inherit from drm_sched_job. */
	struct drm_sched_job base;

	/** @refcount: Reference count. */
	struct kref refcount;

	/** @group: Group of the queue this job will be pushed to. */
	struct panthor_group *group;

	/** @queue_idx: Index of the queue inside @group. */
	u32 queue_idx;

	/** @call_info: Information about the userspace command stream call. */
	struct {
		/** @start: GPU address of the userspace command stream. */
		u64 start;

		/** @size: Size of the userspace command stream. */
		u32 size;

		/**
		 * @latest_flush: Flush ID at the time the userspace command
		 * stream was built.
		 *
		 * Needed for the flush reduction mechanism.
		 */
		u32 latest_flush;
	} call_info;

	/** @ringbuf: Position of this job in the ring buffer. */
	struct {
		/** @start: Start offset. */
		u64 start;

		/** @end: End offset. */
		u64 end;
	} ringbuf;

	/**
	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
	 * list.
	 */
	struct list_head node;

	/** @done_fence: Fence signaled when the job is finished or cancelled. */
	struct dma_fence *done_fence;

	/** @profiling: Job profiling information. */
	struct {
		/** @mask: Current device job profiling enablement bitmask. */
		u32 mask;

		/** @slot: Job index in the profiling slots BO. */
		u32 slot;
	} profiling;
};
815
816 static void
panthor_queue_put_syncwait_obj(struct panthor_queue * queue)817 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
818 {
819 if (queue->syncwait.kmap) {
820 struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
821
822 drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
823 queue->syncwait.kmap = NULL;
824 }
825
826 drm_gem_object_put(queue->syncwait.obj);
827 queue->syncwait.obj = NULL;
828 }
829
/**
 * panthor_queue_get_syncwait_obj() - Get a CPU pointer to the sync object
 * @group: Group the queue belongs to.
 * @queue: Queue waiting on a synchronization object.
 *
 * Looks up the BO backing queue->syncwait.gpu_va in the group's VM, maps it,
 * and caches the mapping in queue->syncwait for subsequent calls. The cached
 * resources are released with panthor_queue_put_syncwait_obj().
 *
 * Return: CPU pointer to the synchronization object, or NULL on failure.
 */
static void *
panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_gem_object *bo;
	struct iosys_map map;
	int ret;

	/* Fast path: the object is already mapped. */
	if (queue->syncwait.kmap)
		return queue->syncwait.kmap + queue->syncwait.offset;

	bo = panthor_vm_get_bo_for_va(group->vm,
				      queue->syncwait.gpu_va,
				      &queue->syncwait.offset);
	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
		goto err_put_syncwait_obj;

	queue->syncwait.obj = &bo->base.base;
	ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
	if (drm_WARN_ON(&ptdev->base, ret))
		goto err_put_syncwait_obj;

	queue->syncwait.kmap = map.vaddr;
	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
		goto err_put_syncwait_obj;

	return queue->syncwait.kmap + queue->syncwait.offset;

err_put_syncwait_obj:
	/* Cleanup helper handles partially-acquired state. */
	panthor_queue_put_syncwait_obj(queue);
	return NULL;
}
862
group_free_queue(struct panthor_group * group,struct panthor_queue * queue)863 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
864 {
865 if (IS_ERR_OR_NULL(queue))
866 return;
867
868 drm_sched_entity_destroy(&queue->entity);
869
870 if (queue->scheduler.ops)
871 drm_sched_fini(&queue->scheduler);
872
873 panthor_queue_put_syncwait_obj(queue);
874
875 panthor_kernel_bo_destroy(queue->ringbuf);
876 panthor_kernel_bo_destroy(queue->iface.mem);
877 panthor_kernel_bo_destroy(queue->profiling.slots);
878
879 /* Release the last_fence we were holding, if any. */
880 dma_fence_put(queue->fence_ctx.last_fence);
881
882 kfree(queue);
883 }
884
group_release_work(struct work_struct * work)885 static void group_release_work(struct work_struct *work)
886 {
887 struct panthor_group *group = container_of(work,
888 struct panthor_group,
889 release_work);
890 u32 i;
891
892 for (i = 0; i < group->queue_count; i++)
893 group_free_queue(group, group->queues[i]);
894
895 panthor_kernel_bo_destroy(group->suspend_buf);
896 panthor_kernel_bo_destroy(group->protm_suspend_buf);
897 panthor_kernel_bo_destroy(group->syncobjs);
898
899 panthor_vm_put(group->vm);
900 kfree(group);
901 }
902
group_release(struct kref * kref)903 static void group_release(struct kref *kref)
904 {
905 struct panthor_group *group = container_of(kref,
906 struct panthor_group,
907 refcount);
908 struct panthor_device *ptdev = group->ptdev;
909
910 drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
911 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
912 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
913
914 queue_work(panthor_cleanup_wq, &group->release_work);
915 }
916
group_put(struct panthor_group * group)917 static void group_put(struct panthor_group *group)
918 {
919 if (group)
920 kref_put(&group->refcount, group_release);
921 }
922
923 static struct panthor_group *
group_get(struct panthor_group * group)924 group_get(struct panthor_group *group)
925 {
926 if (group)
927 kref_get(&group->refcount);
928
929 return group;
930 }
931
932 /**
933 * group_bind_locked() - Bind a group to a group slot
934 * @group: Group.
935 * @csg_id: Slot.
936 *
937 * Return: 0 on success, a negative error code otherwise.
938 */
939 static int
group_bind_locked(struct panthor_group * group,u32 csg_id)940 group_bind_locked(struct panthor_group *group, u32 csg_id)
941 {
942 struct panthor_device *ptdev = group->ptdev;
943 struct panthor_csg_slot *csg_slot;
944 int ret;
945
946 lockdep_assert_held(&ptdev->scheduler->lock);
947
948 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
949 ptdev->scheduler->csg_slots[csg_id].group))
950 return -EINVAL;
951
952 ret = panthor_vm_active(group->vm);
953 if (ret)
954 return ret;
955
956 csg_slot = &ptdev->scheduler->csg_slots[csg_id];
957 group_get(group);
958 group->csg_id = csg_id;
959
960 /* Dummy doorbell allocation: doorbell is assigned to the group and
961 * all queues use the same doorbell.
962 *
963 * TODO: Implement LRU-based doorbell assignment, so the most often
964 * updated queues get their own doorbell, thus avoiding useless checks
965 * on queues belonging to the same group that are rarely updated.
966 */
967 for (u32 i = 0; i < group->queue_count; i++)
968 group->queues[i]->doorbell_id = csg_id + 1;
969
970 csg_slot->group = group;
971
972 return 0;
973 }
974
975 /**
976 * group_unbind_locked() - Unbind a group from a slot.
977 * @group: Group to unbind.
978 *
979 * Return: 0 on success, a negative error code otherwise.
980 */
981 static int
group_unbind_locked(struct panthor_group * group)982 group_unbind_locked(struct panthor_group *group)
983 {
984 struct panthor_device *ptdev = group->ptdev;
985 struct panthor_csg_slot *slot;
986
987 lockdep_assert_held(&ptdev->scheduler->lock);
988
989 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
990 return -EINVAL;
991
992 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
993 return -EINVAL;
994
995 slot = &ptdev->scheduler->csg_slots[group->csg_id];
996 panthor_vm_idle(group->vm);
997 group->csg_id = -1;
998
999 /* Tiler OOM events will be re-issued next time the group is scheduled. */
1000 atomic_set(&group->tiler_oom, 0);
1001 cancel_work(&group->tiler_oom_work);
1002
1003 for (u32 i = 0; i < group->queue_count; i++)
1004 group->queues[i]->doorbell_id = -1;
1005
1006 slot->group = NULL;
1007
1008 group_put(group);
1009 return 0;
1010 }
1011
/**
 * cs_slot_prog_locked() - Program a queue slot
 * @ptdev: Device.
 * @csg_id: Group slot ID.
 * @cs_id: Queue slot ID.
 *
 * Program a queue slot with the queue information so things can start being
 * executed on this queue.
 *
 * The group slot must have a group bound to it already (group_bind_locked()).
 */
static void
cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);

	lockdep_assert_held(&ptdev->scheduler->lock);

	/* Resync the input extract pointer with the FW-reported one; insert
	 * must never be behind extract.
	 */
	queue->iface.input->extract = queue->iface.output->extract;
	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);

	/* Describe the ring buffer and its input/output interfaces to the FW. */
	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
				  CS_CONFIG_DOORBELL(queue->doorbell_id);
	cs_iface->input->ack_irq_mask = ~0;
	panthor_fw_update_reqs(cs_iface, req,
			       CS_IDLE_SYNC_WAIT |
			       CS_IDLE_EMPTY |
			       CS_STATE_START |
			       CS_EXTRACT_EVENT,
			       CS_IDLE_SYNC_WAIT |
			       CS_IDLE_EMPTY |
			       CS_STATE_MASK |
			       CS_EXTRACT_EVENT);
	/* If jobs are pending in the ring buffer, resume the job timeout that
	 * was suspended in cs_slot_reset_locked().
	 */
	if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
		drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
		queue->timeout_suspended = false;
	}
}
1055
/**
 * cs_slot_reset_locked() - Reset a queue slot
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Change the queue slot state to STOP and suspend the queue timeout if
 * the queue is not blocked.
 *
 * The group slot must have a group bound to it (group_bind_locked()).
 *
 * Return: Always 0.
 */
static int
cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
	struct panthor_queue *queue = group->queues[cs_id];

	lockdep_assert_held(&ptdev->scheduler->lock);

	/* Ask the FW to stop this command stream. */
	panthor_fw_update_reqs(cs_iface, req,
			       CS_STATE_STOP,
			       CS_STATE_MASK);

	/* If the queue is blocked, we want to keep the timeout running, so
	 * we can detect unbounded waits and kill the group when that happens.
	 * Otherwise, suspend the drm_sched timeout and stash the remaining
	 * time so cs_slot_prog_locked() can resume it later.
	 */
	if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
		queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
		queue->timeout_suspended = true;
		WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
	}

	return 0;
}
1091
1092 /**
1093 * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1094 * @ptdev: Device.
1095 * @csg_id: Group slot ID.
1096 *
1097 * Group slot priority update happens asynchronously. When we receive a
1098 * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can
1099 * reflect it to our panthor_csg_slot object.
1100 */
1101 static void
csg_slot_sync_priority_locked(struct panthor_device * ptdev,u32 csg_id)1102 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1103 {
1104 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1105 struct panthor_fw_csg_iface *csg_iface;
1106
1107 lockdep_assert_held(&ptdev->scheduler->lock);
1108
1109 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1110 csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
1111 }
1112
/**
 * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Queue state is updated on group suspend or STATUS_UPDATE event.
 */
static void
cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
	struct panthor_queue *queue = group->queues[cs_id];
	struct panthor_fw_cs_iface *cs_iface =
		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);

	u32 status_wait_cond;

	switch (cs_iface->output->status_blocked_reason) {
	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
		/* Not blocked: the queue is idle when the ring buffer has been
		 * fully consumed and no deferred operations are in flight
		 * (scoreboards all clear).
		 */
		if (queue->iface.input->insert == queue->iface.output->extract &&
		    cs_iface->output->status_scoreboards == 0)
			group->idle_queues |= BIT(cs_id);
		break;

	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
		/* Make sure the group is tracked on the waiting list if it
		 * isn't already.
		 */
		if (list_empty(&group->wait_node)) {
			list_move_tail(&group->wait_node,
				       &group->ptdev->scheduler->groups.waiting);
		}

		/* The queue is only blocked if there's no deferred operation
		 * pending, which can be checked through the scoreboard status.
		 */
		if (!cs_iface->output->status_scoreboards)
			group->blocked_queues |= BIT(cs_id);

		/* Record the sync object address, reference value and
		 * comparison condition the queue is waiting on, so the wait
		 * can be re-evaluated on sync updates.
		 */
		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;

			queue->syncwait.sync64 = true;
			queue->syncwait.ref |= sync_val_hi << 32;
		} else {
			queue->syncwait.sync64 = false;
		}
		break;

	default:
		/* Other reasons are not blocking. Consider the queue as runnable
		 * in those cases.
		 */
		break;
	}
}
1171
1172 static void
csg_slot_sync_queues_state_locked(struct panthor_device * ptdev,u32 csg_id)1173 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1174 {
1175 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1176 struct panthor_group *group = csg_slot->group;
1177 u32 i;
1178
1179 lockdep_assert_held(&ptdev->scheduler->lock);
1180
1181 group->idle_queues = 0;
1182 group->blocked_queues = 0;
1183
1184 for (i = 0; i < group->queue_count; i++) {
1185 if (group->queues[i])
1186 cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1187 }
1188 }
1189
/**
 * csg_slot_sync_state_locked() - Synchronize a group's state with the FW-acked CSG state
 * @ptdev: Device.
 * @csg_id: Group slot ID.
 *
 * Translates the CSG state acked by the FW into a panthor group state, and
 * performs the transition work (queue slot resets, queue state sync on
 * suspend, reset scheduling on unknown state).
 */
static void
csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_fw_csg_iface *csg_iface;
	struct panthor_group *group;
	enum panthor_group_state new_state, old_state;
	u32 csg_state;

	lockdep_assert_held(&ptdev->scheduler->lock);

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	group = csg_slot->group;

	/* Nothing to sync if no group is bound to this slot. */
	if (!group)
		return;

	old_state = group->state;
	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
	switch (csg_state) {
	case CSG_STATE_START:
	case CSG_STATE_RESUME:
		new_state = PANTHOR_CS_GROUP_ACTIVE;
		break;
	case CSG_STATE_TERMINATE:
		new_state = PANTHOR_CS_GROUP_TERMINATED;
		break;
	case CSG_STATE_SUSPEND:
		new_state = PANTHOR_CS_GROUP_SUSPENDED;
		break;
	default:
		/* The unknown state might be caused by a FW state corruption,
		 * which means the group metadata can't be trusted anymore, and
		 * the SUSPEND operation might propagate the corruption to the
		 * suspend buffers. Flag the group state as unknown to make
		 * sure it's unusable after that point.
		 */
		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
			csg_id, csg_state);
		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
		break;
	}

	if (old_state == new_state)
		return;

	/* The unknown state might be caused by a FW issue, reset the FW to
	 * take a fresh start.
	 */
	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
		panthor_device_schedule_reset(ptdev);

	/* On suspend, capture the queue idle/blocked masks so the scheduler
	 * can later tell whether the group is worth rescheduling.
	 */
	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
		csg_slot_sync_queues_state_locked(ptdev, csg_id);

	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
		u32 i;

		/* Reset the queue slots so we start from a clean
		 * state when starting/resuming a new group on this
		 * CSG slot. No wait needed here, and no ringbell
		 * either, since the CS slot will only be re-used
		 * on the next CSG start operation.
		 */
		for (i = 0; i < group->queue_count; i++) {
			if (group->queues[i])
				cs_slot_reset_locked(ptdev, csg_id, i);
		}
	}

	group->state = new_state;
}
1262
/**
 * csg_slot_prog_locked() - Program a group slot
 * @ptdev: Device.
 * @csg_id: Group slot ID.
 * @priority: Priority to assign to the slot.
 *
 * Program all existing queue slots of the bound group, then fill the CSG
 * interface with the group's resource masks, suspend buffers and address
 * space, so the FW can start/resume the group.
 *
 * Return: 0 on success (including the no-op cases where the slot is empty or
 * the group is already active), -EINVAL on out-of-range @priority or @csg_id.
 */
static int
csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
{
	struct panthor_fw_csg_iface *csg_iface;
	struct panthor_csg_slot *csg_slot;
	struct panthor_group *group;
	u32 queue_mask = 0, i;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (priority > MAX_CSG_PRIO)
		return -EINVAL;

	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
		return -EINVAL;

	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	group = csg_slot->group;
	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
		return 0;

	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);

	/* Program every existing queue and collect them in the doorbell mask. */
	for (i = 0; i < group->queue_count; i++) {
		if (group->queues[i]) {
			cs_slot_prog_locked(ptdev, csg_id, i);
			queue_mask |= BIT(i);
		}
	}

	csg_iface->input->allow_compute = group->compute_core_mask;
	csg_iface->input->allow_fragment = group->fragment_core_mask;
	csg_iface->input->allow_other = group->tiler_core_mask;
	csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
					 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
					 CSG_EP_REQ_TILER(group->max_tiler_cores) |
					 CSG_EP_REQ_PRIORITY(priority);
	csg_iface->input->config = panthor_vm_as(group->vm);

	if (group->suspend_buf)
		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
	else
		csg_iface->input->suspend_buf = 0;

	if (group->protm_suspend_buf) {
		csg_iface->input->protm_suspend_buf =
			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
	} else {
		csg_iface->input->protm_suspend_buf = 0;
	}

	csg_iface->input->ack_irq_mask = ~0;
	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
	return 0;
}
1318
/**
 * cs_slot_process_fatal_event_locked() - Process a CS_FATAL event
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Flags the faulty queue on the bound group, schedules either a device reset
 * (unrecoverable exception) or an immediate tick, and logs the fault details.
 */
static void
cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
				   u32 csg_id, u32 cs_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;
	struct panthor_fw_cs_iface *cs_iface;
	u32 fatal;
	u64 info;

	lockdep_assert_held(&sched->lock);

	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	fatal = cs_iface->output->fatal;
	info = cs_iface->output->fatal_info;

	if (group)
		group->fatal_queues |= BIT(cs_id);

	if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
		/* If this exception is unrecoverable, queue a reset, and make
		 * sure we stop scheduling groups until the reset has happened.
		 */
		panthor_device_schedule_reset(ptdev);
		cancel_delayed_work(&sched->tick_work);
	} else {
		/* Recoverable: let the next tick evict the faulty group. */
		sched_queue_delayed_work(sched, tick, 0);
	}

	drm_warn(&ptdev->base,
		 "CSG slot %d CS slot: %d\n"
		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
		 csg_id, cs_id,
		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
		 (unsigned int)CS_EXCEPTION_DATA(fatal),
		 info);
}
1360
/**
 * cs_slot_process_fault_event_locked() - Process a CS_FAULT event
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * For inherited faults, propagates the error to the in-flight job covering
 * the current extract pointer, then logs the fault details.
 */
static void
cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
				   u32 csg_id, u32 cs_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;
	struct panthor_queue *queue = group && cs_id < group->queue_count ?
				      group->queues[cs_id] : NULL;
	struct panthor_fw_cs_iface *cs_iface;
	u32 fault;
	u64 info;

	lockdep_assert_held(&sched->lock);

	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	fault = cs_iface->output->fault;
	info = cs_iface->output->fault_info;

	if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
		u64 cs_extract = queue->iface.output->extract;
		struct panthor_job *job;

		/* Walk the in-flight jobs in submission order: skip jobs fully
		 * behind the extract pointer, stop at jobs not reached yet, and
		 * flag the one currently being executed with an error.
		 */
		spin_lock(&queue->fence_ctx.lock);
		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
			if (cs_extract >= job->ringbuf.end)
				continue;

			if (cs_extract < job->ringbuf.start)
				break;

			dma_fence_set_error(job->done_fence, -EINVAL);
		}
		spin_unlock(&queue->fence_ctx.lock);
	}

	drm_warn(&ptdev->base,
		 "CSG slot %d CS slot: %d\n"
		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
		 csg_id, cs_id,
		 (unsigned int)CS_EXCEPTION_TYPE(fault),
		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
		 (unsigned int)CS_EXCEPTION_DATA(fault),
		 info);
}
1408
/**
 * group_process_tiler_oom() - Handle a tiler OOM event on one queue
 * @group: Group the event was reported for.
 * @cs_id: Queue slot the event was reported on.
 *
 * Samples the heap state from the FW CS interface, grows the tiler heap
 * outside the scheduler lock, then pushes the new chunk back to the FW if the
 * group is still on a slot.
 *
 * Return: 0 on success (including the group-got-scheduled-out case), a
 * negative error code otherwise.
 */
static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	u32 renderpasses_in_flight, pending_frag_count;
	struct panthor_heap_pool *heaps = NULL;
	u64 heap_address, new_chunk_va = 0;
	u32 vt_start, vt_end, frag_end;
	int ret, csg_id;

	/* Sample the heap state under the scheduler lock: it's only valid
	 * while the group is bound to a CSG slot.
	 */
	mutex_lock(&sched->lock);
	csg_id = group->csg_id;
	if (csg_id >= 0) {
		struct panthor_fw_cs_iface *cs_iface;

		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
		heaps = panthor_vm_get_heap_pool(group->vm, false);
		heap_address = cs_iface->output->heap_address;
		vt_start = cs_iface->output->heap_vt_start;
		vt_end = cs_iface->output->heap_vt_end;
		frag_end = cs_iface->output->heap_frag_end;
		renderpasses_in_flight = vt_start - frag_end;
		pending_frag_count = vt_end - frag_end;
	}
	mutex_unlock(&sched->lock);

	/* The group got scheduled out, we stop here. We will get a new tiler OOM event
	 * when it's scheduled again.
	 */
	if (unlikely(csg_id < 0))
		return 0;

	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
		ret = -EINVAL;
	} else {
		/* We do the allocation without holding the scheduler lock to avoid
		 * blocking the scheduling.
		 */
		ret = panthor_heap_grow(heaps, heap_address,
					renderpasses_in_flight,
					pending_frag_count, &new_chunk_va);
	}

	/* If the heap context doesn't have memory for us, we want to let the
	 * FW try to reclaim memory by waiting for fragment jobs to land or by
	 * executing the tiler OOM exception handler, which is supposed to
	 * implement incremental rendering.
	 */
	if (ret && ret != -ENOMEM) {
		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
		group->fatal_queues |= BIT(cs_id);
		sched_queue_delayed_work(sched, tick, 0);
		goto out_put_heap_pool;
	}

	/* Re-take the lock: the group may have been scheduled out while the
	 * allocation was in progress.
	 */
	mutex_lock(&sched->lock);
	csg_id = group->csg_id;
	if (csg_id >= 0) {
		struct panthor_fw_csg_iface *csg_iface;
		struct panthor_fw_cs_iface *cs_iface;

		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);

		/* Hand the new chunk to the FW, ack the TILER_OOM event and
		 * ring the doorbell so it resumes execution.
		 */
		cs_iface->input->heap_start = new_chunk_va;
		cs_iface->input->heap_end = new_chunk_va;
		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
	}
	mutex_unlock(&sched->lock);

	/* We allocated a chunk, but couldn't link it to the heap
	 * context because the group was scheduled out while we were
	 * allocating memory. We need to return this chunk to the heap.
	 */
	if (unlikely(csg_id < 0 && new_chunk_va))
		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);

	ret = 0;

out_put_heap_pool:
	panthor_heap_pool_put(heaps);
	return ret;
}
1494
group_tiler_oom_work(struct work_struct * work)1495 static void group_tiler_oom_work(struct work_struct *work)
1496 {
1497 struct panthor_group *group =
1498 container_of(work, struct panthor_group, tiler_oom_work);
1499 u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1500
1501 while (tiler_oom) {
1502 u32 cs_id = ffs(tiler_oom) - 1;
1503
1504 group_process_tiler_oom(group, cs_id);
1505 tiler_oom &= ~BIT(cs_id);
1506 }
1507
1508 group_put(group);
1509 }
1510
1511 static void
cs_slot_process_tiler_oom_event_locked(struct panthor_device * ptdev,u32 csg_id,u32 cs_id)1512 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1513 u32 csg_id, u32 cs_id)
1514 {
1515 struct panthor_scheduler *sched = ptdev->scheduler;
1516 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1517 struct panthor_group *group = csg_slot->group;
1518
1519 lockdep_assert_held(&sched->lock);
1520
1521 if (drm_WARN_ON(&ptdev->base, !group))
1522 return;
1523
1524 atomic_or(BIT(cs_id), &group->tiler_oom);
1525
1526 /* We don't use group_queue_work() here because we want to queue the
1527 * work item to the heap_alloc_wq.
1528 */
1529 group_get(group);
1530 if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1531 group_put(group);
1532 }
1533
/**
 * cs_slot_process_irq_locked() - Dispatch pending CS events for one queue slot
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Return: true if the caller should ring the CS doorbell (a fault or tiler
 * OOM event was processed), false otherwise.
 */
static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
				       u32 csg_id, u32 cs_id)
{
	struct panthor_fw_cs_iface *cs_iface;
	u32 req, ack, events;

	lockdep_assert_held(&ptdev->scheduler->lock);

	/* Pending events are the req/ack bits that differ. */
	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	req = cs_iface->input->req;
	ack = cs_iface->output->ack;
	events = (req ^ ack) & CS_EVT_MASK;

	if (events & CS_FATAL)
		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);

	if (events & CS_FAULT)
		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);

	if (events & CS_TILER_OOM)
		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);

	/* We don't acknowledge the TILER_OOM event since its handling is
	 * deferred to a separate work.
	 */
	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);

	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
}
1563
/* Mirror the FW-reported idle status of @csg_id into our slot bookkeeping. */
static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_fw_csg_iface *iface;

	lockdep_assert_held(&sched->lock);

	iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	sched->csg_slots[csg_id].idle =
		iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
}
1574
/**
 * csg_slot_process_idle_event_locked() - Handle a CSG_IDLE event
 * @ptdev: Device.
 * @csg_id: Group slot the event was reported on (only used for dispatch;
 *          the scheduler-wide flag is enough here).
 */
static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	lockdep_assert_held(&sched->lock);

	sched->might_have_idle_groups = true;

	/* Schedule a tick so we can evict idle groups and schedule non-idle
	 * ones. This will also update runtime PM and devfreq busy/idle states,
	 * so the device can lower its frequency or get suspended.
	 */
	sched_queue_delayed_work(sched, tick, 0);
}
1589
/* A sync object was updated on @csg_id: kick the per-group and global
 * sync-update works so blocked queues get re-evaluated.
 */
static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
					u32 csg_id)
{
	struct panthor_group *grp = ptdev->scheduler->csg_slots[csg_id].group;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (grp)
		group_queue_work(grp, sync_upd);

	sched_queue_work(ptdev->scheduler, sync_upd);
}
1603
1604 static void
csg_slot_process_progress_timer_event_locked(struct panthor_device * ptdev,u32 csg_id)1605 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1606 {
1607 struct panthor_scheduler *sched = ptdev->scheduler;
1608 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1609 struct panthor_group *group = csg_slot->group;
1610
1611 lockdep_assert_held(&sched->lock);
1612
1613 drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1614
1615 group = csg_slot->group;
1616 if (!drm_WARN_ON(&ptdev->base, !group))
1617 group->timedout = true;
1618
1619 sched_queue_delayed_work(sched, tick, 0);
1620 }
1621
/**
 * sched_process_csg_irq_locked() - Process the CSG-level and CS-level IRQs of one slot
 * @ptdev: Device.
 * @csg_id: Group slot the IRQ was reported on.
 */
static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
{
	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
	struct panthor_fw_csg_iface *csg_iface;
	u32 ring_cs_db_mask = 0;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
		return;

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	req = READ_ONCE(csg_iface->input->req);
	ack = READ_ONCE(csg_iface->output->ack);
	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
	csg_events = (req ^ ack) & CSG_EVT_MASK;

	/* There may not be any pending CSG/CS interrupts to process */
	if (req == ack && cs_irq_req == cs_irq_ack)
		return;

	/* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before
	 * examining the CS_ACK & CS_REQ bits. This would ensure that Host
	 * doesn't miss an interrupt for the CS in the race scenario where
	 * whilst Host is servicing an interrupt for the CS, firmware sends
	 * another interrupt for that CS.
	 */
	csg_iface->input->cs_irq_ack = cs_irq_req;

	/* Acknowledge the CSG-level events before dispatching them. */
	panthor_fw_update_reqs(csg_iface, req, ack,
			       CSG_SYNC_UPDATE |
			       CSG_IDLE |
			       CSG_PROGRESS_TIMER_EVENT);

	if (csg_events & CSG_IDLE)
		csg_slot_process_idle_event_locked(ptdev, csg_id);

	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);

	/* Dispatch per-queue (CS) IRQs, collecting the slots that need their
	 * doorbell rung.
	 */
	cs_irqs = cs_irq_req ^ cs_irq_ack;
	while (cs_irqs) {
		u32 cs_id = ffs(cs_irqs) - 1;

		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
			ring_cs_db_mask |= BIT(cs_id);

		cs_irqs &= ~BIT(cs_id);
	}

	if (csg_events & CSG_SYNC_UPDATE)
		csg_slot_sync_update_locked(ptdev, csg_id);

	if (ring_cs_db_mask)
		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);

	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
}
1681
sched_process_idle_event_locked(struct panthor_device * ptdev)1682 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1683 {
1684 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1685
1686 lockdep_assert_held(&ptdev->scheduler->lock);
1687
1688 /* Acknowledge the idle event and schedule a tick. */
1689 panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1690 sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1691 }
1692
1693 /**
1694 * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1695 * @ptdev: Device.
1696 */
sched_process_global_irq_locked(struct panthor_device * ptdev)1697 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1698 {
1699 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1700 u32 req, ack, evts;
1701
1702 lockdep_assert_held(&ptdev->scheduler->lock);
1703
1704 req = READ_ONCE(glb_iface->input->req);
1705 ack = READ_ONCE(glb_iface->output->ack);
1706 evts = (req ^ ack) & GLB_EVT_MASK;
1707
1708 if (evts & GLB_IDLE)
1709 sched_process_idle_event_locked(ptdev);
1710 }
1711
process_fw_events_work(struct work_struct * work)1712 static void process_fw_events_work(struct work_struct *work)
1713 {
1714 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1715 fw_events_work);
1716 u32 events = atomic_xchg(&sched->fw_events, 0);
1717 struct panthor_device *ptdev = sched->ptdev;
1718
1719 mutex_lock(&sched->lock);
1720
1721 if (events & JOB_INT_GLOBAL_IF) {
1722 sched_process_global_irq_locked(ptdev);
1723 events &= ~JOB_INT_GLOBAL_IF;
1724 }
1725
1726 while (events) {
1727 u32 csg_id = ffs(events) - 1;
1728
1729 sched_process_csg_irq_locked(ptdev, csg_id);
1730 events &= ~BIT(csg_id);
1731 }
1732
1733 mutex_unlock(&sched->lock);
1734 }
1735
1736 /**
1737 * panthor_sched_report_fw_events() - Report FW events to the scheduler.
1738 */
panthor_sched_report_fw_events(struct panthor_device * ptdev,u32 events)1739 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1740 {
1741 if (!ptdev->scheduler)
1742 return;
1743
1744 atomic_or(events, &ptdev->scheduler->fw_events);
1745 sched_queue_work(ptdev->scheduler, fw_events);
1746 }
1747
/* dma_fence driver name, shared by all panthor fences. */
static const char *fence_get_driver_name(struct dma_fence *fence)
{
	return "panthor";
}

/* Timeline name for queue (job completion) fences. */
static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
{
	return "queue-fence";
}

/* Queue fence ops: only the name callbacks are provided. */
static const struct dma_fence_ops panthor_queue_fence_ops = {
	.get_driver_name = fence_get_driver_name,
	.get_timeline_name = queue_fence_get_timeline_name,
};
1762
/**
 * struct panthor_csg_slots_upd_ctx - Context for batching CSG slot request updates
 *
 * Filled with csgs_upd_ctx_queue_reqs() and applied with
 * csgs_upd_ctx_apply_locked().
 */
struct panthor_csg_slots_upd_ctx {
	/** @update_mask: Bitmask of CSG slots with a queued request. */
	u32 update_mask;

	/** @timedout_mask: Bitmask of CSG slots whose update wasn't acked in time. */
	u32 timedout_mask;

	/** @requests: Per-slot request, indexed by CSG slot ID. */
	struct {
		u32 value;	/* Request bits to apply. */
		u32 mask;	/* Which bits of the request word to update. */
	} requests[MAX_CSGS];
};
1771
csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx * ctx)1772 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1773 {
1774 memset(ctx, 0, sizeof(*ctx));
1775 }
1776
/* Queue a request update for @csg_id: merge @value into the bits selected by
 * @mask, accumulate the mask, and flag the slot as needing an update.
 */
static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
				    struct panthor_csg_slots_upd_ctx *ctx,
				    u32 csg_id, u32 value, u32 mask)
{
	u32 cur;

	if (drm_WARN_ON(&ptdev->base, !mask) ||
	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
		return;

	cur = ctx->requests[csg_id].value;
	ctx->requests[csg_id].value = (cur & ~mask) | (value & mask);
	ctx->requests[csg_id].mask |= mask;
	ctx->update_mask |= BIT(csg_id);
}
1789
/**
 * csgs_upd_ctx_apply_locked() - Push all queued CSG requests to the FW and wait for acks
 * @ptdev: Device.
 * @ctx: Update context filled with csgs_upd_ctx_queue_reqs().
 *
 * First pass writes every queued request, then the doorbells are rung once,
 * and a second pass waits for each slot's ack and synchronizes the
 * corresponding driver-side state.
 *
 * Return: 0 on success, -ETIMEDOUT if at least one slot didn't ack in time
 * (timed-out slots are recorded in @ctx->timedout_mask).
 */
static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
				     struct panthor_csg_slots_upd_ctx *ctx)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	u32 update_slots = ctx->update_mask;

	lockdep_assert_held(&sched->lock);

	if (!ctx->update_mask)
		return 0;

	/* First pass: write all queued requests to the FW interfaces. */
	while (update_slots) {
		struct panthor_fw_csg_iface *csg_iface;
		u32 csg_id = ffs(update_slots) - 1;

		update_slots &= ~BIT(csg_id);
		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
		panthor_fw_update_reqs(csg_iface, req,
				       ctx->requests[csg_id].value,
				       ctx->requests[csg_id].mask);
	}

	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);

	/* Second pass: wait for the acks and sync the driver-side state for
	 * whatever got acknowledged, even on timeout.
	 */
	update_slots = ctx->update_mask;
	while (update_slots) {
		struct panthor_fw_csg_iface *csg_iface;
		u32 csg_id = ffs(update_slots) - 1;
		u32 req_mask = ctx->requests[csg_id].mask, acked;
		int ret;

		update_slots &= ~BIT(csg_id);
		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);

		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);

		if (acked & CSG_ENDPOINT_CONFIG)
			csg_slot_sync_priority_locked(ptdev, csg_id);

		if (acked & CSG_STATE_MASK)
			csg_slot_sync_state_locked(ptdev, csg_id);

		if (acked & CSG_STATUS_UPDATE) {
			csg_slot_sync_queues_state_locked(ptdev, csg_id);
			csg_slot_sync_idle_state_locked(ptdev, csg_id);
		}

		/* Only flag a timeout if some requested bits are still not
		 * reflected by the FW ack word.
		 */
		if (ret && acked != req_mask &&
		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
			drm_err(&ptdev->base, "CSG %d update request timedout", csg_id);
			ctx->timedout_mask |= BIT(csg_id);
		}
	}

	if (ctx->timedout_mask)
		return -ETIMEDOUT;

	return 0;
}
1849
/**
 * struct panthor_sched_tick_ctx - State collected while running a scheduler tick
 */
struct panthor_sched_tick_ctx {
	/** @old_groups: Per-priority lists of groups currently bound to a CSG slot. */
	struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];

	/** @groups: Per-priority lists of groups picked to run next. */
	struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];

	/** @idle_group_count: Number of picked groups that are idle. */
	u32 idle_group_count;

	/** @group_count: Total number of picked groups. */
	u32 group_count;

	/** @min_priority: Lowest priority seen among the picked groups. */
	enum panthor_csg_priority min_priority;

	/** @vms: Distinct VMs referenced by the picked groups (at most @as_count). */
	struct panthor_vm *vms[MAX_CS_PER_CSG];

	/** @as_count: Number of valid entries in @vms. */
	u32 as_count;

	/* NOTE(review): set outside this chunk — presumably requests an
	 * immediate follow-up tick; confirm against the tick handler.
	 */
	bool immediate_tick;

	/** @csg_upd_failed_mask: Bitmask of CSG slots whose update failed. */
	u32 csg_upd_failed_mask;
};
1861
1862 static bool
tick_ctx_is_full(const struct panthor_scheduler * sched,const struct panthor_sched_tick_ctx * ctx)1863 tick_ctx_is_full(const struct panthor_scheduler *sched,
1864 const struct panthor_sched_tick_ctx *ctx)
1865 {
1866 return ctx->group_count == sched->csg_slot_count;
1867 }
1868
1869 static bool
group_is_idle(struct panthor_group * group)1870 group_is_idle(struct panthor_group *group)
1871 {
1872 struct panthor_device *ptdev = group->ptdev;
1873 u32 inactive_queues;
1874
1875 if (group->csg_id >= 0)
1876 return ptdev->scheduler->csg_slots[group->csg_id].idle;
1877
1878 inactive_queues = group->idle_queues | group->blocked_queues;
1879 return hweight32(inactive_queues) == group->queue_count;
1880 }
1881
1882 static bool
group_can_run(struct panthor_group * group)1883 group_can_run(struct panthor_group *group)
1884 {
1885 return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1886 group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
1887 !group->destroyed && group->fatal_queues == 0 &&
1888 !group->timedout;
1889 }
1890
/**
 * tick_ctx_pick_groups_from_list() - Move runnable groups from @queue into the tick context
 * @sched: Scheduler.
 * @ctx: Tick context being filled.
 * @queue: Source list of candidate groups.
 * @skip_idle_groups: Skip groups that are currently idle.
 * @owned_by_tick_ctx: True if @queue already holds tick-context references,
 *                     in which case no extra group reference is taken.
 *
 * Stops as soon as the context has one group per CSG slot, or when picking a
 * group would exceed the number of available address spaces.
 */
static void
tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
			       struct panthor_sched_tick_ctx *ctx,
			       struct list_head *queue,
			       bool skip_idle_groups,
			       bool owned_by_tick_ctx)
{
	struct panthor_group *group, *tmp;

	if (tick_ctx_is_full(sched, ctx))
		return;

	list_for_each_entry_safe(group, tmp, queue, run_node) {
		u32 i;

		if (!group_can_run(group))
			continue;

		if (skip_idle_groups && group_is_idle(group))
			continue;

		/* Look for the group's VM among the VMs already picked. */
		for (i = 0; i < ctx->as_count; i++) {
			if (ctx->vms[i] == group->vm)
				break;
		}

		/* New VM but all AS slots are taken: can't pick this group. */
		if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
			continue;

		/* Take a reference unless the source list already owns one. */
		if (!owned_by_tick_ctx)
			group_get(group);

		list_move_tail(&group->run_node, &ctx->groups[group->priority]);
		ctx->group_count++;
		if (group_is_idle(group))
			ctx->idle_group_count++;

		/* First time we see this VM: record it. */
		if (i == ctx->as_count)
			ctx->vms[ctx->as_count++] = group->vm;

		if (ctx->min_priority > group->priority)
			ctx->min_priority = group->priority;

		if (tick_ctx_is_full(sched, ctx))
			return;
	}
}
1938
/**
 * tick_ctx_insert_old_group() - Insert an on-slot group into the tick context's old lists
 * @sched: Scheduler.
 * @ctx: Tick context.
 * @group: Group currently bound to a CSG slot.
 * @full_tick: True for a full tick, where slot-priority rotation applies.
 */
static void
tick_ctx_insert_old_group(struct panthor_scheduler *sched,
			  struct panthor_sched_tick_ctx *ctx,
			  struct panthor_group *group,
			  bool full_tick)
{
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
	struct panthor_group *other_group;

	/* Lightweight tick: plain FIFO append, no rotation. */
	if (!full_tick) {
		list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
		return;
	}

	/* Rotate to make sure groups with lower CSG slot
	 * priorities have a chance to get a higher CSG slot
	 * priority next time they get picked. This priority
	 * has an impact on resource request ordering, so it's
	 * important to make sure we don't let one group starve
	 * all other groups with the same group priority.
	 */
	list_for_each_entry(other_group,
			    &ctx->old_groups[csg_slot->group->priority],
			    run_node) {
		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];

		/* Insert just before the first entry with a strictly higher
		 * slot priority, keeping the list sorted by ascending slot
		 * priority.
		 */
		if (other_csg_slot->priority > csg_slot->priority) {
			list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
			return;
		}
	}

	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
}
1973
static void
tick_ctx_init(struct panthor_scheduler *sched,
	      struct panthor_sched_tick_ctx *ctx,
	      bool full_tick)
{
	struct panthor_device *ptdev = sched->ptdev;
	struct panthor_csg_slots_upd_ctx upd_ctx;
	int ret;
	u32 i;

	/* Start from a clean context; min_priority is set to the
	 * out-of-range value and lowered as groups are picked.
	 */
	memset(ctx, 0, sizeof(*ctx));
	csgs_upd_ctx_init(&upd_ctx);

	ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
		INIT_LIST_HEAD(&ctx->groups[i]);
		INIT_LIST_HEAD(&ctx->old_groups[i]);
	}

	/* Collect all currently-resident groups into the old_groups lists,
	 * taking a reference on each, and queue a STATUS_UPDATE request so
	 * the FW state is fresh when the tick makes its decisions.
	 */
	for (i = 0; i < sched->csg_slot_count; i++) {
		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
		struct panthor_group *group = csg_slot->group;
		struct panthor_fw_csg_iface *csg_iface;

		if (!group)
			continue;

		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
		group_get(group);

		/* If there was unhandled faults on the VM, force processing of
		 * CSG IRQs, so we can flag the faulty queue.
		 */
		if (panthor_vm_has_unhandled_faults(group->vm)) {
			sched_process_csg_irq_locked(ptdev, i);

			/* No fatal fault reported, flag all queues as faulty. */
			if (!group->fatal_queues)
				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
		}

		tick_ctx_insert_old_group(sched, ctx, group, full_tick);
		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
					CSG_STATUS_UPDATE);
	}

	/* If the FW didn't ack in time, schedule a reset and record which
	 * slots timed out so the tick can bail out early.
	 */
	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
	if (ret) {
		panthor_device_schedule_reset(ptdev);
		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
	}
}
2027
/* Finish terminating a group: fail all in-flight jobs with an error that
 * reflects why the group died, unblock any syncobj waiters, and release
 * the job references. Must only be called on groups that can't run.
 */
static void
group_term_post_processing(struct panthor_group *group)
{
	struct panthor_job *job, *tmp;
	LIST_HEAD(faulty_jobs);
	bool cookie;
	u32 i = 0;

	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
		return;

	/* Everything below signals fences: annotate the critical section. */
	cookie = dma_fence_begin_signalling();
	for (i = 0; i < group->queue_count; i++) {
		struct panthor_queue *queue = group->queues[i];
		struct panthor_syncobj_64b *syncobj;
		int err;

		/* Pick the error matching the cause of death: fatal fault
		 * on this queue, group timeout, or plain cancellation.
		 */
		if (group->fatal_queues & BIT(i))
			err = -EINVAL;
		else if (group->timedout)
			err = -ETIMEDOUT;
		else
			err = -ECANCELED;

		if (!queue)
			continue;

		spin_lock(&queue->fence_ctx.lock);
		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
			list_move_tail(&job->node, &faulty_jobs);
			dma_fence_set_error(job->done_fence, err);
			dma_fence_signal_locked(job->done_fence);
		}
		spin_unlock(&queue->fence_ctx.lock);

		/* Manually update the syncobj seqno to unblock waiters. */
		syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
		syncobj->status = ~0;
		syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
		sched_queue_work(group->ptdev->scheduler, sync_upd);
	}
	dma_fence_end_signalling(cookie);

	/* Drop the references held by the (now failed) in-flight jobs. */
	list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
		list_del_init(&job->node);
		panthor_job_put(&job->base);
	}
}
2076
group_term_work(struct work_struct * work)2077 static void group_term_work(struct work_struct *work)
2078 {
2079 struct panthor_group *group =
2080 container_of(work, struct panthor_group, term_work);
2081
2082 group_term_post_processing(group);
2083 group_put(group);
2084 }
2085
/* Release everything still attached to a tick context, either because the
 * tick completed normally (only dead groups remain in old_groups) or
 * because a CSG update timed out and the tick was aborted mid-way.
 */
static void
tick_ctx_cleanup(struct panthor_scheduler *sched,
		 struct panthor_sched_tick_ctx *ctx)
{
	struct panthor_device *ptdev = sched->ptdev;
	struct panthor_group *group, *tmp;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
		list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
			/* If everything went fine, we should only have groups
			 * to be terminated in the old_groups lists.
			 */
			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
				    group_can_run(group));

			if (!group_can_run(group)) {
				/* Dead group: detach it and schedule the
				 * termination work.
				 */
				list_del_init(&group->run_node);
				list_del_init(&group->wait_node);
				group_queue_work(group, term);
			} else if (group->csg_id >= 0) {
				/* Still bound to a CSG slot: just detach. */
				list_del_init(&group->run_node);
			} else {
				/* Unbound but runnable: put it back on the
				 * idle/runnable queues.
				 */
				list_move(&group->run_node,
					  group_is_idle(group) ?
					  &sched->groups.idle[group->priority] :
					  &sched->groups.runnable[group->priority]);
			}
			group_put(group);
		}
	}

	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
		/* If everything went fine, the groups to schedule lists should
		 * be empty.
		 */
		drm_WARN_ON(&ptdev->base,
			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));

		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
			if (group->csg_id >= 0) {
				list_del_init(&group->run_node);
			} else {
				list_move(&group->run_node,
					  group_is_idle(group) ?
					  &sched->groups.idle[group->priority] :
					  &sched->groups.runnable[group->priority]);
			}
			group_put(group);
		}
	}
}
2138
/* Apply the scheduling decisions collected in @ctx: suspend/terminate
 * evicted groups, adjust priorities of groups that keep their slot,
 * bind and start newly-picked groups, then return everything to the
 * appropriate scheduler lists. Bails out early (leaving cleanup to
 * tick_ctx_cleanup()) if a FW CSG update times out.
 */
static void
tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
{
	struct panthor_group *group, *tmp;
	struct panthor_device *ptdev = sched->ptdev;
	struct panthor_csg_slot *csg_slot;
	int prio, new_csg_prio = MAX_CSG_PRIO, i;
	u32 free_csg_slots = 0;
	struct panthor_csg_slots_upd_ctx upd_ctx;
	int ret;

	csgs_upd_ctx_init(&upd_ctx);

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		/* Suspend or terminate evicted groups. */
		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
			bool term = !group_can_run(group);
			int csg_id = group->csg_id;

			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
				continue;

			csg_slot = &sched->csg_slots[csg_id];
			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
						term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
						CSG_STATE_MASK);
		}

		/* Update priorities on already running groups. */
		list_for_each_entry(group, &ctx->groups[prio], run_node) {
			struct panthor_fw_csg_iface *csg_iface;
			int csg_id = group->csg_id;

			/* Not resident yet: reserve a CSG priority level for
			 * it, it'll be programmed in the "start new groups"
			 * pass below.
			 */
			if (csg_id < 0) {
				new_csg_prio--;
				continue;
			}

			csg_slot = &sched->csg_slots[csg_id];
			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
			/* Priority unchanged: nothing to reprogram. */
			if (csg_slot->priority == new_csg_prio) {
				new_csg_prio--;
				continue;
			}

			panthor_fw_update_reqs(csg_iface, endpoint_req,
					       CSG_EP_REQ_PRIORITY(new_csg_prio),
					       CSG_EP_REQ_PRIORITY_MASK);
			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
						CSG_ENDPOINT_CONFIG);
			new_csg_prio--;
		}
	}

	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
	if (ret) {
		panthor_device_schedule_reset(ptdev);
		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
		return;
	}

	/* Unbind evicted groups. */
	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
			/* This group is gone. Process interrupts to clear
			 * any pending interrupts before we start the new
			 * group.
			 */
			if (group->csg_id >= 0)
				sched_process_csg_irq_locked(ptdev, group->csg_id);

			group_unbind_locked(group);
		}
	}

	/* Collect the CSG slots freed by the unbind pass above. */
	for (i = 0; i < sched->csg_slot_count; i++) {
		if (!sched->csg_slots[i].group)
			free_csg_slots |= BIT(i);
	}

	csgs_upd_ctx_init(&upd_ctx);
	new_csg_prio = MAX_CSG_PRIO;

	/* Start new groups. */
	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		list_for_each_entry(group, &ctx->groups[prio], run_node) {
			int csg_id = group->csg_id;
			struct panthor_fw_csg_iface *csg_iface;

			/* Already resident: it kept (or had its priority
			 * updated on) its slot in the first pass.
			 */
			if (csg_id >= 0) {
				new_csg_prio--;
				continue;
			}

			csg_id = ffs(free_csg_slots) - 1;
			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
				break;

			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
			csg_slot = &sched->csg_slots[csg_id];
			group_bind_locked(group, csg_id);
			csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
			/* Resume previously-suspended groups, start fresh
			 * ones.
			 */
			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
						group->state == PANTHOR_CS_GROUP_SUSPENDED ?
						CSG_STATE_RESUME : CSG_STATE_START,
						CSG_STATE_MASK);
			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
						CSG_ENDPOINT_CONFIG);
			free_csg_slots &= ~BIT(csg_id);
		}
	}

	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
	if (ret) {
		panthor_device_schedule_reset(ptdev);
		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
		return;
	}

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
			list_del_init(&group->run_node);

			/* If the group has been destroyed while we were
			 * scheduling, ask for an immediate tick to
			 * re-evaluate as soon as possible and get rid of
			 * this dangling group.
			 */
			if (group->destroyed)
				ctx->immediate_tick = true;
			group_put(group);
		}

		/* Return evicted groups to the idle or run queues. Groups
		 * that can no longer be run (because they've been destroyed
		 * or experienced an unrecoverable error) will be scheduled
		 * for destruction in tick_ctx_cleanup().
		 */
		list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
			if (!group_can_run(group))
				continue;

			if (group_is_idle(group))
				list_move_tail(&group->run_node, &sched->groups.idle[prio]);
			else
				list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
			group_put(group);
		}
	}

	sched->used_csg_slot_count = ctx->group_count;
	sched->might_have_idle_groups = ctx->idle_group_count > 0;
}
2294
/* Decide when the next scheduler tick should happen based on the outcome
 * of the current one. Returns the delay until the next tick in jiffies,
 * or U64_MAX when no periodic tick is needed (the tick is then re-armed
 * by external events: new jobs, unblocked groups, ...).
 */
static u64
tick_ctx_update_resched_target(struct panthor_scheduler *sched,
			       const struct panthor_sched_tick_ctx *ctx)
{
	/* We had space left, no need to reschedule until some external event happens. */
	if (!tick_ctx_is_full(sched, ctx))
		goto no_tick;

	/* If idle groups were scheduled, no need to wake up until some external
	 * event happens (group unblocked, new job submitted, ...).
	 */
	if (ctx->idle_group_count)
		goto no_tick;

	/* min_priority still at its init value means no group was picked,
	 * which contradicts tick_ctx_is_full() above.
	 */
	if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
		goto no_tick;

	/* If there are groups of the same priority waiting, we need to
	 * keep the scheduler ticking, otherwise, we'll just wait for
	 * new groups with higher priority to be queued.
	 */
	if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
		u64 resched_target = sched->last_tick + sched->tick_period;

		/* Only move the target forward if it's stale (before the
		 * last tick) or later than one tick period from now.
		 */
		if (time_before64(sched->resched_target, sched->last_tick) ||
		    time_before64(resched_target, sched->resched_target))
			sched->resched_target = resched_target;

		return sched->resched_target - sched->last_tick;
	}

no_tick:
	sched->resched_target = U64_MAX;
	return U64_MAX;
}
2330
/* Main scheduler tick: snapshot the current FW/group state, pick which
 * groups get the CSG slots for the next period (RT first, then non-idle,
 * then idle), program the FW accordingly, and re-arm the tick if needed.
 * Runs with a pm_runtime reference and under sched->lock.
 */
static void tick_work(struct work_struct *work)
{
	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
						       tick_work.work);
	struct panthor_device *ptdev = sched->ptdev;
	struct panthor_sched_tick_ctx ctx;
	u64 remaining_jiffies = 0, resched_delay;
	u64 now = get_jiffies_64();
	int prio, ret, cookie;

	/* Bail out if the device is gone (unplugged). */
	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (drm_WARN_ON(&ptdev->base, ret))
		goto out_dev_exit;

	/* remaining_jiffies != 0 means we were woken before the scheduled
	 * tick deadline (a "forced" partial tick).
	 */
	if (time_before64(now, sched->resched_target))
		remaining_jiffies = sched->resched_target - now;

	mutex_lock(&sched->lock);
	if (panthor_device_reset_is_pending(sched->ptdev))
		goto out_unlock;

	tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
	if (ctx.csg_upd_failed_mask)
		goto out_cleanup_ctx;

	if (remaining_jiffies) {
		/* Scheduling forced in the middle of a tick. Only RT groups
		 * can preempt non-RT ones. Currently running RT groups can't be
		 * preempted.
		 */
		for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
		     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
		     prio--) {
			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
						       true, true);
			if (prio == PANTHOR_CSG_PRIORITY_RT) {
				tick_ctx_pick_groups_from_list(sched, &ctx,
							       &sched->groups.runnable[prio],
							       true, false);
			}
		}
	}

	/* First pick non-idle groups */
	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
	     prio--) {
		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
					       true, false);
		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
	}

	/* If we have free CSG slots left, pick idle groups */
	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
	     prio--) {
		/* Check the old_group queue first to avoid reprogramming the slots */
		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
					       false, false);
	}

	tick_ctx_apply(sched, &ctx);
	if (ctx.csg_upd_failed_mask)
		goto out_cleanup_ctx;

	/* Track devfreq busy/idle state and hold a pm reference while any
	 * non-idle group is resident, so the GPU can autosuspend when all
	 * scheduled groups are idle.
	 */
	if (ctx.idle_group_count == ctx.group_count) {
		panthor_devfreq_record_idle(sched->ptdev);
		if (sched->pm.has_ref) {
			pm_runtime_put_autosuspend(ptdev->base.dev);
			sched->pm.has_ref = false;
		}
	} else {
		panthor_devfreq_record_busy(sched->ptdev);
		if (!sched->pm.has_ref) {
			pm_runtime_get(ptdev->base.dev);
			sched->pm.has_ref = true;
		}
	}

	sched->last_tick = now;
	resched_delay = tick_ctx_update_resched_target(sched, &ctx);
	if (ctx.immediate_tick)
		resched_delay = 0;

	if (resched_delay != U64_MAX)
		sched_queue_delayed_work(sched, tick, resched_delay);

out_cleanup_ctx:
	tick_ctx_cleanup(sched, &ctx);

out_unlock:
	mutex_unlock(&sched->lock);
	pm_runtime_mark_last_busy(ptdev->base.dev);
	pm_runtime_put_autosuspend(ptdev->base.dev);

out_dev_exit:
	drm_dev_exit(cookie);
}
2433
/* Evaluate the SYNC_WAIT condition blocking @group's queue @queue_idx.
 *
 * Return: 1 if the wait condition is satisfied (the syncwait object is
 * released in that case), 0 if the queue is still blocked, -EINVAL if the
 * syncobj couldn't be mapped.
 */
static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
{
	struct panthor_queue *queue = group->queues[queue_idx];
	union {
		struct panthor_syncobj_64b sync64;
		struct panthor_syncobj_32b sync32;
	} *syncobj;
	bool result;
	u64 value;

	syncobj = panthor_queue_get_syncwait_obj(group, queue);
	if (!syncobj)
		return -EINVAL;

	/* Read the seqno with the width the wait was issued with. */
	value = queue->syncwait.sync64 ?
		syncobj->sync64.seqno :
		syncobj->sync32.seqno;

	/* Wait condition: either "greater than" or "less or equal". */
	if (queue->syncwait.gt)
		result = value > queue->syncwait.ref;
	else
		result = value <= queue->syncwait.ref;

	if (result)
		panthor_queue_put_syncwait_obj(queue);

	return result;
}
2462
/* Re-evaluate all groups waiting on sync objects. Queues whose wait
 * condition is now satisfied are unblocked; groups that become fully
 * runnable again are moved back to the runnable lists, and an immediate
 * tick is requested if an RT group got unblocked.
 */
static void sync_upd_work(struct work_struct *work)
{
	struct panthor_scheduler *sched = container_of(work,
						      struct panthor_scheduler,
						      sync_upd_work);
	struct panthor_group *group, *tmp;
	bool immediate_tick = false;

	mutex_lock(&sched->lock);
	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
		u32 tested_queues = group->blocked_queues;
		u32 unblocked_queues = 0;

		/* Test each blocked queue of this group. */
		while (tested_queues) {
			u32 cs_id = ffs(tested_queues) - 1;
			int ret;

			ret = panthor_queue_eval_syncwait(group, cs_id);
			drm_WARN_ON(&group->ptdev->base, ret < 0);
			if (ret)
				unblocked_queues |= BIT(cs_id);

			tested_queues &= ~BIT(cs_id);
		}

		if (unblocked_queues) {
			group->blocked_queues &= ~unblocked_queues;

			/* Only move groups that aren't currently resident on
			 * a CSG slot; resident ones are already scheduled.
			 */
			if (group->csg_id < 0) {
				list_move(&group->run_node,
					  &sched->groups.runnable[group->priority]);
				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
					immediate_tick = true;
			}
		}

		/* Nothing left blocking this group: drop it from the
		 * waiting list.
		 */
		if (!group->blocked_queues)
			list_del_init(&group->wait_node);
	}
	mutex_unlock(&sched->lock);

	if (immediate_tick)
		sched_queue_delayed_work(sched, tick, 0);
}
2507
/* Called when new jobs land on @group's queues (@queue_mask). Marks the
 * touched queues non-idle and decides whether/when the scheduler tick
 * needs to run to get the group executing. Caller holds sched->lock.
 */
static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct list_head *queue = &sched->groups.runnable[group->priority];
	u64 delay_jiffies = 0;
	bool was_idle;
	u64 now;

	if (!group_can_run(group))
		return;

	/* All updated queues are blocked, no need to wake up the scheduler. */
	if ((queue_mask & group->blocked_queues) == queue_mask)
		return;

	was_idle = group_is_idle(group);
	group->idle_queues &= ~queue_mask;

	/* Don't mess up with the lists if we're in a middle of a reset. */
	if (atomic_read(&sched->reset.in_progress))
		return;

	/* Group transitioned idle -> non-idle: move it to the runnable
	 * list for its priority.
	 */
	if (was_idle && !group_is_idle(group))
		list_move_tail(&group->run_node, queue);

	/* RT groups are preemptive. */
	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
		sched_queue_delayed_work(sched, tick, 0);
		return;
	}

	/* Some groups might be idle, force an immediate tick to
	 * re-evaluate.
	 */
	if (sched->might_have_idle_groups) {
		sched_queue_delayed_work(sched, tick, 0);
		return;
	}

	/* Scheduler is ticking, nothing to do. */
	if (sched->resched_target != U64_MAX) {
		/* If there are free slots, force immediating ticking. */
		if (sched->used_csg_slot_count < sched->csg_slot_count)
			sched_queue_delayed_work(sched, tick, 0);

		return;
	}

	/* Scheduler tick was off, recalculate the resched_target based on the
	 * last tick event, and queue the scheduler work.
	 */
	now = get_jiffies_64();
	sched->resched_target = sched->last_tick + sched->tick_period;
	if (sched->used_csg_slot_count == sched->csg_slot_count &&
	    time_before64(now, sched->resched_target))
		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);

	sched_queue_delayed_work(sched, tick, delay_jiffies);
}
2568
queue_stop(struct panthor_queue * queue,struct panthor_job * bad_job)2569 static void queue_stop(struct panthor_queue *queue,
2570 struct panthor_job *bad_job)
2571 {
2572 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2573 }
2574
queue_start(struct panthor_queue * queue)2575 static void queue_start(struct panthor_queue *queue)
2576 {
2577 struct panthor_job *job;
2578
2579 /* Re-assign the parent fences. */
2580 list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2581 job->base.s_fence->parent = dma_fence_get(job->done_fence);
2582
2583 drm_sched_start(&queue->scheduler);
2584 }
2585
/* Stop all queues of @group and park it on the reset.stopped_groups list
 * (holding a reference) until the reset completes. Caller must hold
 * sched->reset.lock.
 */
static void panthor_group_stop(struct panthor_group *group)
{
	struct panthor_scheduler *sched = group->ptdev->scheduler;

	lockdep_assert_held(&sched->reset.lock);

	for (u32 i = 0; i < group->queue_count; i++)
		queue_stop(group->queues[i], NULL);

	/* Reference dropped in panthor_group_start(). */
	group_get(group);
	list_move_tail(&group->run_node, &sched->reset.stopped_groups);
}
2598
/* Restart a group that was stopped by panthor_group_stop(): resume its
 * queues, and either put it back on the idle/runnable lists or queue its
 * termination if it can no longer run. Caller must hold reset.lock.
 */
static void panthor_group_start(struct panthor_group *group)
{
	struct panthor_scheduler *sched = group->ptdev->scheduler;

	lockdep_assert_held(&group->ptdev->scheduler->reset.lock);

	for (u32 i = 0; i < group->queue_count; i++)
		queue_start(group->queues[i]);

	if (group_can_run(group)) {
		list_move_tail(&group->run_node,
			       group_is_idle(group) ?
			       &sched->groups.idle[group->priority] :
			       &sched->groups.runnable[group->priority]);
	} else {
		/* Dead group: detach and schedule termination. */
		list_del_init(&group->run_node);
		list_del_init(&group->wait_node);
		group_queue_work(group, term);
	}

	/* Drop the reference taken in panthor_group_stop(). */
	group_put(group);
}
2621
/* Queue a scheduler tick with no delay. */
static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	sched_queue_delayed_work(sched, tick, 0);
}
2628
2629 /**
2630 * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
2631 */
panthor_sched_report_mmu_fault(struct panthor_device * ptdev)2632 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2633 {
2634 /* Force a tick to immediately kill faulty groups. */
2635 if (ptdev->scheduler)
2636 panthor_sched_immediate_tick(ptdev);
2637 }
2638
/**
 * panthor_sched_resume() - Resume the scheduler after a device resume.
 * @ptdev: Device.
 */
void panthor_sched_resume(struct panthor_device *ptdev)
{
	/* Force a tick to re-evaluate after a resume. */
	panthor_sched_immediate_tick(ptdev);
}
2644
/**
 * panthor_sched_suspend() - Suspend all resident groups.
 * @ptdev: Device.
 *
 * Asks the FW to suspend (or terminate, for unrunnable groups) every
 * group currently bound to a CSG slot, escalating suspend timeouts to
 * termination, flushes GPU caches so the suspend state is coherent in
 * memory, then unbinds everything from the slots.
 */
void panthor_sched_suspend(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slots_upd_ctx upd_ctx;
	struct panthor_group *group;
	u32 suspended_slots;
	u32 i;

	mutex_lock(&sched->lock);
	csgs_upd_ctx_init(&upd_ctx);
	for (i = 0; i < sched->csg_slot_count; i++) {
		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];

		if (csg_slot->group) {
			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
						group_can_run(csg_slot->group) ?
						CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
						CSG_STATE_MASK);
		}
	}

	suspended_slots = upd_ctx.update_mask;

	/* Timeouts are handled through timedout_mask below, so the return
	 * value is intentionally ignored here.
	 */
	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
	suspended_slots &= ~upd_ctx.timedout_mask;

	if (upd_ctx.timedout_mask) {
		u32 slot_mask = upd_ctx.timedout_mask;

		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
		csgs_upd_ctx_init(&upd_ctx);
		while (slot_mask) {
			u32 csg_id = ffs(slot_mask) - 1;
			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];

			/* We consider group suspension failures as fatal and flag the
			 * group as unusable by setting timedout=true.
			 */
			csg_slot->group->timedout = true;

			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
						CSG_STATE_TERMINATE,
						CSG_STATE_MASK);
			slot_mask &= ~BIT(csg_id);
		}

		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);

		slot_mask = upd_ctx.timedout_mask;
		while (slot_mask) {
			u32 csg_id = ffs(slot_mask) - 1;
			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];

			/* Terminate command timedout, but the soft-reset will
			 * automatically terminate all active groups, so let's
			 * force the state to halted here.
			 */
			if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
			slot_mask &= ~BIT(csg_id);
		}
	}

	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
	 * If the flush fails, flag all queues for termination.
	 */
	if (suspended_slots) {
		bool flush_caches_failed = false;
		u32 slot_mask = suspended_slots;

		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
			flush_caches_failed = true;

		while (slot_mask) {
			u32 csg_id = ffs(slot_mask) - 1;
			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];

			if (flush_caches_failed)
				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
			else
				csg_slot_sync_update_locked(ptdev, csg_id);

			slot_mask &= ~BIT(csg_id);
		}
	}

	/* Unbind all groups from their CSG slots and park them on the idle
	 * lists (or schedule their termination if they can't run anymore).
	 */
	for (i = 0; i < sched->csg_slot_count; i++) {
		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];

		group = csg_slot->group;
		if (!group)
			continue;

		group_get(group);

		if (group->csg_id >= 0)
			sched_process_csg_irq_locked(ptdev, group->csg_id);

		group_unbind_locked(group);

		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));

		if (group_can_run(group)) {
			list_add(&group->run_node,
				 &sched->groups.idle[group->priority]);
		} else {
			/* We don't bother stopping the scheduler if the group is
			 * faulty, the group termination work will finish the job.
			 */
			list_del_init(&group->wait_node);
			group_queue_work(group, term);
		}
		group_put(group);
	}
	mutex_unlock(&sched->lock);
}
2761
/**
 * panthor_sched_pre_reset() - Prepare the scheduler for a GPU reset.
 * @ptdev: Device.
 *
 * Cancels the scheduler works, suspends all resident groups, then stops
 * every group that could still accept jobs so nothing gets submitted
 * while the reset is in progress. Paired with panthor_sched_post_reset().
 */
void panthor_sched_pre_reset(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group, *group_tmp;
	u32 i;

	mutex_lock(&sched->reset.lock);
	atomic_set(&sched->reset.in_progress, true);

	/* Cancel all scheduler works. Once this is done, these works can't be
	 * scheduled again until the reset operation is complete.
	 */
	cancel_work_sync(&sched->sync_upd_work);
	cancel_delayed_work_sync(&sched->tick_work);

	panthor_sched_suspend(ptdev);

	/* Stop all groups that might still accept jobs, so we don't get passed
	 * new jobs while we're resetting.
	 */
	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
		/* All groups should be in the idle lists. */
		drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
			panthor_group_stop(group);
	}

	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
			panthor_group_stop(group);
	}

	mutex_unlock(&sched->reset.lock);
}
2796
/**
 * panthor_sched_post_reset() - Restart the scheduler after a GPU reset.
 * @ptdev: Device.
 * @reset_failed: True if the reset didn't succeed; previously running
 * groups are then considered terminated.
 *
 * Restarts all groups stopped in panthor_sched_pre_reset() and re-arms
 * the scheduler works unless the reset failed.
 */
void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group, *group_tmp;

	mutex_lock(&sched->reset.lock);

	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
		/* Consider all previously running group as terminated if the
		 * reset failed.
		 */
		if (reset_failed)
			group->state = PANTHOR_CS_GROUP_TERMINATED;

		panthor_group_start(group);
	}

	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
	 * kick the scheduler.
	 */
	atomic_set(&sched->reset.in_progress, false);
	mutex_unlock(&sched->reset.lock);

	/* No need to queue a tick and update syncs if the reset failed. */
	if (!reset_failed) {
		sched_queue_delayed_work(sched, tick, 0);
		sched_queue_work(sched, sync_upd);
	}
}
2826
/* Per-group sync-update handler: signal the done fences of every in-flight
 * job whose syncobj seqno has been reached, then release those jobs.
 * Drops the group reference taken when the work was queued.
 */
static void group_sync_upd_work(struct work_struct *work)
{
	struct panthor_group *group =
		container_of(work, struct panthor_group, sync_upd_work);
	struct panthor_job *job, *job_tmp;
	LIST_HEAD(done_jobs);
	u32 queue_idx;
	bool cookie;

	cookie = dma_fence_begin_signalling();
	for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
		struct panthor_queue *queue = group->queues[queue_idx];
		struct panthor_syncobj_64b *syncobj;

		if (!queue)
			continue;

		syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));

		spin_lock(&queue->fence_ctx.lock);
		list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
			/* Jobs complete in submission order: stop at the
			 * first one whose seqno hasn't been reached yet.
			 */
			if (syncobj->seqno < job->done_fence->seqno)
				break;

			list_move_tail(&job->node, &done_jobs);
			dma_fence_signal_locked(job->done_fence);
		}
		spin_unlock(&queue->fence_ctx.lock);
	}
	dma_fence_end_signalling(cookie);

	/* Drop the references held by the completed jobs. */
	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
		list_del_init(&job->node);
		panthor_job_put(&job->base);
	}

	group_put(group);
}
2865
/* Batch of instructions to be copied to a queue ring-buffer for one job.
 * @buffer is zero-initialized so the slot padding added in
 * copy_instrs_to_ringbuf() needs no explicit zero-fill.
 */
struct panthor_job_ringbuf_instrs {
	u64 buffer[MAX_INSTRS_PER_JOB];
	u32 count;
};
2870
/* One CS instruction together with the profiling mode(s) it belongs to.
 * Instructions are only emitted when their profile_mask matches the
 * job's profiling mask (PANTHOR_DEVICE_PROFILING_DISABLED == always).
 */
struct panthor_job_instr {
	u32 profile_mask;
	u64 instr;
};

/* Convenience initializer for a struct panthor_job_instr entry. */
#define JOB_INSTR(__prof, __instr) \
	{ \
		.profile_mask = __prof, \
		.instr = __instr, \
	}
2881
/* Copy a job's instruction batch into the queue ring-buffer at the slot
 * reserved for the job, handling wrap-around at the end of the buffer.
 * Assumes the ring-buffer size is a power of two (masking below).
 */
static void
copy_instrs_to_ringbuf(struct panthor_queue *queue,
		       struct panthor_job *job,
		       struct panthor_job_ringbuf_instrs *instrs)
{
	u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
	u64 start = job->ringbuf.start & (ringbuf_size - 1);
	u64 size, written;

	/*
	 * We need to write a whole slot, including any trailing zeroes
	 * that may come at the end of it. Also, because instrs.buffer has
	 * been zero-initialised, there's no need to pad it with 0's
	 */
	instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
	size = instrs->count * sizeof(u64);
	WARN_ON(size > ringbuf_size);
	written = min(ringbuf_size - start, size);

	memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);

	/* Wrap around: copy the remainder to the start of the buffer. */
	if (written < size)
		memcpy(queue->ringbuf->kmap,
		       &instrs->buffer[written / sizeof(u64)],
		       size - written);
}
2908
/* Parameters needed to build the CS instruction sequence for one job:
 * scratch CS register indices, GPU VAs of the sync/profiling slots, the
 * user command-stream location/size and flush/wait state.
 */
struct panthor_job_cs_params {
	u32 profile_mask;
	u64 addr_reg; u64 val_reg;
	u64 cycle_reg; u64 time_reg;
	u64 sync_addr; u64 times_addr;
	u64 cs_start; u64 cs_size;
	u32 last_flush; u32 waitall_mask;
};
2917
/* Fill @params with everything needed to emit the CS instruction
 * sequence for @job (see prepare_job_instrs()).
 */
static void
get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
{
	struct panthor_group *group = job->group;
	struct panthor_queue *queue = group->queues[job->queue_idx];
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;

	/* Use the first CS registers not preserved across calls as
	 * scratch registers for the kernel-emitted instructions.
	 */
	params->addr_reg = ptdev->csif_info.cs_reg_count -
			   ptdev->csif_info.unpreserved_cs_reg_count;
	params->val_reg = params->addr_reg + 2;
	params->cycle_reg = params->addr_reg;
	params->time_reg = params->val_reg;

	/* GPU VA of this queue's syncobj and of the job's profiling slot. */
	params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
			    job->queue_idx * sizeof(struct panthor_syncobj_64b);
	params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
			     (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
	params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);

	params->cs_start = job->call_info.start;
	params->cs_size = job->call_info.size;
	params->last_flush = job->call_info.latest_flush;

	params->profile_mask = job->profiling.mask;
}
2944
/* Wrappers around JOB_INSTR() binding a fixed profiling mask. Instructions
 * whose mask doesn't intersect the job's profile_mask are skipped by
 * prepare_job_instrs(); JOB_INSTR_ALWAYS() instructions are presumably kept
 * unconditionally (PANTHOR_DEVICE_PROFILING_DISABLED expected to be 0 —
 * see the profile_mask test in prepare_job_instrs()).
 */
#define JOB_INSTR_ALWAYS(instr) \
	JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
#define JOB_INSTR_TIMESTAMP(instr) \
	JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
#define JOB_INSTR_CYCLES(instr) \
	JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
2951
/* Build the instruction sequence for a job into @instrs, keeping only the
 * profiling instructions enabled in @params->profile_mask, and padding the
 * result to a cache-line multiple with zeroes (NOP).
 */
static void
prepare_job_instrs(const struct panthor_job_cs_params *params,
		   struct panthor_job_ringbuf_instrs *instrs)
{
	const struct panthor_job_instr instr_seq[] = {
		/* MOV32 rX+2, cs.latest_flush */
		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
		JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
				 (0 << 16) | 0x233),
		/* MOV48 rX:rX+1, cycles_offset */
		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
				 (params->times_addr +
				  offsetof(struct panthor_job_profiling_data, cycles.before))),
		/* STORE_STATE cycles */
		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
		/* MOV48 rX:rX+1, time_offset */
		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
				    (params->times_addr +
				     offsetof(struct panthor_job_profiling_data, time.before))),
		/* STORE_STATE timer */
		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
		/* MOV48 rX:rX+1, cs.start */
		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
		/* MOV32 rX+2, cs.size */
		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
		JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
		/* CALL rX:rX+1, rX+2 */
		JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
				 (params->val_reg << 32)),
		/* MOV48 rX:rX+1, cycles_offset */
		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
				 (params->times_addr +
				  offsetof(struct panthor_job_profiling_data, cycles.after))),
		/* STORE_STATE cycles */
		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
		/* MOV48 rX:rX+1, time_offset */
		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
				    (params->times_addr +
				     offsetof(struct panthor_job_profiling_data, time.after))),
		/* STORE_STATE timer */
		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
		/* MOV48 rX:rX+1, sync_addr */
		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
		/* MOV48 rX+2, #1 */
		JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
		/* WAIT(all) */
		JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
		/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
		JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
				 (params->val_reg << 32) | (0 << 16) | 1),
		/* ERROR_BARRIER, so we can recover from faults at job boundaries. */
		JOB_INSTR_ALWAYS((47ull << 56)),
	};
	u32 pad;

	instrs->count = 0;

	/* NEED to be cacheline aligned to please the prefetcher. */
	static_assert(sizeof(instrs->buffer) % 64 == 0,
		      "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");

	/* Make sure we have enough storage to store the whole sequence. */
	static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
		      ARRAY_SIZE(instrs->buffer),
		      "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");

	for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
		/* If the profile mask of this instruction is not enabled, skip it. */
		if (instr_seq[i].profile_mask &&
		    !(instr_seq[i].profile_mask & params->profile_mask))
			continue;

		instrs->buffer[instrs->count++] = instr_seq[i].instr;
	}

	/* Zero-pad up to the next cache-line boundary (zero is a NOP slot). */
	pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
	memset(&instrs->buffer[instrs->count], 0,
	       (pad - instrs->count) * sizeof(instrs->buffer[0]));
	instrs->count = pad;
}
3034
/* Return the number of ring-buffer instruction slots (credits) a job with
 * the given profiling mask consumes, by building a dummy instruction
 * sequence and counting its (cache-line padded) length.
 */
static u32 calc_job_credits(u32 profile_mask)
{
	struct panthor_job_cs_params dummy_params = {
		.profile_mask = profile_mask,
	};
	struct panthor_job_ringbuf_instrs instrs;

	prepare_job_instrs(&dummy_params, &instrs);

	return instrs.count;
}
3045
/* drm_sched run_job() hook: write the job's instruction slot to the queue
 * ring buffer, kick the FW (or schedule the group if it has no CSG slot),
 * and return the job's done fence.
 */
static struct dma_fence *
queue_run_job(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
	struct panthor_group *group = job->group;
	struct panthor_queue *queue = group->queues[job->queue_idx];
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_job_ringbuf_instrs instrs;
	struct panthor_job_cs_params cs_params;
	struct dma_fence *done_fence;
	int ret;

	/* Stream size is zero, nothing to do except making sure all previously
	 * submitted jobs are done before we signal the
	 * drm_sched_job::s_fence::finished fence.
	 */
	if (!job->call_info.size) {
		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
		return dma_fence_get(job->done_fence);
	}

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (drm_WARN_ON(&ptdev->base, ret))
		return ERR_PTR(ret);

	mutex_lock(&sched->lock);
	if (!group_can_run(group)) {
		done_fence = ERR_PTR(-ECANCELED);
		goto out_unlock;
	}

	/* done_fence was allocated (but not initialized) at job creation. */
	dma_fence_init(job->done_fence,
		       &panthor_queue_fence_ops,
		       &queue->fence_ctx.lock,
		       queue->fence_ctx.id,
		       atomic64_inc_return(&queue->fence_ctx.seqno));

	/* Profiling slots are used round-robin. */
	job->profiling.slot = queue->profiling.seqno++;
	if (queue->profiling.seqno == queue->profiling.slot_count)
		queue->profiling.seqno = 0;

	job->ringbuf.start = queue->iface.input->insert;

	get_job_cs_params(job, &cs_params);
	prepare_job_instrs(&cs_params, &instrs);
	copy_instrs_to_ringbuf(queue, job, &instrs);

	job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));

	/* Reference dropped when the job leaves the in-flight list. */
	panthor_job_get(&job->base);
	spin_lock(&queue->fence_ctx.lock);
	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
	spin_unlock(&queue->fence_ctx.lock);

	/* Make sure the ring buffer is updated before the INSERT
	 * register.
	 */
	wmb();

	queue->iface.input->extract = queue->iface.output->extract;
	queue->iface.input->insert = job->ringbuf.end;

	if (group->csg_id < 0) {
		/* If the queue is blocked, we want to keep the timeout running, so we
		 * can detect unbounded waits and kill the group when that happens.
		 * Otherwise, we suspend the timeout so the time we spend waiting for
		 * a CSG slot is not counted.
		 */
		if (!(group->blocked_queues & BIT(job->queue_idx)) &&
		    !queue->timeout_suspended) {
			queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
			queue->timeout_suspended = true;
		}

		group_schedule_locked(group, BIT(job->queue_idx));
	} else {
		/* Group already has a CSG slot: ring the doorbell so the FW
		 * picks up the new job.
		 */
		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
		if (!sched->pm.has_ref &&
		    !(group->blocked_queues & BIT(job->queue_idx))) {
			pm_runtime_get(ptdev->base.dev);
			sched->pm.has_ref = true;
		}
		panthor_devfreq_record_busy(sched->ptdev);
	}

	/* Update the last fence. */
	dma_fence_put(queue->fence_ctx.last_fence);
	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);

	done_fence = dma_fence_get(job->done_fence);

out_unlock:
	mutex_unlock(&sched->lock);
	pm_runtime_mark_last_busy(ptdev->base.dev);
	pm_runtime_put_autosuspend(ptdev->base.dev);

	return done_fence;
}
3145
/* drm_sched timedout_job() hook: mark the group as timed out and either let
 * the scheduler tick evict it (if it holds a CSG slot) or terminate it
 * directly.
 */
static enum drm_gpu_sched_stat
queue_timedout_job(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
	struct panthor_group *group = job->group;
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_queue *queue = group->queues[job->queue_idx];

	drm_warn(&ptdev->base, "job timeout\n");

	/* A job timeout is not expected while a GPU reset is in progress. */
	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));

	queue_stop(queue, job);

	mutex_lock(&sched->lock);
	group->timedout = true;
	if (group->csg_id >= 0) {
		/* Group is on a CSG slot: schedule an immediate tick to
		 * handle the eviction.
		 */
		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
	} else {
		/* Remove from the run queues, so the scheduler can't
		 * pick the group on the next tick.
		 */
		list_del_init(&group->run_node);
		list_del_init(&group->wait_node);

		group_queue_work(group, term);
	}
	mutex_unlock(&sched->lock);

	queue_start(queue);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
3180
/* drm_sched free_job() hook: detach the job from the scheduler, then drop
 * the reference (panthor_job_put() may free the job).
 */
static void queue_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
	panthor_job_put(sched_job);
}
3186
/* drm_sched backend hooks used by every panthor queue scheduler. */
static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
	.run_job = queue_run_job,
	.timedout_job = queue_timedout_job,
	.free_job = queue_free_job,
};
3192
/* Return the number of profiling slots to allocate for a queue whose ring
 * buffer is @cs_ringbuf_size bytes: the maximum number of profiled jobs
 * that can sit in the ring buffer simultaneously.
 */
static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
					    u32 cs_ringbuf_size)
{
	u32 flag_count = fls(PANTHOR_DEVICE_PROFILING_ALL);
	u32 smallest_profiled_job = U32_MAX;
	u32 i;

	/*
	 * We want to calculate the minimum size of a profiled job's CS,
	 * because since they need additional instructions for the sampling
	 * of performance metrics, they might take up further slots in
	 * the queue's ringbuffer. This means we might not need as many job
	 * slots for keeping track of their profiling information. What we
	 * need is the maximum number of slots we should allocate to this end,
	 * which matches the maximum number of profiled jobs we can place
	 * simultaneously in the queue's ring buffer.
	 * That has to be calculated separately for every single job profiling
	 * flag, but not in the case job profiling is disabled, since unprofiled
	 * jobs don't need to keep track of this at all.
	 */
	for (i = 0; i < flag_count; i++)
		smallest_profiled_job = min(smallest_profiled_job,
					    calc_job_credits(BIT(i)));

	return DIV_ROUND_UP(cs_ringbuf_size,
			    smallest_profiled_job * sizeof(u64));
}
3219
3220 static struct panthor_queue *
group_create_queue(struct panthor_group * group,const struct drm_panthor_queue_create * args)3221 group_create_queue(struct panthor_group *group,
3222 const struct drm_panthor_queue_create *args)
3223 {
3224 struct drm_gpu_scheduler *drm_sched;
3225 struct panthor_queue *queue;
3226 int ret;
3227
3228 if (args->pad[0] || args->pad[1] || args->pad[2])
3229 return ERR_PTR(-EINVAL);
3230
3231 if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
3232 !is_power_of_2(args->ringbuf_size))
3233 return ERR_PTR(-EINVAL);
3234
3235 if (args->priority > CSF_MAX_QUEUE_PRIO)
3236 return ERR_PTR(-EINVAL);
3237
3238 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
3239 if (!queue)
3240 return ERR_PTR(-ENOMEM);
3241
3242 queue->fence_ctx.id = dma_fence_context_alloc(1);
3243 spin_lock_init(&queue->fence_ctx.lock);
3244 INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
3245
3246 queue->priority = args->priority;
3247
3248 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3249 args->ringbuf_size,
3250 DRM_PANTHOR_BO_NO_MMAP,
3251 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3252 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3253 PANTHOR_VM_KERNEL_AUTO_VA);
3254 if (IS_ERR(queue->ringbuf)) {
3255 ret = PTR_ERR(queue->ringbuf);
3256 goto err_free_queue;
3257 }
3258
3259 ret = panthor_kernel_bo_vmap(queue->ringbuf);
3260 if (ret)
3261 goto err_free_queue;
3262
3263 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3264 &queue->iface.input,
3265 &queue->iface.output,
3266 &queue->iface.input_fw_va,
3267 &queue->iface.output_fw_va);
3268 if (IS_ERR(queue->iface.mem)) {
3269 ret = PTR_ERR(queue->iface.mem);
3270 goto err_free_queue;
3271 }
3272
3273 queue->profiling.slot_count =
3274 calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
3275
3276 queue->profiling.slots =
3277 panthor_kernel_bo_create(group->ptdev, group->vm,
3278 queue->profiling.slot_count *
3279 sizeof(struct panthor_job_profiling_data),
3280 DRM_PANTHOR_BO_NO_MMAP,
3281 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3282 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3283 PANTHOR_VM_KERNEL_AUTO_VA);
3284
3285 if (IS_ERR(queue->profiling.slots)) {
3286 ret = PTR_ERR(queue->profiling.slots);
3287 goto err_free_queue;
3288 }
3289
3290 ret = panthor_kernel_bo_vmap(queue->profiling.slots);
3291 if (ret)
3292 goto err_free_queue;
3293
3294 /*
3295 * Credit limit argument tells us the total number of instructions
3296 * across all CS slots in the ringbuffer, with some jobs requiring
3297 * twice as many as others, depending on their profiling status.
3298 */
3299 ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
3300 group->ptdev->scheduler->wq, 1,
3301 args->ringbuf_size / sizeof(u64),
3302 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
3303 group->ptdev->reset.wq,
3304 NULL, "panthor-queue", group->ptdev->base.dev);
3305 if (ret)
3306 goto err_free_queue;
3307
3308 drm_sched = &queue->scheduler;
3309 ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
3310
3311 return queue;
3312
3313 err_free_queue:
3314 group_free_queue(group, queue);
3315 return ERR_PTR(ret);
3316 }
3317
3318 #define MAX_GROUPS_PER_POOL 128
3319
int panthor_group_create(struct panthor_file *pfile,
			 const struct drm_panthor_group_create *group_args,
			 const struct drm_panthor_queue_create *queue_args)
{
	struct panthor_device *ptdev = pfile->ptdev;
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
	struct panthor_group *group = NULL;
	u32 gid, i, suspend_size;
	int ret;

	if (group_args->pad)
		return -EINVAL;

	if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
		return -EINVAL;

	/* Core masks must be subsets of what the hardware reports present. */
	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
		return -EINVAL;

	/* The max_* core counts can't exceed the number of cores in the
	 * corresponding mask.
	 */
	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
		return -EINVAL;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	spin_lock_init(&group->fatal_lock);
	kref_init(&group->refcount);
	group->state = PANTHOR_CS_GROUP_CREATED;
	/* Not assigned to a CSG slot yet. */
	group->csg_id = -1;

	group->ptdev = ptdev;
	group->max_compute_cores = group_args->max_compute_cores;
	group->compute_core_mask = group_args->compute_core_mask;
	group->max_fragment_cores = group_args->max_fragment_cores;
	group->fragment_core_mask = group_args->fragment_core_mask;
	group->max_tiler_cores = group_args->max_tiler_cores;
	group->tiler_core_mask = group_args->tiler_core_mask;
	group->priority = group_args->priority;

	INIT_LIST_HEAD(&group->wait_node);
	INIT_LIST_HEAD(&group->run_node);
	INIT_WORK(&group->term_work, group_term_work);
	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
	INIT_WORK(&group->release_work, group_release_work);

	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
	if (!group->vm) {
		ret = -EINVAL;
		goto err_put_group;
	}

	/* Allocate the suspend buffers sized as reported by the FW. */
	suspend_size = csg_iface->control->suspend_size;
	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
	if (IS_ERR(group->suspend_buf)) {
		ret = PTR_ERR(group->suspend_buf);
		group->suspend_buf = NULL;
		goto err_put_group;
	}

	suspend_size = csg_iface->control->protm_suspend_size;
	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
	if (IS_ERR(group->protm_suspend_buf)) {
		ret = PTR_ERR(group->protm_suspend_buf);
		group->protm_suspend_buf = NULL;
		goto err_put_group;
	}

	/* One 64-bit syncobj per queue, signalled at job completion. */
	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
						   group_args->queues.count *
						   sizeof(struct panthor_syncobj_64b),
						   DRM_PANTHOR_BO_NO_MMAP,
						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
						   PANTHOR_VM_KERNEL_AUTO_VA);
	if (IS_ERR(group->syncobjs)) {
		ret = PTR_ERR(group->syncobjs);
		goto err_put_group;
	}

	ret = panthor_kernel_bo_vmap(group->syncobjs);
	if (ret)
		goto err_put_group;

	memset(group->syncobjs->kmap, 0,
	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));

	for (i = 0; i < group_args->queues.count; i++) {
		group->queues[i] = group_create_queue(group, &queue_args[i]);
		if (IS_ERR(group->queues[i])) {
			ret = PTR_ERR(group->queues[i]);
			group->queues[i] = NULL;
			goto err_put_group;
		}

		group->queue_count++;
	}

	/* All queues start out idle. */
	group->idle_queues = GENMASK(group->queue_count - 1, 0);

	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
	if (ret)
		goto err_put_group;

	mutex_lock(&sched->reset.lock);
	if (atomic_read(&sched->reset.in_progress)) {
		/* A reset is running: park the group; it will be restarted
		 * when the reset completes.
		 */
		panthor_group_stop(group);
	} else {
		mutex_lock(&sched->lock);
		list_add_tail(&group->run_node,
			      &sched->groups.idle[group->priority]);
		mutex_unlock(&sched->lock);
	}
	mutex_unlock(&sched->reset.lock);

	return gid;

err_put_group:
	group_put(group);
	return ret;
}
3448
int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_device *ptdev = pfile->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group;

	/* xa_erase() detaches the group from the pool and returns the
	 * pool's reference, dropped via group_put() below.
	 */
	group = xa_erase(&gpool->xa, group_handle);
	if (!group)
		return -EINVAL;

	mutex_lock(&sched->reset.lock);
	mutex_lock(&sched->lock);
	group->destroyed = true;
	if (group->csg_id >= 0) {
		/* Group holds a CSG slot: schedule an immediate tick to
		 * evict it.
		 */
		sched_queue_delayed_work(sched, tick, 0);
	} else if (!atomic_read(&sched->reset.in_progress)) {
		/* Remove from the run queues, so the scheduler can't
		 * pick the group on the next tick.
		 */
		list_del_init(&group->run_node);
		list_del_init(&group->wait_node);
		group_queue_work(group, term);
	}
	mutex_unlock(&sched->lock);
	mutex_unlock(&sched->reset.lock);

	group_put(group);
	return 0;
}
3479
/* Look up a group by handle and take a reference on it.
 * Returns NULL when the handle doesn't resolve to a group.
 */
static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
					       u32 group_handle)
{
	struct panthor_group *grp;

	xa_lock(&pool->xa);
	grp = xa_load(&pool->xa, group_handle);
	grp = group_get(grp);
	xa_unlock(&pool->xa);

	return grp;
}
3491
int panthor_group_get_state(struct panthor_file *pfile,
			    struct drm_panthor_group_get_state *get_state)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_device *ptdev = pfile->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group;

	if (get_state->pad)
		return -EINVAL;

	group = group_from_handle(gpool, get_state->group_handle);
	if (!group)
		return -EINVAL;

	/* Clear the whole output struct so unused fields read back as 0. */
	memset(get_state, 0, sizeof(*get_state));

	/* Sample timedout/fatal state under the scheduler lock. */
	mutex_lock(&sched->lock);
	if (group->timedout)
		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
	if (group->fatal_queues) {
		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
		get_state->fatal_queues = group->fatal_queues;
	}
	mutex_unlock(&sched->lock);

	group_put(group);
	return 0;
}
3521
panthor_group_pool_create(struct panthor_file * pfile)3522 int panthor_group_pool_create(struct panthor_file *pfile)
3523 {
3524 struct panthor_group_pool *gpool;
3525
3526 gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
3527 if (!gpool)
3528 return -ENOMEM;
3529
3530 xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3531 pfile->groups = gpool;
3532 return 0;
3533 }
3534
void panthor_group_pool_destroy(struct panthor_file *pfile)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_group *group;
	unsigned long i;

	if (IS_ERR_OR_NULL(gpool))
		return;

	/* Destroy every group still registered in this file's pool. */
	xa_for_each(&gpool->xa, i, group)
		panthor_group_destroy(pfile, i);

	xa_destroy(&gpool->xa);
	kfree(gpool);
	pfile->groups = NULL;
}
3551
/* kref release callback: free a job once its last reference is dropped. */
static void job_release(struct kref *ref)
{
	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);

	/* The job must have left the in-flight list before its last
	 * reference is dropped.
	 */
	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));

	if (job->base.s_fence)
		drm_sched_job_cleanup(&job->base);

	/* done_fence is only dma_fence_init()-ed in queue_run_job(): if its
	 * ops are set it was initialized and a regular put is needed,
	 * otherwise it's a bare allocation that must be freed directly.
	 */
	if (job->done_fence && job->done_fence->ops)
		dma_fence_put(job->done_fence);
	else
		dma_fence_free(job->done_fence);

	group_put(job->group);

	kfree(job);
}
3570
panthor_job_get(struct drm_sched_job * sched_job)3571 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3572 {
3573 if (sched_job) {
3574 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3575
3576 kref_get(&job->refcount);
3577 }
3578
3579 return sched_job;
3580 }
3581
panthor_job_put(struct drm_sched_job * sched_job)3582 void panthor_job_put(struct drm_sched_job *sched_job)
3583 {
3584 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3585
3586 if (sched_job)
3587 kref_put(&job->refcount, job_release);
3588 }
3589
panthor_job_vm(struct drm_sched_job * sched_job)3590 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3591 {
3592 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3593
3594 return job->group->vm;
3595 }
3596
/* Create a job from a userspace queue submission.
 *
 * Validates the submission, takes a group reference, allocates the done
 * fence (for non-empty command streams) and initializes the drm_sched job.
 *
 * Return: the embedded drm_sched_job, or an ERR_PTR() on failure.
 */
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
		   u16 group_handle,
		   const struct drm_panthor_queue_submit *qsubmit)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_job *job;
	u32 credits;
	int ret;

	if (qsubmit->pad)
		return ERR_PTR(-EINVAL);

	/* If stream_addr is zero, so stream_size should be. */
	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
		return ERR_PTR(-EINVAL);

	/* Make sure the address is aligned on 64-byte (cacheline) and the size is
	 * aligned on 8-byte (instruction size).
	 */
	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
		return ERR_PTR(-EINVAL);

	/* bits 24:30 must be zero. */
	if (qsubmit->latest_flush & GENMASK(30, 24))
		return ERR_PTR(-EINVAL);

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	kref_init(&job->refcount);
	job->queue_idx = qsubmit->queue_index;
	job->call_info.size = qsubmit->stream_size;
	job->call_info.start = qsubmit->stream_addr;
	job->call_info.latest_flush = qsubmit->latest_flush;
	INIT_LIST_HEAD(&job->node);

	/* Group reference is dropped in job_release(). */
	job->group = group_from_handle(gpool, group_handle);
	if (!job->group) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (!group_can_run(job->group)) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (job->queue_idx >= job->group->queue_count ||
	    !job->group->queues[job->queue_idx]) {
		ret = -EINVAL;
		goto err_put_job;
	}

	/* Empty command streams don't need a fence, they'll pick the one from
	 * the previously submitted job.
	 */
	if (job->call_info.size) {
		/* Allocated here, dma_fence_init()-ed in queue_run_job(). */
		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
		if (!job->done_fence) {
			ret = -ENOMEM;
			goto err_put_job;
		}
	}

	job->profiling.mask = pfile->ptdev->profile_mask;
	credits = calc_job_credits(job->profiling.mask);
	if (credits == 0) {
		ret = -EINVAL;
		goto err_put_job;
	}

	ret = drm_sched_job_init(&job->base,
				 &job->group->queues[job->queue_idx]->entity,
				 credits, job->group);
	if (ret)
		goto err_put_job;

	return &job->base;

err_put_job:
	panthor_job_put(&job->base);
	return ERR_PTR(ret);
}
3682
/* Attach the job's finished fence to the reservations of the VM the job's
 * group runs on.
 */
void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
}
3690
void panthor_sched_unplug(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	cancel_delayed_work_sync(&sched->tick_work);

	/* Drop the runtime-PM reference taken on job submission, if any. */
	mutex_lock(&sched->lock);
	if (sched->pm.has_ref) {
		pm_runtime_put(ptdev->base.dev);
		sched->pm.has_ref = false;
	}
	mutex_unlock(&sched->lock);
}
3704
/* drmm action releasing the scheduler: drains pending work, destroys the
 * workqueues, and warns if any group is still queued.
 */
static void panthor_sched_fini(struct drm_device *ddev, void *res)
{
	struct panthor_scheduler *sched = res;
	int prio;

	/* Nothing to tear down if init never got past CSG discovery. */
	if (!sched || !sched->csg_slot_count)
		return;

	cancel_delayed_work_sync(&sched->tick_work);

	if (sched->wq)
		destroy_workqueue(sched->wq);

	if (sched->heap_alloc_wq)
		destroy_workqueue(sched->heap_alloc_wq);

	/* All groups should be gone by now. */
	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
	}

	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
}
3728
int panthor_sched_init(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
	struct panthor_scheduler *sched;
	u32 gpu_as_count, num_groups;
	int prio, ret;

	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
	if (!sched)
		return -ENOMEM;

	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
	 */
	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);

	/* The FW-side scheduler might deadlock if two groups with the same
	 * priority try to access a set of resources that overlaps, with part
	 * of the resources being allocated to one group and the other part to
	 * the other group, both groups waiting for the remaining resources to
	 * be allocated. To avoid that, it is recommended to assign each CSG a
	 * different priority. In theory we could allow several groups to have
	 * the same CSG priority if they don't request the same resources, but
	 * that makes the scheduling logic more complicated, so let's clamp
	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
	 */
	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);

	/* We need at least one AS for the MCU and one for the GPU contexts. */
	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
	if (!gpu_as_count) {
		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
			gpu_as_count + 1);
		return -EINVAL;
	}

	sched->ptdev = ptdev;
	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
	sched->csg_slot_count = num_groups;
	sched->cs_slot_count = csg_iface->control->stream_num;
	sched->as_slot_count = gpu_as_count;
	/* Mirror the slot counts into csif_info so other parts of the driver
	 * can consume them.
	 */
	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;

	sched->last_tick = 0;
	sched->resched_target = U64_MAX;
	sched->tick_period = msecs_to_jiffies(10);
	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
	INIT_WORK(&sched->fw_events_work, process_fw_events_work);

	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
	if (ret)
		return ret;

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
		INIT_LIST_HEAD(&sched->groups.idle[prio]);
	}
	INIT_LIST_HEAD(&sched->groups.waiting);

	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&sched->reset.stopped_groups);

	/* sched->heap_alloc_wq will be used for heap chunk allocation on
	 * tiler OOM events, which means we can't use the same workqueue for
	 * the scheduler because works queued by the scheduler are in
	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
	 * work around this limitation.
	 *
	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
	 * allocation path that we can call when a heap OOM is reported. The
	 * FW is smart enough to fall back on other methods if the kernel can't
	 * allocate memory, and fail the tiling job if none of these
	 * countermeasures worked.
	 *
	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
	 * system is running out of memory.
	 */
	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!sched->wq || !sched->heap_alloc_wq) {
		panthor_sched_fini(&ptdev->base, sched);
		drm_err(&ptdev->base, "Failed to allocate the workqueues");
		return -ENOMEM;
	}

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
	if (ret)
		return ret;

	ptdev->scheduler = sched;
	return 0;
}
3829