/* SPDX-License-Identifier: MIT */

#ifndef NOUVEAU_SCHED_H
#define NOUVEAU_SCHED_H

#include <linux/types.h>

#include <drm/drm_exec.h>
#include <drm/gpu_scheduler.h>

#include "nouveau_drv.h"
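
/* Retrieve the struct nouveau_job embedding a given struct drm_sched_job. */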
#define to_nouveau_job(sched_job)		\
		container_of((sched_job), struct nouveau_job, base)

struct nouveau_job_ops;
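
/* Lifecycle of a job: tracks whether initialization, submission and
 * execution succeeded or failed.
 */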
enum nouveau_job_state {
	NOUVEAU_JOB_UNINITIALIZED = 0,
	NOUVEAU_JOB_INITIALIZED,
	NOUVEAU_JOB_SUBMIT_SUCCESS,
	NOUVEAU_JOB_SUBMIT_FAILED,
	NOUVEAU_JOB_RUN_SUCCESS,
	NOUVEAU_JOB_RUN_FAILED,
};
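
/* Arguments describing a single job submission; consumed by
 * nouveau_job_init() to set up a struct nouveau_job.
 */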
struct nouveau_job_args {
	struct drm_file *file_priv;
	struct nouveau_sched_entity *sched_entity;

	enum dma_resv_usage resv_usage;
	bool sync;

	struct {
		struct drm_nouveau_sync *s;
		u32 count;
	} in_sync;

	struct {
		struct drm_nouveau_sync *s;
		u32 count;
	} out_sync;

	struct nouveau_job_ops *ops;
};
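
/* A job handed to the DRM GPU scheduler: wraps a struct drm_sched_job and
 * carries the sync items, the drm_exec context and the backend callbacks
 * (struct nouveau_job_ops) used while the job is processed.
 */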
struct nouveau_job {
	struct drm_sched_job base;

	enum nouveau_job_state state;

	struct nouveau_sched_entity *entity;

	struct drm_file *file_priv;
	struct nouveau_cli *cli;

	struct drm_exec exec;
	enum dma_resv_usage resv_usage;
	struct dma_fence *done_fence;

	bool sync;

	struct {
		struct drm_nouveau_sync *data;
		u32 count;
	} in_sync;

	struct {
		struct drm_nouveau_sync *data;
		struct drm_syncobj **objs;
		struct dma_fence_chain **chains;
		u32 count;
	} out_sync;

	struct nouveau_job_ops {
		/* If .submit() returns without any error, it is guaranteed that
		 * .armed_submit() is called.
		 */
		int (*submit)(struct nouveau_job *);
		void (*armed_submit)(struct nouveau_job *);
		struct dma_fence *(*run)(struct nouveau_job *);
		void (*free)(struct nouveau_job *);
		enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
	} *ops;
};
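
/* Copy user-supplied arrays of struct drm_nouveau_sync (@ins/@inc and
 * @outs/@outc) into @args->in_sync and @args->out_sync.
 */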
int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
			    u32 inc, u64 ins,
			    u32 outc, u64 outs);
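
/* Initialize, submit and tear down jobs. A rough usage sketch follows (not
 * part of this header; the backend type, callbacks and error handling below
 * are illustrative assumptions only):
 *
 *	struct my_job {
 *		struct nouveau_job base;
 *		... backend-specific state ...
 *	};
 *
 *	static struct nouveau_job_ops my_job_ops = {
 *		.submit = my_job_submit,
 *		.armed_submit = my_job_armed_submit,
 *		.run = my_job_run,
 *		.free = my_job_free,
 *	};
 *
 *	struct nouveau_job_args args = {
 *		.file_priv = file_priv,
 *		.sched_entity = entity,
 *		.resv_usage = DMA_RESV_USAGE_WRITE,
 *		.sync = false,
 *		.ops = &my_job_ops,
 *	};
 *
 *	ret = nouveau_job_init(&job->base, &args);
 *	if (ret)
 *		return ret;
 *
 *	ret = nouveau_job_submit(&job->base);
 *	if (ret)
 *		nouveau_job_fini(&job->base);
 */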
int nouveau_job_init(struct nouveau_job *job,
		     struct nouveau_job_args *args);
void nouveau_job_free(struct nouveau_job *job);

int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_fini(struct nouveau_job *job);
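
/* Retrieve the struct nouveau_sched_entity embedding a given
 * struct drm_sched_entity.
 */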
#define to_nouveau_sched_entity(entity)		\
		container_of((entity), struct nouveau_sched_entity, base)
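
/* Scheduler entity: wraps a struct drm_sched_entity and keeps the entity's
 * jobs on a spinlock-protected list, together with a wait queue and a
 * dedicated workqueue.
 */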
struct nouveau_sched_entity {
	struct drm_sched_entity base;
	struct mutex mutex;

	struct workqueue_struct *sched_wq;

	struct {
		struct {
			struct list_head head;
			spinlock_t lock;
		} list;
		struct wait_queue_head wq;
	} job;
};
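
/* Set up and tear down a scheduler entity on the given scheduler and
 * workqueue.
 */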
int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
			      struct drm_gpu_scheduler *sched,
			      struct workqueue_struct *sched_wq);
void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
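
/* Queue @work on the entity's workqueue (@sched_wq). */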
bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
				struct work_struct *work);
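
/* Set up and tear down the scheduler for a nouveau device. */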
int nouveau_sched_init(struct nouveau_drm *drm);
void nouveau_sched_fini(struct nouveau_drm *drm);

#endif