/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}

static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
				       struct dma_fence *fence)
{
	/*
	 * smp_store_release() to ensure another thread racing us
	 * in drm_sched_fence_set_deadline_finished() sees the
	 * fence's parent set before test_bit()
	 */
	smp_store_release(&s_fence->parent, dma_fence_get(fence));
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
		     &s_fence->finished.flags))
		dma_fence_set_deadline(fence, s_fence->deadline);
}
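
/*
 * Note on the pairing above: the smp_store_release() of ->parent here
 * pairs with the smp_load_acquire() of ->parent in
 * drm_sched_fence_set_deadline_finished(). As the comments at both
 * sites note, the intent is that however the race resolves, a deadline
 * requested concurrently with the parent being set is not lost:
 *
 *   - if the deadline setter's set_bit() is visible to the test_bit()
 *     above, this function forwards the stored deadline to the parent;
 *   - otherwise the deadline setter's smp_load_acquire() observes the
 *     just-published parent and forwards the deadline itself.
 *
 * Forwarding from both sides is possible but harmless, since
 * dma_fence_set_deadline() may be called more than once.
 */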

void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent)
{
	/* Set the parent before signaling the scheduled fence, such that
	 * any waiter expecting the parent to be filled after the job has
	 * been scheduled (which is the case for drivers delegating waits
	 * to some firmware) doesn't have to busy wait for parent to show
	 * up.
	 */
	if (!IS_ERR_OR_NULL(parent))
		drm_sched_fence_set_parent(fence, parent);

	dma_fence_signal(&fence->scheduled);
}

void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
{
	if (result)
		dma_fence_set_error(&fence->finished, result);
	dma_fence_signal(&fence->finished);
}

static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);
	return (const char *)fence->sched->name;
}

static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_free - free up an uninitialized fence
 *
 * @fence: fence to free
 *
 * Free up the fence memory. Should only be used if drm_sched_fence_init()
 * has not been called yet.
 */
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
	/* This function should not be called if the fence has been initialized. */
	if (!WARN_ON_ONCE(fence->sched))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}
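
/*
 * Freeing is deferred through call_rcu() above because dma_fence users
 * may still dereference the fence under rcu_read_lock() after the last
 * reference is dropped, so the memory must stay valid until a grace
 * period has elapsed. This is also why drm_sched_fence_slab_fini()
 * calls rcu_barrier() before kmem_cache_destroy(): it waits for all
 * pending drm_sched_fence_free_rcu() callbacks so no deferred free can
 * race the slab teardown. A minimal sketch of the reader side this
 * protects, using the stock dma_fence_get_rcu() helper:
 *
 *	rcu_read_lock();
 *	f = rcu_dereference(ptr);
 *	if (f && !dma_fence_get_rcu(f))
 *		f = NULL;	// refcount already zero: fence is dying,
 *				// but the memory is still safe to touch
 *	rcu_read_unlock();
 */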

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
						  ktime_t deadline)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);
	struct dma_fence *parent;
	unsigned long flags;

	spin_lock_irqsave(&fence->lock, flags);

	/* If we already have an earlier deadline, keep it: */
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
	    ktime_before(fence->deadline, deadline)) {
		spin_unlock_irqrestore(&fence->lock, flags);
		return;
	}

	fence->deadline = deadline;
	set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);

	spin_unlock_irqrestore(&fence->lock, flags);

	/*
	 * smp_load_acquire() to ensure that if we are racing another
	 * thread calling drm_sched_fence_set_parent(), we see the
	 * parent set before it calls test_bit(HAS_DEADLINE_BIT)
	 */
	parent = smp_load_acquire(&fence->parent);
	if (parent)
		dma_fence_set_deadline(parent, deadline);
}
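
/*
 * Earliest-deadline-wins, by example (values are illustrative only): if
 * one waiter requests a deadline of t+8ms and a second waiter later asks
 * for t+16ms, the ktime_before() check above keeps t+8ms, since the
 * tighter constraint subsumes the looser one. A subsequent request of
 * t+4ms would replace the stored deadline and be forwarded to the
 * parent fence, if one is already set.
 */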

static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
	.set_deadline = drm_sched_fence_set_deadline_finished,
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);
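
/*
 * to_drm_sched_fence() is a checked downcast: the ops pointer doubles
 * as a type tag, so a plain struct dma_fence can be mapped back to its
 * containing struct drm_sched_fence whether it was handed out as the
 * scheduled or the finished fence, and NULL is returned for foreign
 * fences. A minimal caller-side sketch (the dma_fence pointer f is
 * hypothetical):
 *
 *	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 *
 *	if (s_fence)
 *		pr_debug("scheduler fence, owner %p\n", s_fence->owner);
 *	else
 *		pr_debug("not a scheduler fence\n");	// e.g. a raw HW fence
 */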

struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
					      void *owner)
{
	struct drm_sched_fence *fence = NULL;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	spin_lock_init(&fence->lock);

	return fence;
}

void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity)
{
	unsigned seq;

	fence->sched = entity->rq->sched;
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);
}
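
/*
 * Putting the two-phase construction together: drm_sched_fence_alloc()
 * only grabs and pre-fills memory (so it can be called where failure is
 * still easy to unwind), while drm_sched_fence_init() binds the fence to
 * a scheduler and makes both embedded dma_fences live. A minimal sketch
 * of the lifecycle as driven by a caller; the entity, owner and hw_fence
 * handling here are hypothetical:
 *
 *	struct drm_sched_fence *f;
 *
 *	f = drm_sched_fence_alloc(entity, owner);
 *	if (!f)
 *		return -ENOMEM;
 *	// ... if setup fails before init: drm_sched_fence_free(f) ...
 *	drm_sched_fence_init(f, entity);	// fence is now live
 *	...
 *	drm_sched_fence_scheduled(f, hw_fence);	// job reached the hardware
 *	drm_sched_fence_finished(f, 0);		// job completed, no error
 */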

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");