/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

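/* Check whether the caller may create a context at the requested scheduler
 * priority.  Priorities up to NORMAL are open to everyone; anything higher
 * requires CAP_SYS_NICE or DRM master status.
 */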
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
	switch (prio) {
	case DRM_SCHED_PRIORITY_HIGH:
	case DRM_SCHED_PRIORITY_KERNEL:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
						 enum drm_sched_priority prio,
						 u32 hw_ip)
{
	unsigned int hw_prio;

	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
			AMDGPU_RING_PRIO_DEFAULT;
	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

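/* Allocate the context entity for one ring of a HW IP block and initialize
 * its scheduler entity with the scheduler list matching the context priority.
 * Engines that retain state between dependent jobs (UVD/VCN) get a single
 * scheduler instead of load balancing.
 */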
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				   const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	unsigned int hw_prio;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
				ctx->init_priority : ctx->override_priority;
	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	/* disable load balance if the hw engine retains context among dependent jobs */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);

	return r;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

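/* Look up the scheduler entity for (hw_ip, instance, ring), validating the
 * indices and creating the entity on first use.
 */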
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

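/* Allocate a new context for this file, insert it into the per-file IDR and
 * return its handle through @id.
 */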
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

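/* Remove the context with handle @id from the per-file IDR and drop the
 * reference held by the IDR.
 */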
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

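/* Extended state query: report reset, VRAM-lost and guilty status for the
 * context via the AMDGPU_CTX_QUERY2_FLAGS_* bits.
 */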
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

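/* Handler for the DRM_AMDGPU_CTX ioctl: dispatches context allocation,
 * destruction and state queries on behalf of userspace.
 */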
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	r = amdgpu_to_sched_priority(args->in.priority, &priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (r == -EINVAL)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

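/* Store @fence in the entity's ring buffer of fences and return its sequence
 * number through @handle.  The slot being reused must already hold a signaled
 * fence; the reference to that previous fence is dropped.
 */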
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

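/* Look up the fence for sequence number @seq on this entity.  A @seq of ~0
 * means the most recently emitted fence; NULL is returned when the fence has
 * already been overwritten in the ring buffer, and an error pointer for
 * sequence numbers that were never emitted.
 */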
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					    struct amdgpu_ctx_entity *aentity,
					    int hw_ip,
					    enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity, priority);

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
						      AMDGPU_HW_IP_COMPUTE);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

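/* Apply a priority override to every entity of the context, updating the
 * software scheduler priority and, for compute, the hardware queue priority.
 */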
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

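/* Wait for the fence occupying the ring-buffer slot that is about to be
 * reused, so submissions on this entity stay at most amdgpu_sched_jobs ahead
 * of completion.
 */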
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

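/* Flush the scheduler entities of every context in this manager, giving
 * queued jobs up to @timeout to drain; returns the remaining timeout.
 */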
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}