/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

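/*
 * Size of one process's doorbell slice: one 64-bit doorbell for each of the
 * per-process queue limit, rounded up to a whole page.
 */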
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

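/*
 * Allocate a kernel doorbell from the MES doorbell bitmap. SDMA queues start
 * the search at the first SDMA engine's doorbell index, everything else
 * starts at 0. Each 64-bit doorbell spans two dwords, hence the "* 2" when
 * converting the bit index to an absolute dword offset on the BAR.
 */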
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

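/*
 * Set up the doorbell bitmap and reserve the first entries as aggregated
 * doorbells, one for each MES priority level.
 */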
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

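/*
 * Initialize the software state in adev->mes: the pasid/gang/queue IDRs and
 * locks, HQD masks per IP block, per-pipe writeback slots for the scheduler
 * context and query-status fences, the doorbell bitmap and, optionally, the
 * event log buffer.
 */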
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= adev->gfx.mec.num_pipe_per_mec)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) sch_ctx_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

		r = amdgpu_device_wb_get(adev,
				&adev->mes.query_status_fence_offs[i]);
		if (r) {
			dev_err(adev->dev,
				"(%d) query_status_fence_offs wb alloc failed\n",
				r);
			goto error;
		}
		adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
			(adev->mes.query_status_fence_offs[i] * 4);
		adev->mes.query_status_fence_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				adev->mes.query_status_fence_offs[i]);
	}
	if (adev->mes.read_val_ptr)
		amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	int i;

	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
		if (adev->mes.query_status_fence_ptr[i])
			amdgpu_device_wb_free(adev,
				adev->mes.query_status_fence_offs[i]);
	}
	if (adev->mes.read_val_ptr)
		amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

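/*
 * Create an MES process for @pasid: allocate and zero the per-process
 * context BO, then register the process in the pasid IDR under the MES lock.
 */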
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to lock pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

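/*
 * Destroy the process identified by @pasid: remove every queue of every gang
 * from the hardware under the MES lock, then free the gang and process
 * memory after the lock is dropped.
 */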
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

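/*
 * Add a gang to an existing process: allocate and zero the gang context BO,
 * register the gang in the gang IDR and link it into the process's gang
 * list. The allocated gang id is returned through @gang_id.
 */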
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

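/*
 * Suspend/resume all gangs through the MES firmware. Both calls are no-ops
 * on firmware that lacks suspend/resume-all support.
 */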
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct mes_suspend_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
	input.suspend_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to suspend all gangs");

	return r;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct mes_resume_gang_input input;
	int r;

	if (!amdgpu_mes_suspend_resume_all_supported(adev))
		return 0;

	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
	input.resume_all_gangs = 1;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to resume all gangs");

	return r;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

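/*
 * Add a hardware queue to an existing gang: allocate the MQD and a queue id,
 * reserve a kernel doorbell, initialize the MQD, then hand the queue over to
 * the MES firmware through the add_hw_queue callback.
 */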
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

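/*
 * Remove a hardware queue: drop it from the queue IDR, ask the MES firmware
 * to unmap it, then release its doorbell and MQD.
 */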
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_reset_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* look up the mes queue in the idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
			  queue_id);

	amdgpu_mes_unlock(&adev->mes);

	return 0;
}

int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
				   int me_id, int pipe_id, int queue_id, int vmid)
{
	struct mes_reset_queue_input queue_input;
	int r;

	queue_input.queue_type = queue_type;
	queue_input.use_mmio = true;
	queue_input.me_id = me_id;
	queue_input.pipe_id = pipe_id;
	queue_input.queue_id = queue_id;
	queue_input.vmid = vmid;
	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
			  queue_id);
	return r;
}

int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid,
				  bool use_mmio)
{
	struct mes_reset_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.me_id = ring->me;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;
	queue_input.use_mmio = use_mmio;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}

uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

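/*
 * Program the per-process shader debugger state through a MES misc op.
 * Process-context flushes are rejected here and must go through
 * amdgpu_mes_flush_shader_debugger() instead.
 */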
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
do { \
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].slots[id_offs]); \
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].ring); \
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].ib); \
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].padding); \
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

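/*
 * Create a software ring on top of an MES hardware queue: set up an
 * amdgpu_ring backed by the context meta-data BO and register it with the
 * MES firmware via amdgpu_mes_add_hw_queue().
 */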
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r) {
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_memory;
	}

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	amdgpu_ring_fini(ring);
clean_up_memory:
	kfree(ring);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

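/*
 * Map the context meta-data BO into a process VM: lock the BO and the page
 * directory with drm_exec, create and map a bo_va, then update the page
 * tables and wait for the updates to land before returning.
 */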
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

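/*
 * MES self test: create a temporary VM and process, bring up one gang per
 * queue type with a single queue each, run ring and IB tests on them, then
 * tear everything down again.
 */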
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't support mapping SDMA queues yet. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

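/*
 * Request the MES firmware image for @pipe. The file name depends on the
 * generation: unified MES uses <prefix>_uni_mes.bin, GC 11.x uses
 * <prefix>_mes_2.bin / <prefix>_mes1.bin (with a fallback to
 * <prefix>_mes.bin for the scheduler pipe), and everything else uses
 * <prefix>_mes.bin / <prefix>_mes1.bin.
 */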
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
		   amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
	bool is_supported = false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
	    mes_rev >= 0x63)
		is_supported = true;

	return is_supported;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, adev->mes.event_log_size, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}