/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}
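
/*
 * Illustrative example (hypothetical values): with num_pipe_per_mec = 4
 * and num_queue_per_pipe = 8, (mec 1, pipe 2, queue 3) maps to bit
 * 1 * 4 * 8 + 2 * 8 + 3 = 51; amdgpu_queue_mask_bit_to_mec_queue()
 * below is the inverse mapping.
 */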

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		/ adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
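 *
 * Typical usage (illustrative sketch, mirroring how the GFX ring tests
 * exercise a scratch register):
 *
 *	uint32_t scratch;
 *
 *	r = amdgpu_gfx_scratch_get(adev, &scratch);
 *	if (r)
 *		return r;
 *	WREG32(scratch, 0xCAFEDEAD);
 *	...
 *	amdgpu_gfx_scratch_free(adev, scratch);
 *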
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
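 *
 * For example (illustrative values): booting with
 * amdgpu.disable_cu=2.1.7,0.0.3 disables CU 7 in SE 2 / SH 1 and
 * CU 3 in SE 0 / SH 0.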
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);

	if (multipipe_policy) {
		/* policy: spread queues evenly across all the pipes of MEC1 only */
		for (i = 0; i < max_queues_per_mec; i++) {
			pipe = i % adev->gfx.mec.num_pipe_per_mec;
			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
				adev->gfx.mec.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
				adev->gfx.mec.queue_bitmap);
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (i = 0; i < max_queues_per_mec; ++i)
			set_bit(i, adev->gfx.mec.queue_bitmap);
	}

	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}
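
/*
 * Illustrative example (hypothetical config: 4 pipes per MEC, 8 queues
 * per pipe, 8 compute rings): the multipipe policy sets bits
 * 0, 8, 16, 24, 1, 9, 17 and 25 (queue 0 on each pipe first, then
 * queue 1), while the single-pipe policy sets bits 0-7 (all eight
 * queues of MEC 1 pipe 0).
 */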

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, me;

	for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
		queue = i % adev->gfx.me.num_queue_per_pipe;
		me = (i / adev->gfx.me.num_queue_per_pipe)
			/ adev->gfx.me.num_pipe_per_me;

		if (me >= adev->gfx.me.num_me)
			break;
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		if (me == 0 && queue < 1)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		/* Originally the KIQ MQD was put in the GTT domain, but for
		 * SR-IOV the VRAM domain is a must; otherwise the hypervisor
		 * triggers a SAVE_VF failure after the driver has been
		 * unloaded, i.e. once the MQD is deallocated and GART-unbound.
		 * To avoid divergence, use the VRAM domain for the KIQ MQD on
		 * both SR-IOV and bare metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&adev->gfx.kiq.ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_compute_rings)) {
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);
	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	return r;
}

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	/* The SET_RESOURCES queue mask uses fixed strides of 8 queues per
	 * pipe and 4 pipes per MEC, regardless of how many pipes and queues
	 * the ASIC actually exposes; hence the constants here.
	 */
	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}
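
/*
 * Illustrative example (hypothetical values): with 4 pipes per MEC and
 * 2 queues per pipe, queue bit 10 decodes to mec 1, pipe 1, queue 0 and
 * becomes SET_RESOURCES bit 1 * 4 * 8 + 1 * 8 + 0 = 40.
 */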

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating. */
		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_compute_rings +
			      kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return r;
	}

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip after gfx cg/pg is
 *    enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the
 *    request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send a request to enable the gfx off feature
 *    before having disabled it.
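 *
 * Typical usage (illustrative sketch): a client that needs the gfx block
 * powered up brackets its register access with a disable/enable pair:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);
 *	... access gfx registers ...
 *	amdgpu_gfx_off_ctrl(adev, true);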
 */

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix) {
				if (!amdgpu_dpm_set_powergating_by_smu(adev,
						AMD_IP_BLOCK_TYPE_GFX, true))
					adev->gfx.gfx_off_state = true;
			} else {
				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
						      delay);
			}
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = smu_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_fs_if fs_info = {
		.sysfs_name = "gfx_err_count",
	};
	struct ras_ih_if ih_info = {
		.cb = amdgpu_gfx_process_ras_data_cb,
	};

	if (!adev->gfx.ras_if) {
		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gfx.ras_if)
			return -ENOMEM;
		adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
		adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gfx.ras_if->sub_block_index = 0;
	}
	fs_info.head = ih_info.head = *adev->gfx.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
				 &fs_info, &ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		/* free gfx ras_if if ras is not supported */
		r = 0;
		goto free;
	}

	return 0;
late_fini:
	amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
free:
	kfree(adev->gfx.ras_if);
	adev->gfx.ras_if = NULL;
	return r;
}

void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
	    adev->gfx.ras_if) {
		struct ras_common_if *ras_if = adev->gfx.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			.cb = amdgpu_gfx_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras_funcs &&
		    adev->gfx.ras_funcs->query_ras_error_count)
			adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
	 * is triggered in TTM, and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which makes
	 * gpu_recover() hang there.
	 *
	 * Also don't wait any longer in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever; e.g. this KIQ access may
	 * be triggered in TTM, and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting here, which makes gpu_recover()
	 * hang there.
	 *
	 * Also don't wait any longer in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}

/* amdgpu_gfx_state_change_set - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry,
 * 2 = sGpuChangeState_D3Entry)
 */

void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}