
Lines matching refs:adev in the amdgpu GFX helper code (amdgpu_gfx.c). Each entry shows the original source line number, the matching line, and the enclosing function; lines that do not reference adev are omitted, so some functions appear with gaps.

40 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,  in amdgpu_gfx_mec_queue_to_bit()  argument
45 bit += mec * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_mec_queue_to_bit()
46 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
47 bit += pipe * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_mec_queue_to_bit()
53 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, in amdgpu_queue_mask_bit_to_mec_queue() argument
56 *queue = bit % adev->gfx.mec.num_queue_per_pipe; in amdgpu_queue_mask_bit_to_mec_queue()
57 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
58 % adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
59 *mec = (bit / adev->gfx.mec.num_queue_per_pipe) in amdgpu_queue_mask_bit_to_mec_queue()
60 / adev->gfx.mec.num_pipe_per_mec; in amdgpu_queue_mask_bit_to_mec_queue()
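amdgpu_gfx_mec_queue_to_bit() and amdgpu_queue_mask_bit_to_mec_queue() are inverses: a queue's global bit index packs (mec, pipe, queue) with queue as the least significant digit, pipe in the middle, and mec on top. A standalone round-trip sketch, with hypothetical values standing in for the adev->gfx.mec topology fields:

#include <stdio.h>

#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8

static int mec_queue_to_bit(int mec, int pipe, int queue)
{
        int bit = 0;

        bit += mec * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
        bit += pipe * NUM_QUEUE_PER_PIPE;
        bit += queue;
        return bit;
}

static void bit_to_mec_queue(int bit, int *mec, int *pipe, int *queue)
{
        *queue = bit % NUM_QUEUE_PER_PIPE;
        *pipe  = (bit / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
        *mec   = (bit / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;
}

int main(void)
{
        int mec, pipe, queue;
        int bit = mec_queue_to_bit(1, 2, 3);    /* MEC 1, pipe 2, queue 3 */

        bit_to_mec_queue(bit, &mec, &pipe, &queue);
        printf("bit=%d -> mec=%d pipe=%d queue=%d\n", bit, mec, pipe, queue);
        return 0;
}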
64 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, in amdgpu_gfx_is_mec_queue_enabled() argument
67 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue), in amdgpu_gfx_is_mec_queue_enabled()
68 adev->gfx.mec.queue_bitmap); in amdgpu_gfx_is_mec_queue_enabled()
71 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, in amdgpu_gfx_me_queue_to_bit() argument
76 bit += me * adev->gfx.me.num_pipe_per_me in amdgpu_gfx_me_queue_to_bit()
77 * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
78 bit += pipe * adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_me_queue_to_bit()
84 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, in amdgpu_gfx_bit_to_me_queue() argument
87 *queue = bit % adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_bit_to_me_queue()
88 *pipe = (bit / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_bit_to_me_queue()
89 % adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_bit_to_me_queue()
90 *me = (bit / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_bit_to_me_queue()
91 / adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_bit_to_me_queue()
94 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, in amdgpu_gfx_is_me_queue_enabled() argument
97 return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue), in amdgpu_gfx_is_me_queue_enabled()
98 adev->gfx.me.queue_bitmap); in amdgpu_gfx_is_me_queue_enabled()
110 int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg) in amdgpu_gfx_scratch_get() argument
114 i = ffs(adev->gfx.scratch.free_mask); in amdgpu_gfx_scratch_get()
115 if (i != 0 && i <= adev->gfx.scratch.num_reg) { in amdgpu_gfx_scratch_get()
117 adev->gfx.scratch.free_mask &= ~(1u << i); in amdgpu_gfx_scratch_get()
118 *reg = adev->gfx.scratch.reg_base + i; in amdgpu_gfx_scratch_get()
132 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) in amdgpu_gfx_scratch_free() argument
134 adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base); in amdgpu_gfx_scratch_free()
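amdgpu_gfx_scratch_get() hands out scratch registers from a free-bit mask. Note that ffs() returns a 1-based index; in the full source an i-- between lines 115 and 117 converts it back to 0-based, but that line is omitted from this listing because it does not reference adev. A standalone sketch with the decrement restored and hypothetical register values:

#include <stdio.h>
#include <stdint.h>

#define SCRATCH_NUM_REG  8
#define SCRATCH_REG_BASE 0x40c0                 /* hypothetical offset */

static uint32_t free_mask = (1u << SCRATCH_NUM_REG) - 1;

static int scratch_get(uint32_t *reg)
{
        int i = __builtin_ffs(free_mask);       /* 1-based lowest set bit */

        if (i != 0 && i <= SCRATCH_NUM_REG) {
                i--;                            /* back to a 0-based index */
                free_mask &= ~(1u << i);
                *reg = SCRATCH_REG_BASE + i;
                return 0;
        }
        return -1;                              /* all registers in use */
}

static void scratch_free(uint32_t reg)
{
        free_mask |= 1u << (reg - SCRATCH_REG_BASE);
}

int main(void)
{
        uint32_t reg;

        if (!scratch_get(&reg)) {
                printf("got scratch reg 0x%x\n", reg);
                scratch_free(reg);
        }
        return 0;
}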
181 static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev) in amdgpu_gfx_is_multipipe_capable() argument
191 if (adev->asic_type == CHIP_POLARIS11) in amdgpu_gfx_is_multipipe_capable()
194 return adev->gfx.mec.num_mec > 1; in amdgpu_gfx_is_multipipe_capable()
197 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, in amdgpu_gfx_is_high_priority_compute_queue() argument
203 if (adev->gfx.num_compute_rings > 1 && in amdgpu_gfx_is_high_priority_compute_queue()
204 ring == &adev->gfx.compute_ring[0]) in amdgpu_gfx_is_high_priority_compute_queue()
210 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) in amdgpu_gfx_compute_queue_acquire() argument
213 bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev); in amdgpu_gfx_compute_queue_acquire()
214 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * in amdgpu_gfx_compute_queue_acquire()
215 adev->gfx.mec.num_queue_per_pipe, in amdgpu_gfx_compute_queue_acquire()
216 adev->gfx.num_compute_rings); in amdgpu_gfx_compute_queue_acquire()
221 pipe = i % adev->gfx.mec.num_pipe_per_mec; in amdgpu_gfx_compute_queue_acquire()
222 queue = (i / adev->gfx.mec.num_pipe_per_mec) % in amdgpu_gfx_compute_queue_acquire()
223 adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_compute_queue_acquire()
225 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, in amdgpu_gfx_compute_queue_acquire()
226 adev->gfx.mec.queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
231 set_bit(i, adev->gfx.mec.queue_bitmap); in amdgpu_gfx_compute_queue_acquire()
234 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); in amdgpu_gfx_compute_queue_acquire()
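amdgpu_gfx_compute_queue_acquire() picks which (pipe, queue) slots back the kernel compute rings: on multipipe-capable parts the rings are spread round-robin across pipes so work does not pile up on pipe 0, otherwise the first queue bits are taken in order. A standalone sketch of the two policies, with hypothetical topology values:

#include <stdio.h>
#include <stdbool.h>

#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8
#define NUM_COMPUTE_RINGS  8

int main(void)
{
        bool multipipe_policy = true;   /* amdgpu_gfx_is_multipipe_capable() */
        int max_queues = NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
        int i, pipe, queue;

        if (max_queues > NUM_COMPUTE_RINGS)
                max_queues = NUM_COMPUTE_RINGS;

        for (i = 0; i < max_queues; i++) {
                if (multipipe_policy) {
                        /* spread: round-robin across pipes first */
                        pipe = i % NUM_PIPE_PER_MEC;
                        queue = (i / NUM_PIPE_PER_MEC) % NUM_QUEUE_PER_PIPE;
                } else {
                        /* pack: take bits in order (bit == i) */
                        pipe = i / NUM_QUEUE_PER_PIPE;
                        queue = i % NUM_QUEUE_PER_PIPE;
                }
                printf("ring %d -> pipe %d queue %d (bit %d)\n",
                       i, pipe, queue, pipe * NUM_QUEUE_PER_PIPE + queue);
        }
        return 0;
}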
237 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) in amdgpu_gfx_graphics_queue_acquire() argument
242 queue = i % adev->gfx.me.num_queue_per_pipe; in amdgpu_gfx_graphics_queue_acquire()
243 me = (i / adev->gfx.me.num_queue_per_pipe) in amdgpu_gfx_graphics_queue_acquire()
244 / adev->gfx.me.num_pipe_per_me; in amdgpu_gfx_graphics_queue_acquire()
246 if (me >= adev->gfx.me.num_me) in amdgpu_gfx_graphics_queue_acquire()
251 set_bit(i, adev->gfx.me.queue_bitmap); in amdgpu_gfx_graphics_queue_acquire()
255 adev->gfx.num_gfx_rings = in amdgpu_gfx_graphics_queue_acquire()
256 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); in amdgpu_gfx_graphics_queue_acquire()
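amdgpu_gfx_graphics_queue_acquire() derives num_gfx_rings from bitmap_weight(), i.e. the number of set bits in the queue bitmap. Over a single 64-bit word that is just a popcount:

#include <stdio.h>

int main(void)
{
        unsigned long long queue_bitmap = 0x3;  /* two enabled gfx queues */

        /* bitmap_weight() over one 64-bit word is a population count */
        int num_gfx_rings = __builtin_popcountll(queue_bitmap);

        printf("num_gfx_rings = %d\n", num_gfx_rings);
        return 0;
}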
259 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, in amdgpu_gfx_kiq_acquire() argument
265 queue_bit = adev->gfx.mec.num_mec in amdgpu_gfx_kiq_acquire()
266 * adev->gfx.mec.num_pipe_per_mec in amdgpu_gfx_kiq_acquire()
267 * adev->gfx.mec.num_queue_per_pipe; in amdgpu_gfx_kiq_acquire()
270 if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) in amdgpu_gfx_kiq_acquire()
273 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); in amdgpu_gfx_kiq_acquire()
290 dev_err(adev->dev, "Failed to find a queue for KIQ\n"); in amdgpu_gfx_kiq_acquire()
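amdgpu_gfx_kiq_acquire() walks the queue-bit space from the top downward and claims the first bit not already taken by a kernel compute queue, so the KIQ lands in the least contended spot; the real loop applies additional placement constraints that this filtered listing omits. A sketch of the downward scan, with hypothetical topology values:

#include <stdio.h>

#define NUM_MEC            2
#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8

int main(void)
{
        unsigned long long kcq_bitmap = 0xff;   /* bits taken by KCQs */
        int queue_bit = NUM_MEC * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;

        while (--queue_bit >= 0) {
                if (kcq_bitmap & (1ull << queue_bit))
                        continue;               /* used by a compute ring */
                printf("KIQ takes queue bit %d\n", queue_bit);
                return 0;
        }
        printf("Failed to find a queue for KIQ\n");
        return 1;
}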
294 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, in amdgpu_gfx_kiq_init_ring() argument
298 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_init_ring()
303 ring->adev = NULL; in amdgpu_gfx_kiq_init_ring()
306 ring->doorbell_index = adev->doorbell_index.kiq; in amdgpu_gfx_kiq_init_ring()
308 r = amdgpu_gfx_kiq_acquire(adev, ring); in amdgpu_gfx_kiq_init_ring()
315 r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, in amdgpu_gfx_kiq_init_ring()
318 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r); in amdgpu_gfx_kiq_init_ring()
328 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev) in amdgpu_gfx_kiq_fini() argument
330 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_fini()
335 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, in amdgpu_gfx_kiq_init() argument
340 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_kiq_init()
342 r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE, in amdgpu_gfx_kiq_init()
346 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r); in amdgpu_gfx_kiq_init()
354 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r); in amdgpu_gfx_kiq_init()
362 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, in amdgpu_gfx_mqd_sw_init() argument
369 ring = &adev->gfx.kiq.ring; in amdgpu_gfx_mqd_sw_init()
376 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, in amdgpu_gfx_mqd_sw_init()
380 dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); in amdgpu_gfx_mqd_sw_init()
385 adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
386 if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]) in amdgpu_gfx_mqd_sw_init()
387 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); in amdgpu_gfx_mqd_sw_init()
390 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { in amdgpu_gfx_mqd_sw_init()
392 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_init()
393 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_init()
395 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, in amdgpu_gfx_mqd_sw_init()
399 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r); in amdgpu_gfx_mqd_sw_init()
404 adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
405 if (!adev->gfx.me.mqd_backup[i]) in amdgpu_gfx_mqd_sw_init()
406 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); in amdgpu_gfx_mqd_sw_init()
412 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_init()
413 ring = &adev->gfx.compute_ring[i]; in amdgpu_gfx_mqd_sw_init()
415 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, in amdgpu_gfx_mqd_sw_init()
419 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r); in amdgpu_gfx_mqd_sw_init()
424 adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); in amdgpu_gfx_mqd_sw_init()
425 if (!adev->gfx.mec.mqd_backup[i]) in amdgpu_gfx_mqd_sw_init()
426 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); in amdgpu_gfx_mqd_sw_init()
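The kmalloc'd buffers above shadow each ring's MQD (memory queue descriptor) so its contents can be restored after reset or resume: compute rings use slots 0..num_compute_rings-1 of mec.mqd_backup[], gfx rings use me.mqd_backup[], and the KIQ takes the extra slot at index AMDGPU_MAX_COMPUTE_RINGS. A sketch of the slot layout, with hypothetical sizes:

#include <stdlib.h>

#define MAX_COMPUTE_RINGS 8
#define MQD_SIZE          4096                  /* hypothetical MQD size */

/* one backup slot per compute ring, plus a trailing slot for the KIQ */
static void *mqd_backup[MAX_COMPUTE_RINGS + 1];

static int backup_alloc(int slot)
{
        mqd_backup[slot] = malloc(MQD_SIZE);
        return mqd_backup[slot] ? 0 : -1;
}

static void backup_free(int slot)
{
        free(mqd_backup[slot]);
        mqd_backup[slot] = NULL;
}

int main(void)
{
        int i;

        for (i = 0; i < MAX_COMPUTE_RINGS; i++) /* compute ring slots */
                backup_alloc(i);
        backup_alloc(MAX_COMPUTE_RINGS);        /* KIQ slot */

        for (i = 0; i <= MAX_COMPUTE_RINGS; i++)
                backup_free(i);
        return 0;
}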
433 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) in amdgpu_gfx_mqd_sw_fini() argument
438 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { in amdgpu_gfx_mqd_sw_fini()
439 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
440 ring = &adev->gfx.gfx_ring[i]; in amdgpu_gfx_mqd_sw_fini()
441 kfree(adev->gfx.me.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
448 for (i = 0; i < adev->gfx.num_compute_rings; i++) { in amdgpu_gfx_mqd_sw_fini()
449 ring = &adev->gfx.compute_ring[i]; in amdgpu_gfx_mqd_sw_fini()
450 kfree(adev->gfx.mec.mqd_backup[i]); in amdgpu_gfx_mqd_sw_fini()
456 ring = &adev->gfx.kiq.ring; in amdgpu_gfx_mqd_sw_fini()
457 kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); in amdgpu_gfx_mqd_sw_fini()
463 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) in amdgpu_gfx_disable_kcq() argument
465 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_disable_kcq()
472 spin_lock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_disable_kcq()
474 adev->gfx.num_compute_rings)) { in amdgpu_gfx_disable_kcq()
475 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_disable_kcq()
479 for (i = 0; i < adev->gfx.num_compute_rings; i++) in amdgpu_gfx_disable_kcq()
480 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], in amdgpu_gfx_disable_kcq()
483 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_disable_kcq()
488 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, in amdgpu_queue_mask_bit_to_set_resource_bit() argument
494 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); in amdgpu_queue_mask_bit_to_set_resource_bit()
501 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) in amdgpu_gfx_enable_kcq() argument
503 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_gfx_enable_kcq()
504 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; in amdgpu_gfx_enable_kcq()
512 if (!test_bit(i, adev->gfx.mec.queue_bitmap)) in amdgpu_gfx_enable_kcq()
523 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i)); in amdgpu_gfx_enable_kcq()
528 spin_lock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_enable_kcq()
530 adev->gfx.num_compute_rings + in amdgpu_gfx_enable_kcq()
534 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_enable_kcq()
539 for (i = 0; i < adev->gfx.num_compute_rings; i++) in amdgpu_gfx_enable_kcq()
540 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); in amdgpu_gfx_enable_kcq()
543 spin_unlock(&adev->gfx.kiq.ring_lock); in amdgpu_gfx_enable_kcq()
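amdgpu_gfx_enable_kcq() builds the 64-bit queue mask for the KIQ set_resources packet by translating every enabled bit in the MEC queue bitmap through amdgpu_queue_mask_bit_to_set_resource_bit(). A sketch of that construction; the fixed 4-pipe-by-8-queue layout in the stand-in translation is an assumption about what the packet expects, not something this listing shows:

#include <stdio.h>
#include <stdint.h>

#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8

/* hypothetical stand-in for amdgpu_queue_mask_bit_to_set_resource_bit() */
static int queue_bit_to_set_resource_bit(int queue_bit)
{
        int queue = queue_bit % NUM_QUEUE_PER_PIPE;
        int pipe = (queue_bit / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
        int mec = (queue_bit / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;

        return mec * 4 * 8 + pipe * 8 + queue;
}

int main(void)
{
        unsigned long long queue_bitmap = 0x0f; /* four enabled KCQs */
        uint64_t queue_mask = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (!(queue_bitmap & (1ull << i)))
                        continue;
                queue_mask |= 1ull << queue_bit_to_set_resource_bit(i);
        }
        printf("set_resources queue_mask = 0x%llx\n",
               (unsigned long long)queue_mask);
        return 0;
}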
561 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) in amdgpu_gfx_off_ctrl() argument
565 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) in amdgpu_gfx_off_ctrl()
568 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
575 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0)) in amdgpu_gfx_off_ctrl()
578 adev->gfx.gfx_off_req_count--; in amdgpu_gfx_off_ctrl()
580 if (adev->gfx.gfx_off_req_count == 0 && in amdgpu_gfx_off_ctrl()
581 !adev->gfx.gfx_off_state) { in amdgpu_gfx_off_ctrl()
583 if (adev->in_s0ix) { in amdgpu_gfx_off_ctrl()
584 if (!amdgpu_dpm_set_powergating_by_smu(adev, in amdgpu_gfx_off_ctrl()
586 adev->gfx.gfx_off_state = true; in amdgpu_gfx_off_ctrl()
588 schedule_delayed_work(&adev->gfx.gfx_off_delay_work, in amdgpu_gfx_off_ctrl()
593 if (adev->gfx.gfx_off_req_count == 0) { in amdgpu_gfx_off_ctrl()
594 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); in amdgpu_gfx_off_ctrl()
596 if (adev->gfx.gfx_off_state && in amdgpu_gfx_off_ctrl()
597 !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) { in amdgpu_gfx_off_ctrl()
598 adev->gfx.gfx_off_state = false; in amdgpu_gfx_off_ctrl()
600 if (adev->gfx.funcs->init_spm_golden) { in amdgpu_gfx_off_ctrl()
601 dev_dbg(adev->dev, in amdgpu_gfx_off_ctrl()
603 amdgpu_gfx_init_spm_golden(adev); in amdgpu_gfx_off_ctrl()
608 adev->gfx.gfx_off_req_count++; in amdgpu_gfx_off_ctrl()
612 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_gfx_off_ctrl()
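amdgpu_gfx_off_ctrl() is a voting scheme: each disable call bumps gfx_off_req_count and forces the GFX block back on, each enable call drops it, and only when the count reaches zero may GFXOFF actually engage (deferred through delayed work except in S0ix). A minimal sketch of just the refcounting, with the locking, delayed work, and SMU calls elided:

#include <stdbool.h>
#include <stdio.h>

static unsigned int gfx_off_req_count = 1;      /* starts disallowed */
static bool gfx_off_state;

static void gfx_off_ctrl(bool enable)
{
        if (enable) {
                if (gfx_off_req_count == 0)
                        return;                 /* unbalanced call */
                if (--gfx_off_req_count == 0 && !gfx_off_state) {
                        gfx_off_state = true;   /* really: delayed work */
                        printf("GFXOFF allowed\n");
                }
        } else {
                if (gfx_off_req_count++ == 0 && gfx_off_state) {
                        gfx_off_state = false;  /* really: ungate via SMU */
                        printf("GFXOFF disallowed\n");
                }
        }
}

int main(void)
{
        gfx_off_ctrl(true);     /* allow */
        gfx_off_ctrl(false);    /* a client needs the GFX block on */
        gfx_off_ctrl(true);     /* allow again */
        return 0;
}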
615 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) in amdgpu_get_gfx_off_status() argument
620 mutex_lock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
622 r = smu_get_status_gfxoff(adev, value); in amdgpu_get_gfx_off_status()
624 mutex_unlock(&adev->gfx.gfx_off_mutex); in amdgpu_get_gfx_off_status()
629 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) in amdgpu_gfx_ras_late_init() argument
639 if (!adev->gfx.ras_if) { in amdgpu_gfx_ras_late_init()
640 adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); in amdgpu_gfx_ras_late_init()
641 if (!adev->gfx.ras_if) in amdgpu_gfx_ras_late_init()
643 adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX; in amdgpu_gfx_ras_late_init()
644 adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in amdgpu_gfx_ras_late_init()
645 adev->gfx.ras_if->sub_block_index = 0; in amdgpu_gfx_ras_late_init()
647 fs_info.head = ih_info.head = *adev->gfx.ras_if; in amdgpu_gfx_ras_late_init()
648 r = amdgpu_ras_late_init(adev, adev->gfx.ras_if, in amdgpu_gfx_ras_late_init()
653 if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { in amdgpu_gfx_ras_late_init()
654 if (!amdgpu_persistent_edc_harvesting_supported(adev)) in amdgpu_gfx_ras_late_init()
655 amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); in amdgpu_gfx_ras_late_init()
657 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); in amdgpu_gfx_ras_late_init()
668 amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info); in amdgpu_gfx_ras_late_init()
670 kfree(adev->gfx.ras_if); in amdgpu_gfx_ras_late_init()
671 adev->gfx.ras_if = NULL; in amdgpu_gfx_ras_late_init()
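amdgpu_gfx_ras_late_init() allocates its ras_common_if descriptor only on the first call and unwinds in reverse order on failure: a late-init failure frees the descriptor, and an IRQ failure additionally tears down the RAS registration first. A skeleton of that init-once-with-unwind pattern, with the amdgpu calls replaced by hypothetical stand-ins:

#include <errno.h>
#include <stdlib.h>

struct ras_common_if { int block; int type; int sub_block_index; };

static struct ras_common_if *ras_if;

/* stand-ins for amdgpu_ras_late_init()/amdgpu_irq_get(); succeed here */
static int ras_register(void)    { return 0; }
static int irq_get(void)         { return 0; }
static void ras_unregister(void) { }

static int gfx_ras_late_init(void)
{
        int r;

        if (!ras_if) {          /* allocate the descriptor only once */
                ras_if = malloc(sizeof(*ras_if));
                if (!ras_if)
                        return -ENOMEM;
                ras_if->block = 0;      /* AMDGPU_RAS_BLOCK__GFX */
                ras_if->type = 0;       /* MULTI_UNCORRECTABLE */
                ras_if->sub_block_index = 0;
        }

        r = ras_register();
        if (r)
                goto free;

        r = irq_get();          /* enable the CP ECC error interrupt */
        if (r)
                goto late_fini;

        return 0;

late_fini:
        ras_unregister();
free:
        free(ras_if);
        ras_if = NULL;
        return r;
}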
675 void amdgpu_gfx_ras_fini(struct amdgpu_device *adev) in amdgpu_gfx_ras_fini() argument
677 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && in amdgpu_gfx_ras_fini()
678 adev->gfx.ras_if) { in amdgpu_gfx_ras_fini()
679 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_ras_fini()
685 amdgpu_ras_late_fini(adev, ras_if, &ih_info); in amdgpu_gfx_ras_fini()
690 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, in amdgpu_gfx_process_ras_data_cb() argument
700 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_gfx_process_ras_data_cb()
701 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); in amdgpu_gfx_process_ras_data_cb()
702 if (adev->gfx.ras_funcs && in amdgpu_gfx_process_ras_data_cb()
703 adev->gfx.ras_funcs->query_ras_error_count) in amdgpu_gfx_process_ras_data_cb()
704 adev->gfx.ras_funcs->query_ras_error_count(adev, err_data); in amdgpu_gfx_process_ras_data_cb()
705 amdgpu_ras_reset_gpu(adev); in amdgpu_gfx_process_ras_data_cb()
710 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, in amdgpu_gfx_cp_ecc_error_irq() argument
714 struct ras_common_if *ras_if = adev->gfx.ras_if; in amdgpu_gfx_cp_ecc_error_irq()
725 amdgpu_ras_interrupt_dispatch(adev, &ih_data); in amdgpu_gfx_cp_ecc_error_irq()
729 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) in amdgpu_kiq_rreg() argument
734 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_kiq_rreg()
737 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_kiq_rreg()
743 if (amdgpu_device_wb_get(adev, &reg_val_offs)) { in amdgpu_kiq_rreg()
766 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) in amdgpu_kiq_rreg()
779 value = adev->wb.wb[reg_val_offs]; in amdgpu_kiq_rreg()
780 amdgpu_device_wb_free(adev, reg_val_offs); in amdgpu_kiq_rreg()
789 amdgpu_device_wb_free(adev, reg_val_offs); in amdgpu_kiq_rreg()
790 dev_err(adev->dev, "failed to read reg:%x\n", reg); in amdgpu_kiq_rreg()
794 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) in amdgpu_kiq_wreg() argument
799 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in amdgpu_kiq_wreg()
804 if (amdgpu_device_skip_hw_access(adev)) in amdgpu_kiq_wreg()
827 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) in amdgpu_kiq_wreg()
846 dev_err(adev->dev, "failed to write reg:%x\n", reg); in amdgpu_kiq_wreg()
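Both amdgpu_kiq_rreg() and amdgpu_kiq_wreg() follow the same shape: emit the register packet plus a fence on the KIQ ring, then poll the fence sequence number, retrying up to MAX_KIQ_REG_TRY times and bailing out early during reset or from interrupt context. A stripped-down sketch of the poll-and-retry skeleton, with the amdgpu internals replaced by hypothetical stand-ins:

#include <stdint.h>
#include <stdio.h>

#define MAX_KIQ_REG_WAIT 5000   /* usecs per attempt */
#define MAX_KIQ_REG_TRY  80

static volatile uint32_t wb_slot;       /* CP writes the value here */
static volatile uint32_t last_seq;      /* CP signals the fence here */

/* stand-in for the fence polling: spin until seq is signalled */
static int wait_polling(uint32_t seq, unsigned int usecs)
{
        while (usecs--) {
                if (last_seq >= seq)
                        return 0;
                /* udelay(1) in the kernel */
        }
        return -1;                      /* timed out */
}

static int kiq_rreg(uint32_t reg, uint32_t *val)
{
        uint32_t seq = 1;               /* seq emitted with the packet */
        int i;

        /* ring: emit_rreg(reg, wb offset) + emit_fence(seq), commit */
        wb_slot = 0x12345678;           /* simulate the CP completing */
        last_seq = seq;

        for (i = 0; i < MAX_KIQ_REG_TRY; i++) {
                if (!wait_polling(seq, MAX_KIQ_REG_WAIT)) {
                        *val = wb_slot;
                        return 0;
                }
                /* on timeout the driver re-issues and retries */
        }
        printf("failed to read reg:%x\n", reg);
        return -1;
}

int main(void)
{
        uint32_t v;

        if (!kiq_rreg(0x1234, &v))
                printf("reg 0x1234 = 0x%x\n", v);
        return 0;
}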
849 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev) in amdgpu_gfx_get_num_kcq() argument
854 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n"); in amdgpu_gfx_get_num_kcq()
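amdgpu_gfx_get_num_kcq() sanitizes the amdgpu_num_kcq module parameter: values outside [0, 8] fall back to 8 with the warning above. A sketch, assuming (as the warning path suggests) that -1 selects the default silently:

#include <stdio.h>

static int amdgpu_num_kcq = -1;         /* module parameter; -1 = default */

static int gfx_get_num_kcq(void)
{
        if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
                if (amdgpu_num_kcq != -1)
                        printf("set kernel compute queue number to 8 due to invalid parameter provided by user\n");
                return 8;
        }
        return amdgpu_num_kcq;
}

int main(void)
{
        printf("%d\n", gfx_get_num_kcq());      /* 8: default */
        amdgpu_num_kcq = 99;
        printf("%d\n", gfx_get_num_kcq());      /* 8: clamped, warns */
        amdgpu_num_kcq = 4;
        printf("%d\n", gfx_get_num_kcq());      /* 4 */
        return 0;
}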
866 void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state) in amdgpu_gfx_state_change_set() argument
868 mutex_lock(&adev->pm.mutex); in amdgpu_gfx_state_change_set()
869 if (adev->powerplay.pp_funcs && in amdgpu_gfx_state_change_set()
870 adev->powerplay.pp_funcs->gfx_state_change_set) in amdgpu_gfx_state_change_set()
871 ((adev)->powerplay.pp_funcs->gfx_state_change_set( in amdgpu_gfx_state_change_set()
872 (adev)->powerplay.pp_handle, state)); in amdgpu_gfx_state_change_set()
873 mutex_unlock(&adev->pm.mutex); in amdgpu_gfx_state_change_set()
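amdgpu_gfx_state_change_set() shows the driver's usual guarded-callback idiom: take the PM mutex, then invoke the powerplay hook only if the backend actually populated it. The idiom in isolation, with a hypothetical vtable standing in for pp_funcs:

#include <stdio.h>

struct pp_funcs {
        void (*gfx_state_change_set)(void *handle, int state);
};

static void smu_state_change_set(void *handle, int state)
{
        printf("gfx state -> %d\n", state);
}

static const struct pp_funcs funcs = {
        .gfx_state_change_set = smu_state_change_set,
};

static void gfx_state_change_set(const struct pp_funcs *pp, void *handle,
                                 int state)
{
        /* mutex_lock(&adev->pm.mutex) would be taken here */
        if (pp && pp->gfx_state_change_set)     /* hook is optional */
                pp->gfx_state_change_set(handle, state);
        /* mutex_unlock(&adev->pm.mutex) */
}

int main(void)
{
        gfx_state_change_set(&funcs, NULL, 1);
        return 0;
}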