Lines Matching refs:adev

51 static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,  in gmc_v11_0_ecc_interrupt_state()  argument
60 gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v11_0_vm_fault_interrupt_state() argument
67 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); in gmc_v11_0_vm_fault_interrupt_state()
74 if (!adev->in_s0ix) in gmc_v11_0_vm_fault_interrupt_state()
75 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); in gmc_v11_0_vm_fault_interrupt_state()
79 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); in gmc_v11_0_vm_fault_interrupt_state()
86 if (!adev->in_s0ix) in gmc_v11_0_vm_fault_interrupt_state()
87 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); in gmc_v11_0_vm_fault_interrupt_state()
96 static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, in gmc_v11_0_process_interrupt() argument
102 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; in gmc_v11_0_process_interrupt()
109 if (!amdgpu_sriov_vf(adev)) { in gmc_v11_0_process_interrupt()
126 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); in gmc_v11_0_process_interrupt()
128 dev_err(adev->dev, in gmc_v11_0_process_interrupt()
134 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", in gmc_v11_0_process_interrupt()
136 if (!amdgpu_sriov_vf(adev)) in gmc_v11_0_process_interrupt()
137 hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); in gmc_v11_0_process_interrupt()
153 static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_irq_funcs() argument
155 adev->gmc.vm_fault.num_types = 1; in gmc_v11_0_set_irq_funcs()
156 adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs; in gmc_v11_0_set_irq_funcs()
158 if (!amdgpu_sriov_vf(adev)) { in gmc_v11_0_set_irq_funcs()
159 adev->gmc.ecc_irq.num_types = 1; in gmc_v11_0_set_irq_funcs()
160 adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs; in gmc_v11_0_set_irq_funcs()
171 static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v11_0_use_invalidate_semaphore() argument
175 (!amdgpu_sriov_vf(adev))); in gmc_v11_0_use_invalidate_semaphore()
179 struct amdgpu_device *adev, in gmc_v11_0_get_vmid_pasid_mapping_info() argument
194 static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, in gmc_v11_0_flush_vm_hub() argument
197 bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub); in gmc_v11_0_flush_vm_hub()
198 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v11_0_flush_vm_hub()
209 spin_lock(&adev->gmc.invalidate_lock); in gmc_v11_0_flush_vm_hub()
219 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v11_0_flush_vm_hub()
228 if (i >= adev->usec_timeout) in gmc_v11_0_flush_vm_hub()
235 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v11_0_flush_vm_hub()
257 !amdgpu_sriov_vf(adev)) { in gmc_v11_0_flush_vm_hub()
267 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v11_0_flush_vm_hub()
269 if (i < adev->usec_timeout) in gmc_v11_0_flush_vm_hub()
285 static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v11_0_flush_gpu_tlb() argument
288 if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron) in gmc_v11_0_flush_gpu_tlb()
292 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v11_0_flush_gpu_tlb()
297 if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && in gmc_v11_0_flush_gpu_tlb()
298 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { in gmc_v11_0_flush_gpu_tlb()
299 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v11_0_flush_gpu_tlb()
305 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v11_0_flush_gpu_tlb()
310 mutex_lock(&adev->mman.gtt_window_lock); in gmc_v11_0_flush_gpu_tlb()
311 gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0); in gmc_v11_0_flush_gpu_tlb()
312 mutex_unlock(&adev->mman.gtt_window_lock); in gmc_v11_0_flush_gpu_tlb()
326 static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v11_0_flush_gpu_tlb_pasid() argument
335 struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; in gmc_v11_0_flush_gpu_tlb_pasid()
336 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in gmc_v11_0_flush_gpu_tlb_pasid()
339 spin_lock(&adev->gfx.kiq[0].ring_lock); in gmc_v11_0_flush_gpu_tlb_pasid()
347 spin_unlock(&adev->gfx.kiq[0].ring_lock); in gmc_v11_0_flush_gpu_tlb_pasid()
352 spin_unlock(&adev->gfx.kiq[0].ring_lock); in gmc_v11_0_flush_gpu_tlb_pasid()
353 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); in gmc_v11_0_flush_gpu_tlb_pasid()
355 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); in gmc_v11_0_flush_gpu_tlb_pasid()
364 ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid, in gmc_v11_0_flush_gpu_tlb_pasid()
368 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) in gmc_v11_0_flush_gpu_tlb_pasid()
369 gmc_v11_0_flush_gpu_tlb(adev, vmid, in gmc_v11_0_flush_gpu_tlb_pasid()
372 gmc_v11_0_flush_gpu_tlb(adev, vmid, in gmc_v11_0_flush_gpu_tlb_pasid()
384 bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); in gmc_v11_0_emit_flush_gpu_tlb()
385 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; in gmc_v11_0_emit_flush_gpu_tlb()
432 struct amdgpu_device *adev = ring->adev; in gmc_v11_0_emit_pasid_mapping() local
479 static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v11_0_map_mtype() argument
497 static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v11_0_get_vm_pde() argument
501 *addr = adev->vm_manager.vram_base_offset + *addr - in gmc_v11_0_get_vm_pde()
502 adev->gmc.vram_start; in gmc_v11_0_get_vm_pde()
505 if (!adev->gmc.translate_further) in gmc_v11_0_get_vm_pde()
521 static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v11_0_get_vm_pte() argument
550 static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v11_0_get_vbios_fb_size() argument
583 static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_gmc_funcs() argument
585 adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs; in gmc_v11_0_set_gmc_funcs()
588 static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_umc_funcs() argument
590 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v11_0_set_umc_funcs()
592 adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM; in gmc_v11_0_set_umc_funcs()
593 adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM; in gmc_v11_0_set_umc_funcs()
594 adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev); in gmc_v11_0_set_umc_funcs()
595 adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET; in gmc_v11_0_set_umc_funcs()
596 adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; in gmc_v11_0_set_umc_funcs()
597 if (adev->umc.node_inst_num == 4) in gmc_v11_0_set_umc_funcs()
598 adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0]; in gmc_v11_0_set_umc_funcs()
600 adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0]; in gmc_v11_0_set_umc_funcs()
601 adev->umc.ras = &umc_v8_10_ras; in gmc_v11_0_set_umc_funcs()
611 static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_mmhub_funcs() argument
613 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v11_0_set_mmhub_funcs()
615 adev->mmhub.funcs = &mmhub_v3_0_1_funcs; in gmc_v11_0_set_mmhub_funcs()
618 adev->mmhub.funcs = &mmhub_v3_0_2_funcs; in gmc_v11_0_set_mmhub_funcs()
621 adev->mmhub.funcs = &mmhub_v3_0_funcs; in gmc_v11_0_set_mmhub_funcs()
626 static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_gfxhub_funcs() argument
628 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v11_0_set_gfxhub_funcs()
630 adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs; in gmc_v11_0_set_gfxhub_funcs()
633 adev->gfxhub.funcs = &gfxhub_v3_0_funcs; in gmc_v11_0_set_gfxhub_funcs()
640 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_early_init() local
642 gmc_v11_0_set_gfxhub_funcs(adev); in gmc_v11_0_early_init()
643 gmc_v11_0_set_mmhub_funcs(adev); in gmc_v11_0_early_init()
644 gmc_v11_0_set_gmc_funcs(adev); in gmc_v11_0_early_init()
645 gmc_v11_0_set_irq_funcs(adev); in gmc_v11_0_early_init()
646 gmc_v11_0_set_umc_funcs(adev); in gmc_v11_0_early_init()
648 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v11_0_early_init()
649 adev->gmc.shared_aperture_end = in gmc_v11_0_early_init()
650 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v11_0_early_init()
651 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v11_0_early_init()
652 adev->gmc.private_aperture_end = in gmc_v11_0_early_init()
653 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v11_0_early_init()
654 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; in gmc_v11_0_early_init()
661 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_late_init() local
664 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v11_0_late_init()
668 r = amdgpu_gmc_ras_late_init(adev); in gmc_v11_0_late_init()
672 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v11_0_late_init()
675 static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v11_0_vram_gtt_location() argument
680 base = adev->mmhub.funcs->get_fb_location(adev); in gmc_v11_0_vram_gtt_location()
682 amdgpu_gmc_vram_location(adev, &adev->gmc, base); in gmc_v11_0_vram_gtt_location()
683 amdgpu_gmc_gart_location(adev, mc); in gmc_v11_0_vram_gtt_location()
684 amdgpu_gmc_agp_location(adev, mc); in gmc_v11_0_vram_gtt_location()
687 if (amdgpu_sriov_vf(adev)) in gmc_v11_0_vram_gtt_location()
688 adev->vm_manager.vram_base_offset = 0; in gmc_v11_0_vram_gtt_location()
690 adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev); in gmc_v11_0_vram_gtt_location()
702 static int gmc_v11_0_mc_init(struct amdgpu_device *adev) in gmc_v11_0_mc_init() argument
707 adev->gmc.mc_vram_size = in gmc_v11_0_mc_init()
708 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v11_0_mc_init()
709 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v11_0_mc_init()
711 if (!(adev->flags & AMD_IS_APU)) { in gmc_v11_0_mc_init()
712 r = amdgpu_device_resize_fb_bar(adev); in gmc_v11_0_mc_init()
716 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v11_0_mc_init()
717 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v11_0_mc_init()
720 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { in gmc_v11_0_mc_init()
721 adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev); in gmc_v11_0_mc_init()
722 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v11_0_mc_init()
726 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v11_0_mc_init()
727 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) in gmc_v11_0_mc_init()
728 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; in gmc_v11_0_mc_init()
732 adev->gmc.gart_size = 512ULL << 20; in gmc_v11_0_mc_init()
734 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v11_0_mc_init()
736 gmc_v11_0_vram_gtt_location(adev, &adev->gmc); in gmc_v11_0_mc_init()
741 static int gmc_v11_0_gart_init(struct amdgpu_device *adev) in gmc_v11_0_gart_init() argument
745 if (adev->gart.bo) { in gmc_v11_0_gart_init()
751 r = amdgpu_gart_init(adev); in gmc_v11_0_gart_init()
755 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v11_0_gart_init()
756 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) | in gmc_v11_0_gart_init()
759 return amdgpu_gart_table_vram_alloc(adev); in gmc_v11_0_gart_init()
765 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_sw_init() local
767 adev->mmhub.funcs->init(adev); in gmc_v11_0_sw_init()
769 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v11_0_sw_init()
771 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v11_0_sw_init()
773 adev->gmc.vram_width = vram_width; in gmc_v11_0_sw_init()
775 adev->gmc.vram_type = vram_type; in gmc_v11_0_sw_init()
776 adev->gmc.vram_vendor = vram_vendor; in gmc_v11_0_sw_init()
778 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v11_0_sw_init()
784 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v11_0_sw_init()
785 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v11_0_sw_init()
791 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v11_0_sw_init()
798 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC, in gmc_v11_0_sw_init()
800 &adev->gmc.vm_fault); in gmc_v11_0_sw_init()
805 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, in gmc_v11_0_sw_init()
807 &adev->gmc.vm_fault); in gmc_v11_0_sw_init()
811 if (!amdgpu_sriov_vf(adev)) { in gmc_v11_0_sw_init()
813 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0, in gmc_v11_0_sw_init()
814 &adev->gmc.ecc_irq); in gmc_v11_0_sw_init()
823 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v11_0_sw_init()
825 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); in gmc_v11_0_sw_init()
827 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); in gmc_v11_0_sw_init()
831 adev->need_swiotlb = drm_need_swiotlb(44); in gmc_v11_0_sw_init()
833 r = gmc_v11_0_mc_init(adev); in gmc_v11_0_sw_init()
837 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v11_0_sw_init()
840 r = amdgpu_bo_init(adev); in gmc_v11_0_sw_init()
844 r = gmc_v11_0_gart_init(adev); in gmc_v11_0_sw_init()
854 adev->vm_manager.first_kfd_vmid = 8; in gmc_v11_0_sw_init()
856 amdgpu_vm_manager_init(adev); in gmc_v11_0_sw_init()
858 r = amdgpu_gmc_ras_sw_init(adev); in gmc_v11_0_sw_init()
872 static void gmc_v11_0_gart_fini(struct amdgpu_device *adev) in gmc_v11_0_gart_fini() argument
874 amdgpu_gart_table_vram_free(adev); in gmc_v11_0_gart_fini()
879 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_sw_fini() local
881 amdgpu_vm_manager_fini(adev); in gmc_v11_0_sw_fini()
882 gmc_v11_0_gart_fini(adev); in gmc_v11_0_sw_fini()
883 amdgpu_gem_force_release(adev); in gmc_v11_0_sw_fini()
884 amdgpu_bo_fini(adev); in gmc_v11_0_sw_fini()
889 static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v11_0_init_golden_registers() argument
891 if (amdgpu_sriov_vf(adev)) { in gmc_v11_0_init_golden_registers()
892 struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; in gmc_v11_0_init_golden_registers()
904 static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) in gmc_v11_0_gart_enable() argument
909 if (adev->gart.bo == NULL) { in gmc_v11_0_gart_enable()
910 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v11_0_gart_enable()
914 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v11_0_gart_enable()
916 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v11_0_gart_enable()
921 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v11_0_gart_enable()
926 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v11_0_gart_enable()
927 gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); in gmc_v11_0_gart_enable()
930 (unsigned int)(adev->gmc.gart_size >> 20), in gmc_v11_0_gart_enable()
931 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v11_0_gart_enable()
939 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_hw_init() local
942 gmc_v11_0_init_golden_registers(adev); in gmc_v11_0_hw_init()
944 r = gmc_v11_0_gart_enable(adev); in gmc_v11_0_hw_init()
948 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v11_0_hw_init()
949 adev->umc.funcs->init_registers(adev); in gmc_v11_0_hw_init()
961 static void gmc_v11_0_gart_disable(struct amdgpu_device *adev) in gmc_v11_0_gart_disable() argument
963 adev->mmhub.funcs->gart_disable(adev); in gmc_v11_0_gart_disable()
968 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_hw_fini() local
970 if (amdgpu_sriov_vf(adev)) { in gmc_v11_0_hw_fini()
976 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v11_0_hw_fini()
978 if (adev->gmc.ecc_irq.funcs && in gmc_v11_0_hw_fini()
979 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) in gmc_v11_0_hw_fini()
980 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v11_0_hw_fini()
982 gmc_v11_0_gart_disable(adev); in gmc_v11_0_hw_fini()
989 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_suspend() local
991 gmc_v11_0_hw_fini(adev); in gmc_v11_0_suspend()
999 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_resume() local
1001 r = gmc_v11_0_hw_init(adev); in gmc_v11_0_resume()
1005 amdgpu_vmid_reset_all(adev); in gmc_v11_0_resume()
1031 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_set_clockgating_state() local
1033 r = adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v11_0_set_clockgating_state()
1037 return athub_v3_0_set_clockgating(adev, state); in gmc_v11_0_set_clockgating_state()
1042 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_get_clockgating_state() local
1044 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v11_0_get_clockgating_state()
1046 athub_v3_0_get_clockgating(adev, flags); in gmc_v11_0_get_clockgating_state()