Lines matching refs: node
483 static int kfd_gws_init(struct kfd_node *node) in kfd_gws_init() argument
486 struct kfd_dev *kfd = node->kfd; in kfd_gws_init()
487 uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; in kfd_gws_init()
489 if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) in kfd_gws_init()
492 if (hws_gws_support || (KFD_IS_SOC15(node) && in kfd_gws_init()
493 ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1) in kfd_gws_init()
495 (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0) in kfd_gws_init()
497 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1) in kfd_gws_init()
499 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2) in kfd_gws_init()
501 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) || in kfd_gws_init()
502 (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) in kfd_gws_init()
503 && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) in kfd_gws_init()
505 (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0) in kfd_gws_init()
506 && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0) in kfd_gws_init()
508 ret = amdgpu_amdkfd_alloc_gws(node->adev, in kfd_gws_init()
509 node->adev->gds.gws_size, &node->gws); in kfd_gws_init()
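The kfd_gws_init() matches above gate GWS allocation on the scheduling policy (NO_HWS bails out early), on firmware revisions such as the MES scheduler version, and on GC IP version windows before calling amdgpu_amdkfd_alloc_gws(). A minimal sketch of one such version-window test, reusing the KFD_GC_VERSION()/IP_VERSION() macros from the lines above; the helper name and the reduced condition are illustrative only, not the driver's full check:

	/* Illustrative only: one reduced IP-window test in the spirit of the
	 * checks listed above; the real kfd_gws_init() also looks at
	 * hws_gws_support, MEC/MES firmware versions and the NO_HWS policy.
	 * Assumes the kfd_priv.h definitions of KFD_GC_VERSION()/IP_VERSION(). */
	static bool gws_window_example(struct kfd_node *node)
	{
		return KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) &&
		       KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0);
	}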
520 static int kfd_init_node(struct kfd_node *node) in kfd_init_node() argument
524 if (kfd_interrupt_init(node)) { in kfd_init_node()
529 node->dqm = device_queue_manager_init(node); in kfd_init_node()
530 if (!node->dqm) { in kfd_init_node()
535 if (kfd_gws_init(node)) { in kfd_init_node()
537 node->adev->gds.gws_size); in kfd_init_node()
541 if (kfd_resume(node)) in kfd_init_node()
544 if (kfd_topology_add_device(node)) { in kfd_init_node()
549 kfd_smi_init(node); in kfd_init_node()
556 device_queue_manager_uninit(node->dqm); in kfd_init_node()
558 kfd_interrupt_exit(node); in kfd_init_node()
560 if (node->gws) in kfd_init_node()
561 amdgpu_amdkfd_free_gws(node->adev, node->gws); in kfd_init_node()
564 kfree(node); in kfd_init_node()
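Read top to bottom, the kfd_init_node() matches follow the usual init/unwind shape: interrupt setup, device queue manager creation, optional GWS allocation, resume, topology registration and SMI init, with the error path tearing things down in reverse (device_queue_manager_uninit(), kfd_interrupt_exit(), freeing GWS, kfree() of the node). A generic skeleton of that shape with hypothetical step helpers, assuming goto-based unwinding as in most kernel init paths:

	/* Hypothetical helpers, sketching only the ordering visible above. */
	static int step_a_init(struct kfd_node *node);	/* e.g. interrupts */
	static void step_a_exit(struct kfd_node *node);
	static int step_b_init(struct kfd_node *node);	/* e.g. queue manager */

	static int init_node_sketch(struct kfd_node *node)
	{
		int err;

		err = step_a_init(node);
		if (err)
			return err;

		err = step_b_init(node);
		if (err)
			goto undo_a;

		return 0;

	undo_a:
		step_a_exit(node);	/* unwind in reverse order of setup */
		return err;
	}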
585 static void kfd_setup_interrupt_bitmap(struct kfd_node *node, in kfd_setup_interrupt_bitmap() argument
588 struct amdgpu_device *adev = node->adev; in kfd_setup_interrupt_bitmap()
589 uint32_t xcc_mask = node->xcc_mask; in kfd_setup_interrupt_bitmap()
614 node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2)); in kfd_setup_interrupt_bitmap()
617 node->interrupt_bitmap); in kfd_setup_interrupt_bitmap()
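The OR-in on line 614 packs two interrupt-routing bits per XCC: even mapped XCCs contribute 3 (0b0011), odd ones 5 (0b0101), shifted one nibble per XCC pair. A small standalone check of the resulting values (the loop bound of four XCCs is just an example):

	#include <stdio.h>

	int main(void)
	{
		/* Reproduces the per-XCC bit pattern from line 614. */
		for (unsigned int mapped_xcc = 0; mapped_xcc < 4; mapped_xcc++) {
			unsigned int bits =
				(mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
			printf("mapped_xcc %u -> 0x%02x\n", mapped_xcc, bits);
		}
		return 0;	/* prints 0x03, 0x05, 0x30, 0x50 */
	}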
624 struct kfd_node *node; in kgd2kfd_device_init() local
756 node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); in kgd2kfd_device_init()
757 if (!node) in kgd2kfd_device_init()
760 node->node_id = i; in kgd2kfd_device_init()
761 node->adev = kfd->adev; in kgd2kfd_device_init()
762 node->kfd = kfd; in kgd2kfd_device_init()
763 node->kfd2kgd = kfd->kfd2kgd; in kgd2kfd_device_init()
764 node->vm_info.vmid_num_kfd = vmid_num_kfd; in kgd2kfd_device_init()
765 node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx); in kgd2kfd_device_init()
767 if (node->xcp) { in kgd2kfd_device_init()
768 amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX, in kgd2kfd_device_init()
769 &node->xcc_mask); in kgd2kfd_device_init()
772 node->xcc_mask = in kgd2kfd_device_init()
776 if (node->xcp) { in kgd2kfd_device_init()
778 node->node_id, node->xcp->mem_id, in kgd2kfd_device_init()
779 KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20); in kgd2kfd_device_init()
789 node->vm_info.first_vmid_kfd = (i%2 == 0) ? in kgd2kfd_device_init()
792 node->vm_info.last_vmid_kfd = (i%2 == 0) ? in kgd2kfd_device_init()
795 node->compute_vmid_bitmap = in kgd2kfd_device_init()
796 ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) - in kgd2kfd_device_init()
797 ((0x1 << (node->vm_info.first_vmid_kfd)) - 1); in kgd2kfd_device_init()
799 node->vm_info.first_vmid_kfd = first_vmid_kfd; in kgd2kfd_device_init()
800 node->vm_info.last_vmid_kfd = last_vmid_kfd; in kgd2kfd_device_init()
801 node->compute_vmid_bitmap = in kgd2kfd_device_init()
804 node->max_proc_per_quantum = max_proc_per_quantum; in kgd2kfd_device_init()
805 atomic_set(&node->sram_ecc_flag, 0); in kgd2kfd_device_init()
808 &node->local_mem_info, node->xcp); in kgd2kfd_device_init()
811 kfd_setup_interrupt_bitmap(node, i); in kgd2kfd_device_init()
814 if (kfd_init_node(node)) { in kgd2kfd_device_init()
818 kfd->nodes[i] = node; in kgd2kfd_device_init()
830 node->dqm->sched_policy); in kgd2kfd_device_init()
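Lines 795-797 above build compute_vmid_bitmap as a contiguous run of bits from first_vmid_kfd through last_vmid_kfd: ((1 << (last + 1)) - 1) - ((1 << first) - 1). A standalone check with example VMID bounds (8 and 15 are illustrative values, not tied to a particular partitioning):

	#include <stdio.h>

	int main(void)
	{
		unsigned int first_vmid_kfd = 8, last_vmid_kfd = 15;	/* example only */
		unsigned int bitmap = ((0x1 << (last_vmid_kfd + 1)) - 1) -
				      ((0x1 << first_vmid_kfd) - 1);

		printf("0x%04x\n", bitmap);	/* 0xff00: bits 8..15 set */
		return 0;
	}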
867 struct kfd_node *node; in kgd2kfd_pre_reset() local
874 node = kfd->nodes[i]; in kgd2kfd_pre_reset()
875 kfd_smi_event_update_gpu_reset(node, false); in kgd2kfd_pre_reset()
876 node->dqm->ops.pre_reset(node->dqm); in kgd2kfd_pre_reset()
896 struct kfd_node *node; in kgd2kfd_post_reset() local
913 node = kfd->nodes[i]; in kgd2kfd_post_reset()
914 atomic_set(&node->sram_ecc_flag, 0); in kgd2kfd_post_reset()
915 kfd_smi_event_update_gpu_reset(node, true); in kgd2kfd_post_reset()
929 struct kfd_node *node; in kgd2kfd_suspend() local
945 node = kfd->nodes[i]; in kgd2kfd_suspend()
946 node->dqm->ops.stop(node->dqm); in kgd2kfd_suspend()
975 static int kfd_resume(struct kfd_node *node) in kfd_resume() argument
979 err = node->dqm->ops.start(node->dqm); in kfd_resume()
983 node->adev->pdev->vendor, node->adev->pdev->device); in kfd_resume()
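kgd2kfd_suspend() and kfd_resume() bracket the same object: each node's device queue manager is stopped through node->dqm->ops.stop() and restarted through node->dqm->ops.start(), with the PCI vendor/device IDs only feeding the error message. A sketch of the stop side of that pairing, assuming a plain loop over kfd->nodes and omitting the surrounding suspend bookkeeping:

	/* Sketch only: per-node stop, mirroring the kgd2kfd_suspend() lines above. */
	static void suspend_nodes_sketch(struct kfd_dev *kfd)
	{
		unsigned int i;

		for (i = 0; i < kfd->num_nodes; i++) {
			struct kfd_node *node = kfd->nodes[i];

			node->dqm->ops.stop(node->dqm);
		}
	}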
1009 struct kfd_node *node; in kgd2kfd_interrupt() local
1020 node = kfd->nodes[i]; in kgd2kfd_interrupt()
1021 spin_lock_irqsave(&node->interrupt_lock, flags); in kgd2kfd_interrupt()
1023 if (node->interrupts_active in kgd2kfd_interrupt()
1024 && interrupt_is_wanted(node, ih_ring_entry, in kgd2kfd_interrupt()
1026 && enqueue_ih_ring_entry(node, in kgd2kfd_interrupt()
1028 kfd_queue_work(node->ih_wq, &node->interrupt_work); in kgd2kfd_interrupt()
1029 spin_unlock_irqrestore(&node->interrupt_lock, flags); in kgd2kfd_interrupt()
1032 spin_unlock_irqrestore(&node->interrupt_lock, flags); in kgd2kfd_interrupt()
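The kgd2kfd_interrupt() lines show the usual split between interrupt context and deferred work: under interrupt_lock, entries that pass interrupt_is_wanted() are pushed into a ring with enqueue_ih_ring_entry() and heavier processing is kicked to interrupt_work on ih_wq. A generic sketch of that top half with hypothetical filter/enqueue helpers, assuming a workqueue bottom half:

	/* Hypothetical helper names; mirrors the lock -> filter -> enqueue ->
	 * kick-work sequence visible above. */
	static bool entry_is_wanted_sketch(struct kfd_node *node, const void *entry);
	static bool ring_enqueue_sketch(struct kfd_node *node, const void *entry);

	static void irq_top_half_sketch(struct kfd_node *node, const void *entry)
	{
		unsigned long flags;

		spin_lock_irqsave(&node->interrupt_lock, flags);
		if (node->interrupts_active &&
		    entry_is_wanted_sketch(node, entry) &&
		    ring_enqueue_sketch(node, entry))
			queue_work(node->ih_wq, &node->interrupt_work);
		spin_unlock_irqrestore(&node->interrupt_lock, flags);
	}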
1172 int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, in kfd_gtt_sa_allocate() argument
1176 struct kfd_dev *kfd = node->kfd; in kfd_gtt_sa_allocate()
1276 int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) in kfd_gtt_sa_free() argument
1278 struct kfd_dev *kfd = node->kfd; in kfd_gtt_sa_free()
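kfd_gtt_sa_allocate() and kfd_gtt_sa_free() both reach through node->kfd, so the GTT sub-allocator state is shared by all nodes of one device. A generic chunk-bitmap sub-allocator sketch, purely to illustrate that kind of allocator; the chunk count, bitmap and helpers are assumptions, not the driver's fields:

	#include <stdint.h>

	#define CHUNKS_SKETCH 64

	static uint64_t chunk_bitmap_sketch;	/* one bit per chunk, per device */

	/* Grab the first free chunk and return its index, or -1 when full. */
	static int chunk_alloc_sketch(void)
	{
		for (int i = 0; i < CHUNKS_SKETCH; i++) {
			if (!(chunk_bitmap_sketch & (1ULL << i))) {
				chunk_bitmap_sketch |= 1ULL << i;
				return i;
			}
		}
		return -1;
	}

	static void chunk_free_sketch(int i)
	{
		chunk_bitmap_sketch &= ~(1ULL << i);
	}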
1310 void kfd_inc_compute_active(struct kfd_node *node) in kfd_inc_compute_active() argument
1312 if (atomic_inc_return(&node->kfd->compute_profile) == 1) in kfd_inc_compute_active()
1313 amdgpu_amdkfd_set_compute_idle(node->adev, false); in kfd_inc_compute_active()
1316 void kfd_dec_compute_active(struct kfd_node *node) in kfd_dec_compute_active() argument
1318 int count = atomic_dec_return(&node->kfd->compute_profile); in kfd_dec_compute_active()
1321 amdgpu_amdkfd_set_compute_idle(node->adev, true); in kfd_dec_compute_active()
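kfd_inc_compute_active() and kfd_dec_compute_active() keep a per-device reference count that doubles as a power hint: the first user (count going 0 to 1) clears the compute-idle state through amdgpu_amdkfd_set_compute_idle(adev, false), and the count returning to zero sets it idle again. A generic sketch of that refcount-to-hint pattern with hypothetical hook names:

	#include <linux/atomic.h>

	static atomic_t users_sketch = ATOMIC_INIT(0);

	static void set_busy_hint_sketch(void) { /* e.g. leave the idle profile */ }
	static void set_idle_hint_sketch(void) { /* e.g. re-enter the idle profile */ }

	static void user_enter_sketch(void)
	{
		if (atomic_inc_return(&users_sketch) == 1)
			set_busy_hint_sketch();	/* first user */
	}

	static void user_leave_sketch(void)
	{
		if (atomic_dec_return(&users_sketch) == 0)
			set_idle_hint_sketch();	/* last user */
	}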
1342 unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) in kfd_get_num_sdma_engines() argument
1345 if (!node->adev->gmc.xgmi.supported) in kfd_get_num_sdma_engines()
1346 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes; in kfd_get_num_sdma_engines()
1348 return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2); in kfd_get_num_sdma_engines()
1351 unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) in kfd_get_num_xgmi_sdma_engines() argument
1354 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes - in kfd_get_num_xgmi_sdma_engines()
1355 kfd_get_num_sdma_engines(node); in kfd_get_num_xgmi_sdma_engines()
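The last two helpers split the device's SDMA instances per KFD node: without xGMI every instance divided across num_nodes is general-purpose; with xGMI at most two per node stay general-purpose and the rest are counted as xGMI engines. A standalone check with example counts (16 instances and 4 nodes are illustrative numbers, not a specific product):

	#include <stdio.h>

	int main(void)
	{
		int num_instances = 16, num_nodes = 4;		/* example values */
		int per_node = num_instances / num_nodes;
		int general = per_node < 2 ? per_node : 2;	/* min(per_node, 2), xGMI case */
		int xgmi = per_node - general;

		printf("general %d, xgmi %d\n", general, xgmi);	/* general 2, xgmi 2 */
		return 0;
	}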