Lines Matching refs:node

550 static int kfd_gws_init(struct kfd_node *node) in kfd_gws_init() argument
553 struct kfd_dev *kfd = node->kfd; in kfd_gws_init()
554 uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; in kfd_gws_init()
556 if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) in kfd_gws_init()
559 if (hws_gws_support || (KFD_IS_SOC15(node) && in kfd_gws_init()
560 ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1) in kfd_gws_init()
562 (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0) in kfd_gws_init()
564 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1) in kfd_gws_init()
566 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2) in kfd_gws_init()
568 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) || in kfd_gws_init()
569 KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) || in kfd_gws_init()
570 (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) in kfd_gws_init()
571 && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) in kfd_gws_init()
573 (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0) in kfd_gws_init()
574 && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0) in kfd_gws_init()
576 ret = amdgpu_amdkfd_alloc_gws(node->adev, in kfd_gws_init()
577 node->adev->gds.gws_size, &node->gws); in kfd_gws_init()
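kfd_gws_init() gates GWS (global wave sync) allocation on the scheduler policy and on a per-IP support check: the HW scheduler must be in use, and either the hws_gws_support module override is set or the GC IP version plus the firmware levels compared on the elided lines permit it. A minimal sketch of that shape, with the per-IP comparisons collapsed into a hypothetical helper:

    static int kfd_gws_init_sketch(struct kfd_node *node)
    {
        int ret = 0;

        /* GWS is only usable under the HW scheduler. */
        if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
            return 0;

        /* kfd_gc_gws_supported() is hypothetical: it stands in for the
         * KFD_GC_VERSION()/mec_fw_version/mes_rev chain on lines 559-575,
         * whose non-matching lines are elided from this listing. */
        if (hws_gws_support || kfd_gc_gws_supported(node))
            ret = amdgpu_amdkfd_alloc_gws(node->adev,
                                          node->adev->gds.gws_size,
                                          &node->gws);

        return ret;
    }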
588 static int kfd_init_node(struct kfd_node *node) in kfd_init_node() argument
592 if (kfd_interrupt_init(node)) { in kfd_init_node()
597 node->dqm = device_queue_manager_init(node); in kfd_init_node()
598 if (!node->dqm) { in kfd_init_node()
603 if (kfd_gws_init(node)) { in kfd_init_node()
605 node->adev->gds.gws_size); in kfd_init_node()
609 if (kfd_resume(node)) in kfd_init_node()
612 if (kfd_topology_add_device(node)) { in kfd_init_node()
617 kfd_smi_init(node); in kfd_init_node()
624 device_queue_manager_uninit(node->dqm); in kfd_init_node()
626 kfd_interrupt_exit(node); in kfd_init_node()
628 if (node->gws) in kfd_init_node()
629 amdgpu_amdkfd_free_gws(node->adev, node->gws); in kfd_init_node()
632 kfree(node); in kfd_init_node()
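The error labels are elided from the listing; kfd_init_node() follows the usual kernel goto-unwind pattern, tearing down each successfully initialized step in reverse order. A sketch with assumed label names and error code, reconstructed from the cleanup calls shown above:

    static int kfd_init_node_sketch(struct kfd_node *node)
    {
        int err = -EINVAL;  /* assumption: real code sets err per step */

        if (kfd_interrupt_init(node))
            goto err_free_node;

        node->dqm = device_queue_manager_init(node);
        if (!node->dqm)
            goto err_interrupt_exit;

        /* Failure to allocate node->adev->gds.gws_size GWS aborts init. */
        if (kfd_gws_init(node))
            goto err_dqm_uninit;

        if (kfd_resume(node))
            goto err_dqm_uninit;

        if (kfd_topology_add_device(node))
            goto err_dqm_uninit;

        kfd_smi_init(node);
        return 0;

    err_dqm_uninit:
        device_queue_manager_uninit(node->dqm);
    err_interrupt_exit:
        kfd_interrupt_exit(node);
    err_free_node:
        if (node->gws)
            amdgpu_amdkfd_free_gws(node->adev, node->gws);
        kfree(node);
        return err;
    }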
661 static void kfd_setup_interrupt_bitmap(struct kfd_node *node, in kfd_setup_interrupt_bitmap() argument
664 struct amdgpu_device *adev = node->adev; in kfd_setup_interrupt_bitmap()
665 uint32_t xcc_mask = node->xcc_mask; in kfd_setup_interrupt_bitmap()
690 node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2)); in kfd_setup_interrupt_bitmap()
693 node->interrupt_bitmap); in kfd_setup_interrupt_bitmap()
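The expression on line 690 packs two XCCs into each 4-bit nibble of interrupt_bitmap: an even mapped_xcc contributes 0b0011 and an odd one 0b0101. A standalone demo of the arithmetic (the loop bound of four XCCs is an assumption for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Demo of the kfd_setup_interrupt_bitmap() bit expression. */
    int main(void)
    {
        uint32_t bitmap = 0;
        int mapped_xcc;

        for (mapped_xcc = 0; mapped_xcc < 4; mapped_xcc++) {
            bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
            printf("mapped_xcc=%d bitmap=0x%02x\n", mapped_xcc, bitmap);
        }
        /* Prints 0x03, 0x07, 0x37, 0x77. */
        return 0;
    }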
700 struct kfd_node *node; in kgd2kfd_device_init() local
835 node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); in kgd2kfd_device_init()
836 if (!node) in kgd2kfd_device_init()
839 node->node_id = i; in kgd2kfd_device_init()
840 node->adev = kfd->adev; in kgd2kfd_device_init()
841 node->kfd = kfd; in kgd2kfd_device_init()
842 node->kfd2kgd = kfd->kfd2kgd; in kgd2kfd_device_init()
843 node->vm_info.vmid_num_kfd = vmid_num_kfd; in kgd2kfd_device_init()
844 node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx); in kgd2kfd_device_init()
846 if (node->xcp) { in kgd2kfd_device_init()
847 amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX, in kgd2kfd_device_init()
848 &node->xcc_mask); in kgd2kfd_device_init()
851 node->xcc_mask = in kgd2kfd_device_init()
855 if (node->xcp) { in kgd2kfd_device_init()
857 node->node_id, node->xcp->mem_id, in kgd2kfd_device_init()
858 KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20); in kgd2kfd_device_init()
869 node->vm_info.first_vmid_kfd = (i%2 == 0) ? in kgd2kfd_device_init()
872 node->vm_info.last_vmid_kfd = (i%2 == 0) ? in kgd2kfd_device_init()
875 node->compute_vmid_bitmap = in kgd2kfd_device_init()
876 ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) - in kgd2kfd_device_init()
877 ((0x1 << (node->vm_info.first_vmid_kfd)) - 1); in kgd2kfd_device_init()
879 node->vm_info.first_vmid_kfd = first_vmid_kfd; in kgd2kfd_device_init()
880 node->vm_info.last_vmid_kfd = last_vmid_kfd; in kgd2kfd_device_init()
881 node->compute_vmid_bitmap = in kgd2kfd_device_init()
884 node->max_proc_per_quantum = max_proc_per_quantum; in kgd2kfd_device_init()
885 atomic_set(&node->sram_ecc_flag, 0); in kgd2kfd_device_init()
888 &node->local_mem_info, node->xcp); in kgd2kfd_device_init()
892 kfd_setup_interrupt_bitmap(node, i); in kgd2kfd_device_init()
895 if (kfd_init_node(node)) { in kgd2kfd_device_init()
900 spin_lock_init(&node->watch_points_lock); in kgd2kfd_device_init()
902 kfd->nodes[i] = node; in kgd2kfd_device_init()
912 node->dqm->sched_policy); in kgd2kfd_device_init()
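Lines 875-877 build a contiguous mask covering first_vmid_kfd through last_vmid_kfd inclusive: ((1 << (last + 1)) - 1) sets bits 0..last, and subtracting ((1 << first) - 1) clears bits 0..first-1. A standalone demo (the 8..15 VMID split is illustrative, not taken from a specific ASIC):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int first_vmid_kfd = 8, last_vmid_kfd = 15;
        uint32_t bitmap = ((0x1 << (last_vmid_kfd + 1)) - 1) -
                          ((0x1 << first_vmid_kfd) - 1);

        printf("compute_vmid_bitmap = 0x%08x\n", bitmap); /* 0x0000ff00 */
        return 0;
    }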
950 struct kfd_node *node; in kgd2kfd_pre_reset() local
957 node = kfd->nodes[i]; in kgd2kfd_pre_reset()
958 kfd_smi_event_update_gpu_reset(node, false, reset_context); in kgd2kfd_pre_reset()
978 struct kfd_node *node; in kgd2kfd_post_reset() local
995 node = kfd->nodes[i]; in kgd2kfd_post_reset()
996 atomic_set(&node->sram_ecc_flag, 0); in kgd2kfd_post_reset()
997 kfd_smi_event_update_gpu_reset(node, true, NULL); in kgd2kfd_post_reset()
1011 struct kfd_node *node; in kgd2kfd_suspend() local
1027 node = kfd->nodes[i]; in kgd2kfd_suspend()
1028 node->dqm->ops.stop(node->dqm); in kgd2kfd_suspend()
1057 static int kfd_resume(struct kfd_node *node) in kfd_resume() argument
1061 err = node->dqm->ops.start(node->dqm); in kfd_resume()
1065 node->adev->pdev->vendor, node->adev->pdev->device); in kfd_resume()
1076 struct kfd_node *node; in kgd2kfd_interrupt() local
1087 node = kfd->nodes[i]; in kgd2kfd_interrupt()
1088 spin_lock_irqsave(&node->interrupt_lock, flags); in kgd2kfd_interrupt()
1090 if (node->interrupts_active in kgd2kfd_interrupt()
1091 && interrupt_is_wanted(node, ih_ring_entry, in kgd2kfd_interrupt()
1093 && enqueue_ih_ring_entry(node, in kgd2kfd_interrupt()
1095 queue_work(node->kfd->ih_wq, &node->interrupt_work); in kgd2kfd_interrupt()
1096 spin_unlock_irqrestore(&node->interrupt_lock, flags); in kgd2kfd_interrupt()
1099 spin_unlock_irqrestore(&node->interrupt_lock, flags); in kgd2kfd_interrupt()
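The loop body in kgd2kfd_interrupt() is a classic top-half filter: under the per-node lock it asks whether the IH entry is wanted, copies it into the node's software ring, and defers processing to interrupt_work on the ih_wq workqueue. Reconstructed control flow (the early return once a node consumes the entry is an assumption; the listing shows only lines mentioning "node"):

    spin_lock_irqsave(&node->interrupt_lock, flags);
    if (node->interrupts_active &&
        interrupt_is_wanted(node, ih_ring_entry,
                            patched_ihre, &is_patched) &&
        enqueue_ih_ring_entry(node,
                              is_patched ? patched_ihre : ih_ring_entry)) {
        /* Bottom half: node->interrupt_work drains the SW ring. */
        queue_work(node->kfd->ih_wq, &node->interrupt_work);
        spin_unlock_irqrestore(&node->interrupt_lock, flags);
        return;  /* assumption: entry consumed, stop probing nodes */
    }
    spin_unlock_irqrestore(&node->interrupt_lock, flags);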
1239 int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, in kfd_gtt_sa_allocate() argument
1243 struct kfd_dev *kfd = node->kfd; in kfd_gtt_sa_allocate()
1343 int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) in kfd_gtt_sa_free() argument
1345 struct kfd_dev *kfd = node->kfd; in kfd_gtt_sa_free()
1377 void kfd_inc_compute_active(struct kfd_node *node) in kfd_inc_compute_active() argument
1379 if (atomic_inc_return(&node->kfd->compute_profile) == 1) in kfd_inc_compute_active()
1380 amdgpu_amdkfd_set_compute_idle(node->adev, false); in kfd_inc_compute_active()
1383 void kfd_dec_compute_active(struct kfd_node *node) in kfd_dec_compute_active() argument
1385 int count = atomic_dec_return(&node->kfd->compute_profile); in kfd_dec_compute_active()
1388 amdgpu_amdkfd_set_compute_idle(node->adev, true); in kfd_dec_compute_active()
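kfd_inc_compute_active() and kfd_dec_compute_active() act only on the 0->1 and 1->0 edges of the shared compute_profile refcount, toggling the compute-idle power profile exactly once per transition. A standalone C11 demo of that edge detection, with set_compute_idle() standing in for amdgpu_amdkfd_set_compute_idle():

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_int compute_profile;

    static void set_compute_idle(int idle)
    {
        printf("compute idle -> %d\n", idle);
    }

    static void inc_compute_active(void)
    {
        /* fetch_add returns the old value; old + 1 is the new count. */
        if (atomic_fetch_add(&compute_profile, 1) + 1 == 1)
            set_compute_idle(0);
    }

    static void dec_compute_active(void)
    {
        int count = atomic_fetch_sub(&compute_profile, 1) - 1;

        if (count == 0)
            set_compute_idle(1);
        /* count < 0 would mean an unbalanced dec; the real code warns. */
    }

    int main(void)
    {
        inc_compute_active();  /* 0 -> 1: leave idle  */
        inc_compute_active();  /* 1 -> 2: no change   */
        dec_compute_active();  /* 2 -> 1: no change   */
        dec_compute_active();  /* 1 -> 0: back idle   */
        return 0;
    }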
1409 unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) in kfd_get_num_sdma_engines() argument
1412 if (!node->adev->gmc.xgmi.supported) in kfd_get_num_sdma_engines()
1413 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes; in kfd_get_num_sdma_engines()
1415 return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2); in kfd_get_num_sdma_engines()
1418 unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) in kfd_get_num_xgmi_sdma_engines() argument
1421 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes - in kfd_get_num_xgmi_sdma_engines()
1422 kfd_get_num_sdma_engines(node); in kfd_get_num_xgmi_sdma_engines()
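kfd_get_num_sdma_engines() splits the ASIC's SDMA instances evenly among the KFD nodes; when XGMI is supported, each node keeps at most two general-purpose engines, and kfd_get_num_xgmi_sdma_engines() returns the remainder of that node's share. A standalone demo of the arithmetic (eight instances over two nodes is an assumed example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_instances = 8, num_nodes = 2;
        unsigned int per_node = num_instances / num_nodes;   /* 4 */
        unsigned int general = per_node < 2 ? per_node : 2;  /* 2 */
        unsigned int xgmi = per_node - general;              /* 2 */

        printf("general=%u xgmi=%u\n", general, xgmi);
        return 0;
    }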
1448 struct kfd_node *node; in kgd2kfd_start_sched() local
1459 node = kfd->nodes[node_id]; in kgd2kfd_start_sched()
1461 ret = node->dqm->ops.unhalt(node->dqm); in kgd2kfd_start_sched()
1470 struct kfd_node *node; in kgd2kfd_stop_sched() local
1481 node = kfd->nodes[node_id]; in kgd2kfd_stop_sched()
1482 return node->dqm->ops.halt(node->dqm); in kgd2kfd_stop_sched()
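kgd2kfd_start_sched() and kgd2kfd_stop_sched() resolve the node and toggle the scheduler through the DQM ops table (unhalt/halt). A sketch of the stop path; the init_complete and bounds checks are assumptions reconstructed from context, since their lines are elided here:

    int kgd2kfd_stop_sched_sketch(struct kfd_dev *kfd, uint32_t node_id)
    {
        struct kfd_node *node;

        if (!kfd->init_complete)        /* assumption */
            return 0;

        if (node_id >= kfd->num_nodes)  /* assumption */
            return -EINVAL;

        node = kfd->nodes[node_id];
        return node->dqm->ops.halt(node->dqm);
    }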