/drivers/gpu/drm/amd/amdkfd/
kfd_flat_memory.c:
  in kfd_init_apertures_vi() (pdd is an argument):
    315   static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
    321   pdd->lds_base = MAKE_LDS_APP_BASE_VI();
    322   pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
    324   if (!pdd->dev->device_info->needs_iommu_device) {
    329   pdd->gpuvm_base = SVM_USER_BASE;
    330   pdd->gpuvm_limit =
    331   pdd->dev->shared_resources.gpuvm_size - 1;
    336   pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
    337   pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base,
    338   pdd->dev->shared_resources.gpuvm_size);
  [all …]

kfd_process.c:
  in kfd_process_free_gpuvm() (pdd is an argument):
    162   struct kfd_process_device *pdd)
    164   struct kfd_dev *dev = pdd->dev;
    166   amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
  in kfd_process_alloc_gpuvm() (pdd is an argument):
    176   static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
    180   struct kfd_dev *kdev = pdd->dev;
    186   pdd->vm, &mem, NULL, flags);
    190   err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
    205   handle = kfd_process_device_create_obj_handle(pdd, mem);
    224   kfd_process_device_remove_obj_handle(pdd, handle);
    227   kfd_process_free_gpuvm(mem, pdd);
  [all …]

kfd_iommu.c:
  in kfd_iommu_bind_process_to_device() (pdd is an argument):
    109   int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
    111   struct kfd_dev *dev = pdd->dev;
    112   struct kfd_process *p = pdd->process;
    115   if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND)
    118   if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
    125   pdd->bound = PDD_BOUND;
  in kfd_iommu_unbind_process() (pdd is a local):
    137   struct kfd_process_device *pdd;
    139   list_for_each_entry(pdd, &p->per_device_data, per_device_list)
    140   if (pdd->bound == PDD_BOUND)
    141   amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
  [all …]

kfd_chardev.c:
  in kfd_ioctl_create_queue() (pdd is a local):
    259   struct kfd_process_device *pdd;
    279   pdd = kfd_bind_process_to_device(dev, p);
    280   if (IS_ERR(pdd)) {
  in kfd_ioctl_set_memory_policy() (pdd is a local):
    472   struct kfd_process_device *pdd;
    491   pdd = kfd_bind_process_to_device(dev, p);
    492   if (IS_ERR(pdd)) {
    505   &pdd->qpd,
  in kfd_ioctl_set_trap_handler() (pdd is a local):
    524   struct kfd_process_device *pdd;
    532   pdd = kfd_bind_process_to_device(dev, p);
    533   if (IS_ERR(pdd)) {
  [all …]

kfd_process_queue_manager.c:
  in kfd_process_dequeue_from_device() (pdd is an argument):
    67    void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
    69    struct kfd_dev *dev = pdd->dev;
    71    if (pdd->already_dequeued)
    74    dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
    75    pdd->already_dequeued = true;
  in pqm_set_gws() (pdd is a local):
    83    struct kfd_process_device *pdd;
    98    pdd = kfd_get_process_device_data(dev, pqm->process);
    99    if (!pdd) {
    105   if (gws && pdd->qpd.num_gws)
    108   if (!gws && pdd->qpd.num_gws == 0)
  [all …]

kfd_device_queue_manager_v10.c:
  in compute_sh_mem_bases_64bit() (pdd is an argument):
    42    static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
    44    uint32_t shared_base = pdd->lds_base >> 48;
    45    uint32_t private_base = pdd->scratch_base >> 48;
  in update_qpd_v10() (pdd is a local):
    54    struct kfd_process_device *pdd;
    56    pdd = qpd_to_pdd(qpd);
    76    qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);

kfd_device_queue_manager_v9.c:
  in compute_sh_mem_bases_64bit() (pdd is an argument):
    43    static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
    45    uint32_t shared_base = pdd->lds_base >> 48;
    46    uint32_t private_base = pdd->scratch_base >> 48;
  in update_qpd_v9() (pdd is a local):
    55    struct kfd_process_device *pdd;
    57    pdd = qpd_to_pdd(qpd);
    73    qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
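
Both compute_sh_mem_bases_64bit() excerpts above (v9 and v10) take the top 16 bits of the 64-bit
LDS and scratch aperture bases and fold them into one 32-bit SH_MEM_BASES value. Below is a
minimal user-space sketch of that packing, assuming the shared (LDS) field occupies the upper 16
bits of the register and the private (scratch) field the lower 16 bits; pack_sh_mem_bases() and
the sample addresses are illustrative, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: mirrors the shift-by-48 seen in the excerpts above,
     * assuming SHARED_BASE sits in bits 31:16 and PRIVATE_BASE in bits 15:0. */
    static uint32_t pack_sh_mem_bases(uint64_t lds_base, uint64_t scratch_base)
    {
        uint32_t shared_base  = (uint32_t)(lds_base >> 48);      /* top 16 bits of the LDS aperture */
        uint32_t private_base = (uint32_t)(scratch_base >> 48);  /* top 16 bits of the scratch aperture */

        return (shared_base << 16) | (private_base & 0xffff);
    }

    int main(void)
    {
        uint64_t lds_base     = 0x0001000000000000ULL;  /* made-up aperture bases */
        uint64_t scratch_base = 0x0002000000000000ULL;

        printf("SH_MEM_BASES = 0x%08x\n", pack_sh_mem_bases(lds_base, scratch_base));
        return 0;  /* prints SH_MEM_BASES = 0x00010002 */
    }
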
kfd_device_queue_manager_cik.c:
  in update_qpd_cik() (pdd is a local):
    119   struct kfd_process_device *pdd;
    122   pdd = qpd_to_pdd(qpd);
    135   temp = get_sh_mem_bases_32(pdd);
    139   temp = get_sh_mem_bases_nybble_64(pdd);
  in update_qpd_cik_hawaii() (pdd is a local):
    153   struct kfd_process_device *pdd;
    156   pdd = qpd_to_pdd(qpd);
    171   temp = get_sh_mem_bases_nybble_64(pdd);

kfd_device_queue_manager_vi.c:
  in update_qpd_vi() (pdd is a local):
    156   struct kfd_process_device *pdd;
    159   pdd = qpd_to_pdd(qpd);
    175   temp = get_sh_mem_bases_32(pdd);
    180   temp = get_sh_mem_bases_nybble_64(pdd);
  in update_qpd_vi_tonga() (pdd is a local):
    197   struct kfd_process_device *pdd;
    200   pdd = qpd_to_pdd(qpd);
    219   temp = get_sh_mem_bases_nybble_64(pdd);

kfd_dbgdev.c:
  in dbgdev_address_watch_nodiq() (pdd is a local):
    275   struct kfd_process_device *pdd;
    279   pdd = kfd_get_process_device_data(dbgdev->dev,
    281   if (!pdd) {
    303   &cntl, i, pdd->qpd.vmid);
    307   pr_debug("\t\t%20s %08x\n", "vmid is :", pdd->qpd.vmid);
    324   pdd->dev->kfd2kgd->address_watch_execute(
  in dbgdev_wave_control_nodiq() (pdd is a local):
    703   struct kfd_process_device *pdd;
    708   pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
    710   if (!pdd) {
    723   reg_sq_cmd.bits.vm_id = pdd->qpd.vmid;
  [all …]

kfd_device_queue_manager.c:
  in update_queue() (pdd is a local):
    500   struct kfd_process_device *pdd;
    504   pdd = kfd_get_process_device_data(q->device, q->process);
    505   if (!pdd) {
  in evict_process_queues_nocpsch() (pdd is a local):
    574   struct kfd_process_device *pdd;
    581   pdd = qpd_to_pdd(qpd);
    583   pdd->process->pasid);
  in evict_process_queues_cpsch() (pdd is a local):
    616   struct kfd_process_device *pdd;
    623   pdd = qpd_to_pdd(qpd);
    625   pdd->process->pasid);
  in restore_process_queues_nocpsch() (pdd is a local):
    654   struct kfd_process_device *pdd;
  [all …]

kfd_device_queue_manager.h:
  in get_sh_mem_bases_32() (pdd is an argument):
    223   static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
    225   return (pdd->lds_base >> 16) & 0xFF;
  in get_sh_mem_bases_nybble_64() (pdd is an argument):
    229   get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
    231   return (pdd->lds_base >> 60) & 0x0E;
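
The two inline helpers above keep different slices of the LDS aperture base: the 32-bit form keeps
bits 23:16, the legacy 64-bit "nybble" form keeps bits 63:60 masked with 0x0E. Below is a small
stand-alone sketch of the same extractions, with a made-up aperture base chosen only so that both
results are non-zero:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t lds_base = 0x6100000000AB0000ULL;  /* hypothetical value, illustration only */

        unsigned int bases_32        = (lds_base >> 16) & 0xFF;   /* bits 23:16 */
        unsigned int bases_nybble_64 = (lds_base >> 60) & 0x0E;   /* bits 63:60, even values only */

        printf("get_sh_mem_bases_32        -> 0x%02x\n", bases_32);       /* 0xab */
        printf("get_sh_mem_bases_nybble_64 -> 0x%x\n", bases_nybble_64);  /* 0x6 */
        return 0;
    }
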
kfd_iommu.h:
    33    int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd);
  in kfd_iommu_bind_process_to_device() (pdd is an argument):
    53    struct kfd_process_device *pdd)

kfd_priv.h:
    772   int kfd_process_device_init_vm(struct kfd_process_device *pdd,
    785   int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
    789   void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
    797   struct kfd_process_device *pdd);
    898   void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
    1033  void kfd_flush_tlb(struct kfd_process_device *pdd);

/drivers/input/touchscreen/
wm9705.c:
  at file scope (pdd is a variable, exposed as a module parameter):
    77    static int pdd = 8;
    78    module_param(pdd, int, 0);
    79    MODULE_PARM_DESC(pdd, "Set pen detect comparator threshold");
  in wm9705_phy_init():
    168   dig2 |= (pdd & 0x000f);
    169   dev_dbg(wm->dev, "setting pdd to Vmid/%d", 1 - (pdd & 0x000f));
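
The wm9705 hits show the standard module-parameter pattern: a file-scope variable, module_param()
to expose it at load time, and MODULE_PARM_DESC() to document it. Below is a minimal sketch of the
same pattern in a hypothetical stand-alone module (the module name, permissions, and init/exit
stubs are made up for illustration; only the low four bits of the value would matter if it were
used the way the wm9705_phy_init() hit above uses it):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    /* Hypothetical demo module; load with e.g. "insmod pdd_demo.ko pdd=12". */
    static int pdd = 8;
    module_param(pdd, int, 0444);
    MODULE_PARM_DESC(pdd, "Pen detect comparator threshold (0-15)");

    static int __init pdd_demo_init(void)
    {
        pr_info("pdd_demo: pdd=%d\n", pdd);
        return 0;
    }

    static void __exit pdd_demo_exit(void)
    {
    }

    module_init(pdd_demo_init);
    module_exit(pdd_demo_exit);
    MODULE_LICENSE("GPL");
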
/drivers/soc/xilinx/
zynqmp_pm_domains.c:
  in zynqmp_gpd_power_off() (pdd is a local):
    107   struct pm_domain_data *pdd, *tmp;
    124   list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {
    126   may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);
    128   dev_dbg(pdd->dev, "device is in wakeup path in %s\n",

/drivers/base/power/
domain_governor.c:
  in __default_power_down_ok() (pdd is a local):
    125   struct pm_domain_data *pdd;
    160   list_for_each_entry(pdd, &genpd->dev_list, list_node) {
    169   td = &to_gpd_data(pdd)->td;

domain.c:
  in _genpd_reeval_performance_state() (pdd is a local):
    244   struct pm_domain_data *pdd;
    256   list_for_each_entry(pdd, &genpd->dev_list, list_node) {
    257   pd_data = to_gpd_data(pdd);
  in genpd_power_off() (pdd is a local):
    497   struct pm_domain_data *pdd;
    519   list_for_each_entry(pdd, &genpd->dev_list, list_node) {
    522   stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
    530   if (!pm_runtime_suspended(pdd->dev) ||
    531   irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
  in genpd_dev_pm_qos_notifier() (pdd is a local):
    648   struct pm_domain_data *pdd;
    652   pdd = dev->power.subsys_data ?
  [all …]
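
The domain.c and domain_governor.c hits all follow the same pattern: walk genpd->dev_list, treat
each entry as a struct pm_domain_data, and reach the per-device genpd bookkeeping via to_gpd_data()
or pdd->dev. Below is a minimal sketch of that walk as a hypothetical helper
(genpd_count_active_devs() is not a real kernel function, and it assumes the caller already holds
the domain lock, as genpd_power_off() does):

    #include <linux/list.h>
    #include <linux/pm_domain.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical helper, illustration only: count devices in the domain that
     * are still runtime-active, walking dev_list the same way the excerpts
     * above do. Caller must hold the genpd lock. */
    static unsigned int genpd_count_active_devs(struct generic_pm_domain *genpd)
    {
        struct pm_domain_data *pdd;
        unsigned int active = 0;

        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
            if (!pm_runtime_suspended(pdd->dev))
                active++;
        }

        return active;
    }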