
Searched refs: process (Results 1 – 25 of 181) sorted by relevance


/drivers/gpu/drm/amd/amdkfd/
kfd_process.c
185 mm = get_task_mm(pdd->process->lead_thread); in kfd_sdma_activity_worker()
284 proc = pdd->process; in kfd_get_cu_occupancy()
529 if (!q || !q->process) in kfd_procfs_add_queue()
531 proc = q->process; in kfd_procfs_add_queue()
833 struct kfd_process *process; in kfd_create_process() local
862 process = ERR_PTR(-EINVAL); in kfd_create_process()
867 process = find_process(thread, false); in kfd_create_process()
868 if (process) { in kfd_create_process()
871 process = create_process(thread); in kfd_create_process()
872 if (IS_ERR(process)) in kfd_create_process()
[all …]
kfd_debug.c
32 int kfd_dbg_ev_query_debug_event(struct kfd_process *process, in kfd_dbg_ev_query_debug_event() argument
42 if (!(process && process->debug_trap_enabled)) in kfd_dbg_ev_query_debug_event()
45 mutex_lock(&process->event_mutex); in kfd_dbg_ev_query_debug_event()
51 pqm = &process->pqm; in kfd_dbg_ev_query_debug_event()
53 uint64_t tmp = process->exception_enable_mask; in kfd_dbg_ev_query_debug_event()
71 for (i = 0; i < process->n_pdds; i++) { in kfd_dbg_ev_query_debug_event()
72 struct kfd_process_device *pdd = process->pdds[i]; in kfd_dbg_ev_query_debug_event()
73 uint64_t tmp = process->exception_enable_mask in kfd_dbg_ev_query_debug_event()
86 if (process->exception_enable_mask & process->exception_status) { in kfd_dbg_ev_query_debug_event()
87 *event_status = process->exception_status; in kfd_dbg_ev_query_debug_event()
[all …]
kfd_process_queue_manager.c
73 pqm->process->pasid); in find_available_queue_slot()
124 pdd = kfd_get_process_device_data(dev, pqm->process); in pqm_set_gws()
141 ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info, in pqm_set_gws()
144 ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info, in pqm_set_gws()
181 pqm->process = p; in pqm_init()
194 pdd = kfd_get_process_device_data(dev, pqm->process); in pqm_clean_queue_resource()
205 pqm->process->kgd_process_info, pqn->q->gws); in pqm_clean_queue_resource()
222 pqm->process); in pqm_uninit()
262 (*q)->process = pqm->process; in init_user_queue()
337 pdd = kfd_get_process_device_data(dev, pqm->process); in pqm_create_queue()
[all …]
kfd_flat_memory.c
363 int kfd_init_apertures(struct kfd_process *process) in kfd_init_apertures() argument
381 pdd = kfd_create_process_device_data(dev, process); in kfd_init_apertures()
392 if (process->is_32bit_user_mode) { in kfd_init_apertures()
kfd_packet_manager_v9.c
50 packet->bitfields2.pasid = qpd->pqm->process->pasid; in pm_map_process_v9()
58 if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && in pm_map_process_v9()
59 pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { in pm_map_process_v9()
109 packet->bitfields2.pasid = qpd->pqm->process->pasid; in pm_map_process_aldebaran()
119 if (pdd->process->debug_trap_enabled) { in pm_map_process_aldebaran()
124 !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP); in pm_map_process_aldebaran()
kfd_chardev.c
86 mutex_unlock(&pdd->process->mutex); in kfd_unlock_pdd()
130 struct kfd_process *process; in kfd_open() local
146 process = kfd_create_process(current); in kfd_open()
147 if (IS_ERR(process)) in kfd_open()
148 return PTR_ERR(process); in kfd_open()
150 if (kfd_process_init_cwsr_apu(process, filep)) { in kfd_open()
151 kfd_unref_process(process); in kfd_open()
156 filep->private_data = process; in kfd_open()
159 process->pasid, process->is_32bit_user_mode); in kfd_open()
166 struct kfd_process *process = filep->private_data; in kfd_release() local
[all …]
kfd_priv.h
606 struct kfd_process *process; member
646 struct kfd_process *process; member
747 struct kfd_process *process; member
1059 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
1075 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
1099 int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
1182 int kfd_init_apertures(struct kfd_process *process);
1191 int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);
1471 int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
kfd_device_queue_manager.c
209 queue_input.process_id = qpd->pqm->process->pasid; in add_queue_mes()
237 qpd->pqm->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED && in add_queue_mes()
238 (qpd->pqm->process->debug_trap_enabled || in add_queue_mes()
517 dqm->vmid_pasid[allocated_vmid] = q->process->pasid; in allocate_vmid()
519 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid); in allocate_vmid()
662 if (WARN(q->process->mm != current->mm, in create_queue_nocpsch()
861 qpd->pqm->process); in destroy_queue_nocpsch_locked()
916 pdd = kfd_get_process_device_data(q->device, q->process); in update_queue()
1001 if (WARN(q->process->mm != current->mm, in update_queue()
1033 pdd->process->pasid, in suspend_single_queue()
[all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
gt215.c
31 u32 process, u32 message, u32 data0, u32 data1) in gt215_pmu_send() argument
55 pmu->recv.process = process; in gt215_pmu_send()
66 nvkm_wr32(device, 0x10a1c4, process); in gt215_pmu_send()
77 wait_event(pmu->recv.wait, (pmu->recv.process == 0)); in gt215_pmu_send()
91 u32 process, message, data0, data1; in gt215_pmu_recv() local
106 process = nvkm_rd32(device, 0x10a1c4); in gt215_pmu_recv()
116 if (pmu->recv.process) { in gt215_pmu_recv()
117 if (process == pmu->recv.process && in gt215_pmu_recv()
121 pmu->recv.process = 0; in gt215_pmu_recv()
131 (char)((process & 0x000000ff) >> 0), in gt215_pmu_recv()
[all …]
/drivers/android/binder/
thread.rs
31 process::Process,
418 process: offset_of!(Thread, process),
426 pub(crate) process: Arc<Process>, field
458 pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> { in new()
469 process, in new()
500 } else if Arc::ptr_eq(&t.to, &self.process) { in debug_print()
580 return Ok(self.process.get_work()); in get_work()
589 return self.process.get_work().ok_or(EAGAIN).map(Some); in get_work()
593 let reg = match self.process.get_work_or_register(self) { in get_work()
604 self.restore_priority(&self.process.default_priority); in get_work()
[all …]
allocation.rs
22 process::Process,
56 pub(crate) process: Arc<Process>, field
66 process: Arc<Process>, in new()
74 process, in new()
105 self.process in copy_into()
116 unsafe { self.process.pages.read(self.offset + offset) } in read()
124 unsafe { self.process.pages.write(self.offset + offset, obj) } in write()
130 unsafe { self.process.pages.fill_zero(self.offset, self.size) } in fill_zero()
134 self.process in keep_alive()
286 self.process.buffer_raw_free(self.ptr); in drop()
[all …]
rust_binder_main.rs
22 use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
37 mod process; module
91 p: process::PROCESS_LAYOUT,
118 self.thread.process.stats.inc_br(code); in write_code()
394 let process = match Process::open(ctx, file) { in rust_binder_open() localVariable
395 Ok(process) => process, in rust_binder_open()
400 match unsafe { BinderfsProcFile::new(inode, process.task.pid()) } { in rust_binder_open()
401 Ok(Some(file)) => process.inner.lock().binderfs_file = Some(file), in rust_binder_open()
407 unsafe { (*file_ptr).private_data = process.into_foreign().cast_mut() }; in rust_binder_open()
416 let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) }; in rust_binder_release() localVariable
[all …]
transaction.rs
22 process::{Process, ProcessInner},
118 from.process.default_priority in new()
125 sender_euid: from.process.task.euid(), in new()
168 sender_euid: from.process.task.euid(), in new_reply()
194 self.from.process.task.pid(), in debug_print_inner()
233 if Arc::ptr_eq(&transaction.from.process, &self.to) { in find_target_thread()
285 let process = self.to.clone(); in submit() localVariable
286 let mut process_inner = process.inner.lock(); in submit()
353 if self.from.process.task.pid() != old.from.process.task.pid() { in can_replace()
432 tr.sender_pid = self.from.process.pid_in_current_ns(); in do_work()
node.rs
18 process::{NodeRefInfo, Process, ProcessInner},
283 seq_print!(m, " {}", node_ref.process.task.pid()); in full_debug_print()
682 process: &Arc<Process>, in add_freeze_listener()
703 inner.freeze_list.push_within_capacity(process.clone())?; in add_freeze_listener()
952 process: Arc<Process>, field
981 process: Arc<Process>, in new()
987 process, in new()
1064 let process = death.process.clone(); in set_dead() localVariable
1065 let _ = process.push_work(death); in set_dead()
1126 let process = self.process.clone(); in do_work() localVariable
[all …]
rust_binder.h
40 size_t process; member
102 void *p = * (void **) (t + RUST_BINDER_LAYOUT.th.process); in rust_binder_thread_proc()
/drivers/gpu/drm/amd/display/modules/hdcp/
hdcp_psp.c
40 in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; in hdcp2_message_init()
41 in->process.msg1_desc.msg_size = 0; in hdcp2_message_init()
42 in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; in hdcp2_message_init()
43 in->process.msg2_desc.msg_size = 0; in hdcp2_message_init()
44 in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; in hdcp2_message_init()
45 in->process.msg3_desc.msg_size = 0; in hdcp2_message_init()
630 msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT; in mod_hdcp_hdcp2_validate_ake_cert()
631 msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT; in mod_hdcp_hdcp2_validate_ake_cert()
633 memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert, in mod_hdcp_hdcp2_validate_ake_cert()
654 if (msg_out->process.msg1_status == in mod_hdcp_hdcp2_validate_ake_cert()
[all …]
/drivers/gpu/drm/amd/amdgpu/
amdgpu_mes.c
274 struct amdgpu_mes_process *process; in amdgpu_mes_create_process() local
278 process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL); in amdgpu_mes_create_process()
279 if (!process) { in amdgpu_mes_create_process()
287 &process->proc_ctx_bo, in amdgpu_mes_create_process()
288 &process->proc_ctx_gpu_addr, in amdgpu_mes_create_process()
289 &process->proc_ctx_cpu_ptr); in amdgpu_mes_create_process()
294 memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); in amdgpu_mes_create_process()
303 r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1, in amdgpu_mes_create_process()
310 INIT_LIST_HEAD(&process->gang_list); in amdgpu_mes_create_process()
311 process->vm = vm; in amdgpu_mes_create_process()
[all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/
kernel.fuc
30 process(PROC_KERN, 0, 0)
137 // $r14 - process
140 // read process' timer status, skip if not enabled
145 // subtract last timer's value from process' timer,
156 // process' timer is the soonest
168 // update process' timer status, and advance
317 // request the current process be sent a message after a timeout expires
329 // if current process already has a timer set, bail
377 // send message to another process
380 // $r14 - process
[all …]
idle.fuc
26 process(PROC_IDLE, #idle, #idle_recv)
60 // keep looping while there's pending messages for any process
65 // process the process' messages until there's none left
74 // next process!
host.fuc
26 process(PROC_HOST, #host_init, #host_recv)
62 // HOST->PWR comms - dequeue message(s) for process(es) from FIFO
76 // read message data, and pass to appropriate process
94 // $r14 - process
/drivers/soc/ux500/
ux500-soc-id.c
32 u8 process; member
119 dbx500_id.process = asicid >> 24; in ux500_setup_id()
152 if (dbx500_id.process == 0x00) in process_show()
155 return sprintf(buf, "%02xnm\n", dbx500_id.process); in process_show()
158 static DEVICE_ATTR_RO(process);
/drivers/gpu/drm/amd/display/amdgpu_dm/
amdgpu_dm_irq.c
821 .process = amdgpu_dm_irq_handler,
826 .process = amdgpu_dm_irq_handler,
831 .process = amdgpu_dm_irq_handler,
836 .process = amdgpu_dm_irq_handler,
841 .process = amdgpu_dm_irq_handler,
846 .process = amdgpu_dm_irq_handler,
851 .process = amdgpu_dm_irq_handler,
/drivers/connector/
Kconfig
16 bool "Report process events to userspace"
20 Provide a connector that reports process events to userspace. Send
/drivers/gpu/drm/radeon/
radeon_asic.c
218 .process = &r100_irq_process,
286 .process = &r100_irq_process,
382 .process = &r100_irq_process,
450 .process = &r100_irq_process,
518 .process = &r100_irq_process,
586 .process = &r100_irq_process,
654 .process = &rs600_irq_process,
722 .process = &rs600_irq_process,
790 .process = &rs600_irq_process,
858 .process = &rs600_irq_process,
[all …]
/drivers/gpu/drm/nouveau/include/nvkm/subdev/
pmu.h
32 u32 process; member
38 int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
