
Searched refs: job (results 1 – 25 of 238, sorted by relevance)


/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
34 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
41 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
47 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); in amdgpu_job_timedout()
49 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
55 amdgpu_device_gpu_recover(ring->adev, job); in amdgpu_job_timedout()
64 struct amdgpu_job **job, struct amdgpu_vm *vm) in amdgpu_job_alloc() argument
73 *job = kzalloc(size, GFP_KERNEL); in amdgpu_job_alloc()
74 if (!*job) in amdgpu_job_alloc()
81 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
82 (*job)->vm = vm; in amdgpu_job_alloc()
[all …]
amdgpu_ib.c
125 struct amdgpu_ib *ibs, struct amdgpu_job *job, in amdgpu_ib_schedule() argument
147 if (job) { in amdgpu_ib_schedule()
148 vm = job->vm; in amdgpu_ib_schedule()
149 fence_ctx = job->base.s_fence ? in amdgpu_ib_schedule()
150 job->base.s_fence->scheduled.context : 0; in amdgpu_ib_schedule()
161 if (vm && !job->vmid) { in amdgpu_ib_schedule()
182 if (ring->funcs->emit_pipeline_sync && job && in amdgpu_ib_schedule()
183 ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || in amdgpu_ib_schedule()
185 amdgpu_vm_need_pipeline_sync(ring, job))) { in amdgpu_ib_schedule()
189 trace_amdgpu_ib_pipe_sync(job, tmp); in amdgpu_ib_schedule()
[all …]
amdgpu_job.h
38 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0) argument
68 struct amdgpu_job **job, struct amdgpu_vm *vm);
70 enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
71 void amdgpu_job_free_resources(struct amdgpu_job *job);
72 void amdgpu_job_free(struct amdgpu_job *job);
73 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
75 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
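Taken together, the prototypes above describe an alloc → submit → free lifecycle. Below is a minimal sketch of that flow, not indexed code: the full linux-5.10 signatures are assumed, and adev, vm, entity, and the IB setup are caller-supplied placeholders.

static int example_submit_one_ib(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct drm_sched_entity *entity,
				 struct dma_fence **fence)
{
	struct amdgpu_job *job;
	int r;

	/* allocate a job carrying a single indirect buffer */
	r = amdgpu_job_alloc(adev, 1, &job, vm);
	if (r)
		return r;

	/* ... caller fills job->ibs[0] here ... */

	/* hand the job to the scheduler; *fence signals completion */
	r = amdgpu_job_submit(job, entity, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		amdgpu_job_free(job);	/* still ours to free on failure */

	return r;
}

The owner token (here AMDGPU_FENCE_OWNER_UNDEFINED; amdgpu_vm_sdma.c below uses AMDGPU_FENCE_OWNER_VM) tags the resulting fence for later synchronization decisions.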
amdgpu_trace.h
35 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ argument
36 job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
155 __entry->dw = p->job->ibs[i].length_dw;
165 TP_PROTO(struct amdgpu_job *job),
166 TP_ARGS(job),
169 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
173 __string(ring, to_amdgpu_ring(job->base.sched)->name)
178 __entry->sched_job_id = job->base.id;
179 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
180 __entry->context = job->base.s_fence->finished.context;
[all …]
amdgpu_vm_sdma.c
69 r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job); in amdgpu_vm_sdma_prepare()
78 return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm); in amdgpu_vm_sdma_prepare()
93 struct amdgpu_ib *ib = p->job->ibs; in amdgpu_vm_sdma_commit()
105 r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f); in amdgpu_vm_sdma_commit()
124 amdgpu_job_free(p->job); in amdgpu_vm_sdma_commit()
142 struct amdgpu_ib *ib = p->job->ibs; in amdgpu_vm_sdma_copy_ptes()
172 struct amdgpu_ib *ib = p->job->ibs; in amdgpu_vm_sdma_set_ptes()
211 r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving); in amdgpu_vm_sdma_update()
217 ndw -= p->job->ibs->length_dw; in amdgpu_vm_sdma_update()
232 &p->job); in amdgpu_vm_sdma_update()
[all …]
/kernel/linux/linux-5.10/drivers/md/
dm-kcopyd.c
414 struct kcopyd_job *job; in pop_io_job() local
420 list_for_each_entry(job, jobs, list) { in pop_io_job()
421 if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) { in pop_io_job()
422 list_del(&job->list); in pop_io_job()
423 return job; in pop_io_job()
426 if (job->write_offset == job->master_job->write_offset) { in pop_io_job()
427 job->master_job->write_offset += job->source.count; in pop_io_job()
428 list_del(&job->list); in pop_io_job()
429 return job; in pop_io_job()
439 struct kcopyd_job *job = NULL; in pop() local
[all …]
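The pop_io_job() hits above encode a write-ordering rule: reads pop in FIFO order, but when DM_KCOPYD_WRITE_SEQ is set a write may only be issued if it is the next sequential chunk of its master job. A condensed restatement of that selection logic (structure and field names as shown above, surrounding locking omitted):

static struct kcopyd_job *pick_io_job(struct list_head *jobs)
{
	struct kcopyd_job *job;

	list_for_each_entry(job, jobs, list) {
		/* reads, and non-sequential writes, pop freely */
		if (job->rw == READ ||
		    !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
			list_del(&job->list);
			return job;
		}
		/* sequential writes must match the master job's cursor */
		if (job->write_offset == job->master_job->write_offset) {
			job->master_job->write_offset += job->source.count;
			list_del(&job->list);
			return job;
		}
	}
	return NULL;	/* nothing eligible yet */
}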
/kernel/linux/linux-5.10/drivers/gpu/host1x/
job.c
29 struct host1x_job *job = NULL; in host1x_job_alloc() local
47 mem = job = kzalloc(total, GFP_KERNEL); in host1x_job_alloc()
48 if (!job) in host1x_job_alloc()
51 kref_init(&job->ref); in host1x_job_alloc()
52 job->channel = ch; in host1x_job_alloc()
56 job->relocs = num_relocs ? mem : NULL; in host1x_job_alloc()
58 job->unpins = num_unpins ? mem : NULL; in host1x_job_alloc()
60 job->gathers = num_cmdbufs ? mem : NULL; in host1x_job_alloc()
62 job->addr_phys = num_unpins ? mem : NULL; in host1x_job_alloc()
64 job->reloc_addr_phys = job->addr_phys; in host1x_job_alloc()
[all …]
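host1x_job_alloc() above uses a single kzalloc() for the job structure plus all of its trailing arrays, then carves the per-array pointers out of the one block. A generic sketch of that pattern, with a hypothetical struct rather than host1x_job:

#include <linux/slab.h>
#include <linux/types.h>

struct pkt {
	u32 *relocs;	/* point into the same allocation */
	u32 *gathers;
};

static struct pkt *pkt_alloc(unsigned int num_relocs, unsigned int num_gathers)
{
	size_t total = sizeof(struct pkt) +
		       num_relocs * sizeof(u32) +
		       num_gathers * sizeof(u32);
	void *mem;
	struct pkt *p;

	mem = p = kzalloc(total, GFP_KERNEL);
	if (!p)
		return NULL;

	/* step past the struct, then hand out sub-arrays in order */
	mem += sizeof(struct pkt);
	p->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(u32);
	p->gathers = num_gathers ? mem : NULL;

	return p;	/* one kfree() releases everything */
}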
cdma.c
274 struct host1x_job *job) in cdma_start_timer_locked() argument
283 cdma->timeout.client = job->client; in cdma_start_timer_locked()
284 cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id); in cdma_start_timer_locked()
285 cdma->timeout.syncpt_val = job->syncpt_end; in cdma_start_timer_locked()
289 msecs_to_jiffies(job->timeout)); in cdma_start_timer_locked()
316 struct host1x_job *job, *n; in update_cdma_locked() local
326 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { in update_cdma_locked()
328 host1x_syncpt_get(host1x, job->syncpt_id); in update_cdma_locked()
331 if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) { in update_cdma_locked()
333 if (job->timeout) in update_cdma_locked()
[all …]
/kernel/linux/linux-5.10/block/
bsg-lib.c
39 struct bsg_job *job = blk_mq_rq_to_pdu(rq); in bsg_transport_fill_hdr() local
42 job->request_len = hdr->request_len; in bsg_transport_fill_hdr()
43 job->request = memdup_user(uptr64(hdr->request), hdr->request_len); in bsg_transport_fill_hdr()
44 if (IS_ERR(job->request)) in bsg_transport_fill_hdr()
45 return PTR_ERR(job->request); in bsg_transport_fill_hdr()
48 job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0); in bsg_transport_fill_hdr()
49 if (IS_ERR(job->bidi_rq)) { in bsg_transport_fill_hdr()
50 ret = PTR_ERR(job->bidi_rq); in bsg_transport_fill_hdr()
54 ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, in bsg_transport_fill_hdr()
60 job->bidi_bio = job->bidi_rq->bio; in bsg_transport_fill_hdr()
[all …]
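bsg_transport_fill_hdr() copies the userspace request with memdup_user(), which returns an ERR_PTR on failure rather than NULL, hence the IS_ERR()/PTR_ERR() checks. A minimal sketch of the idiom in a hypothetical handler:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int handle_user_blob(const void __user *uptr, size_t len)
{
	/* duplicate the user buffer into kernel memory in one call */
	void *buf = memdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -EFAULT or -ENOMEM */

	/* ... parse buf ... */
	kfree(buf);
	return 0;
}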
/kernel/linux/linux-5.10/drivers/gpu/drm/v3d/
v3d_sched.c
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_free() local
63 v3d_job_put(job); in v3d_job_free()
76 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_dependency() local
82 if (!xa_empty(&job->deps)) in v3d_job_dependency()
83 return xa_erase(&job->deps, job->last_dep++); in v3d_job_dependency()
90 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_run() local
91 struct v3d_dev *v3d = job->base.v3d; in v3d_bin_job_run()
96 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_bin_job_run()
103 v3d->bin_job = job; in v3d_bin_job_run()
116 if (job->base.irq_fence) in v3d_bin_job_run()
[all …]
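v3d_job_dependency() stores dependency fences in an XArray and hands them to the scheduler one at a time, erasing each entry as it goes. A reduced sketch of that pop pattern (wrapper name hypothetical; xa_erase() removes and returns the entry at the given index):

#include <linux/dma-fence.h>
#include <linux/xarray.h>

static struct dma_fence *pop_dependency(struct xarray *deps,
					unsigned long *last_dep)
{
	if (xa_empty(deps))
		return NULL;	/* no deps left; the job may run */

	return xa_erase(deps, (*last_dep)++);
}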
v3d_gem.c
250 v3d_lock_bo_reservations(struct v3d_job *job, in v3d_lock_bo_reservations() argument
255 ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
259 for (i = 0; i < job->bo_count; i++) { in v3d_lock_bo_reservations()
260 ret = drm_gem_fence_array_add_implicit(&job->deps, in v3d_lock_bo_reservations()
261 job->bo[i], true); in v3d_lock_bo_reservations()
263 drm_gem_unlock_reservations(job->bo, job->bo_count, in v3d_lock_bo_reservations()
289 struct v3d_job *job, in v3d_lookup_bos() argument
297 job->bo_count = bo_count; in v3d_lookup_bos()
299 if (!job->bo_count) { in v3d_lookup_bos()
307 job->bo = kvmalloc_array(job->bo_count, in v3d_lookup_bos()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/
panfrost_job.c
112 static int panfrost_job_get_slot(struct panfrost_job *job) in panfrost_job_get_slot() argument
118 if (job->requirements & PANFROST_JD_REQ_FS) in panfrost_job_get_slot()
123 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) { in panfrost_job_get_slot()
124 if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) && in panfrost_job_get_slot()
125 (job->pfdev->features.nr_core_groups == 2)) in panfrost_job_get_slot()
127 if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987)) in panfrost_job_get_slot()
151 static void panfrost_job_hw_submit(struct panfrost_job *job, int js) in panfrost_job_hw_submit() argument
153 struct panfrost_device *pfdev = job->pfdev; in panfrost_job_hw_submit()
155 u64 jc_head = job->jc; in panfrost_job_hw_submit()
168 cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); in panfrost_job_hw_submit()
[all …]
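panfrost_job_get_slot() maps requirement flags to a hardware job slot: fragment jobs always take slot 0, compute-only jobs can be steered to slot 2 on devices with two core groups, and everything else lands on slot 1. A condensed sketch of that policy (the HW_ISSUE_8987 special case is omitted):

static int slot_for_job(const struct panfrost_job *job)
{
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;	/* fragment slot */

	if ((job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) &&
	    (job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
	    job->pfdev->features.nr_core_groups == 2)
		return 2;	/* second core group */

	return 1;	/* vertex/tiler/compute */
}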
panfrost_drv.c
137 struct panfrost_job *job) in panfrost_lookup_bos() argument
144 job->bo_count = args->bo_handle_count; in panfrost_lookup_bos()
146 if (!job->bo_count) in panfrost_lookup_bos()
149 job->implicit_fences = kvmalloc_array(job->bo_count, in panfrost_lookup_bos()
152 if (!job->implicit_fences) in panfrost_lookup_bos()
157 job->bo_count, &job->bos); in panfrost_lookup_bos()
161 job->mappings = kvmalloc_array(job->bo_count, in panfrost_lookup_bos()
164 if (!job->mappings) in panfrost_lookup_bos()
167 for (i = 0; i < job->bo_count; i++) { in panfrost_lookup_bos()
170 bo = to_panfrost_bo(job->bos[i]); in panfrost_lookup_bos()
[all …]
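panfrost_lookup_bos() sizes its per-BO arrays with kvmalloc_array(), which checks the multiplication for overflow and transparently falls back from kmalloc to vmalloc for large counts. A minimal sketch (element type hypothetical):

#include <linux/mm.h>	/* kvmalloc_array(), kvfree() */

static void **alloc_bo_ptrs(size_t bo_count)
{
	/* overflow-checked bo_count * sizeof(void *), zeroed */
	return kvmalloc_array(bo_count, sizeof(void *),
			      GFP_KERNEL | __GFP_ZERO);
}

Whichever path the allocator took, kvfree() releases the array.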
/kernel/linux/linux-5.10/drivers/misc/habanalabs/common/
command_submission.c
123 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) in is_cb_patched() argument
129 return (job->queue_type == QUEUE_TYPE_EXT || in is_cb_patched()
130 (job->queue_type == QUEUE_TYPE_HW && in is_cb_patched()
131 job->is_kernel_allocated_cb && in is_cb_patched()
146 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) in cs_parser() argument
152 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
153 parser.cs_sequence = job->cs->sequence; in cs_parser()
154 parser.job_id = job->id; in cs_parser()
156 parser.hw_queue_id = job->hw_queue_id; in cs_parser()
157 parser.job_userptr_list = &job->userptr_list; in cs_parser()
[all …]
hw_queue.c
268 static void ext_queue_schedule_job(struct hl_cs_job *job) in ext_queue_schedule_job() argument
270 struct hl_device *hdev = job->cs->ctx->hdev; in ext_queue_schedule_job()
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in ext_queue_schedule_job()
286 cb = job->patched_cb; in ext_queue_schedule_job()
287 len = job->job_cb_size; in ext_queue_schedule_job()
310 job->contains_dma_pkt); in ext_queue_schedule_job()
312 q->shadow_queue[hl_pi_2_offset(q->pi)] = job; in ext_queue_schedule_job()
327 static void int_queue_schedule_job(struct hl_cs_job *job) in int_queue_schedule_job() argument
329 struct hl_device *hdev = job->cs->ctx->hdev; in int_queue_schedule_job()
330 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in int_queue_schedule_job()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/host1x/hw/
channel_hw.c
50 static void submit_gathers(struct host1x_job *job) in submit_gathers() argument
52 struct host1x_cdma *cdma = &job->channel->cdma; in submit_gathers()
54 struct device *dev = job->channel->dev; in submit_gathers()
58 for (i = 0; i < job->num_gathers; i++) { in submit_gathers()
59 struct host1x_job_gather *g = &job->gathers[i]; in submit_gathers()
87 static inline void synchronize_syncpt_base(struct host1x_job *job) in synchronize_syncpt_base() argument
89 struct host1x *host = dev_get_drvdata(job->channel->dev->parent); in synchronize_syncpt_base()
90 struct host1x_syncpt *sp = host->syncpt + job->syncpt_id; in synchronize_syncpt_base()
97 host1x_cdma_push(&job->channel->cdma, in synchronize_syncpt_base()
118 static int channel_submit(struct host1x_job *job) in channel_submit() argument
[all …]
debug_hw.c
196 struct host1x_job *job; in show_channel_gathers() local
203 list_for_each_entry(job, &cdma->sync_queue, list) { in show_channel_gathers()
207 job, job->syncpt_id, job->syncpt_end, in show_channel_gathers()
208 job->first_get, job->timeout, in show_channel_gathers()
209 job->num_slots, job->num_unpins); in show_channel_gathers()
211 for (i = 0; i < job->num_gathers; i++) { in show_channel_gathers()
212 struct host1x_job_gather *g = &job->gathers[i]; in show_channel_gathers()
215 if (job->gather_copy_mapped) in show_channel_gathers()
216 mapped = (u32 *)job->gather_copy_mapped; in show_channel_gathers()
231 if (!job->gather_copy_mapped) in show_channel_gathers()
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_writeback.c
262 int drm_writeback_prepare_job(struct drm_writeback_job *job) in drm_writeback_prepare_job() argument
264 struct drm_writeback_connector *connector = job->connector; in drm_writeback_prepare_job()
270 ret = funcs->prepare_writeback_job(connector, job); in drm_writeback_prepare_job()
275 job->prepared = true; in drm_writeback_prepare_job()
302 struct drm_writeback_job *job; in drm_writeback_queue_job() local
305 job = conn_state->writeback_job; in drm_writeback_queue_job()
309 list_add_tail(&job->list_entry, &wb_connector->job_queue); in drm_writeback_queue_job()
314 void drm_writeback_cleanup_job(struct drm_writeback_job *job) in drm_writeback_cleanup_job() argument
316 struct drm_writeback_connector *connector = job->connector; in drm_writeback_cleanup_job()
320 if (job->prepared && funcs->cleanup_writeback_job) in drm_writeback_cleanup_job()
[all …]
/kernel/linux/linux-5.10/drivers/scsi/lpfc/
lpfc_bsg.c
304 struct bsg_job *job; in lpfc_bsg_send_mgmt_cmd_cmp() local
318 job = dd_data->set_job; in lpfc_bsg_send_mgmt_cmd_cmp()
319 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
320 bsg_reply = job->reply; in lpfc_bsg_send_mgmt_cmd_cmp()
322 job->dd_data = NULL; in lpfc_bsg_send_mgmt_cmd_cmp()
340 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
360 lpfc_bsg_copy_data(rmp, &job->reply_payload, in lpfc_bsg_send_mgmt_cmd_cmp()
375 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
377 bsg_job_done(job, bsg_reply->result, in lpfc_bsg_send_mgmt_cmd_cmp()
388 lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) in lpfc_bsg_send_mgmt_cmd() argument
[all …]
/kernel/linux/linux-5.10/drivers/scsi/ufs/
ufs_bsg.c
42 static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job, in ufs_bsg_alloc_desc_buffer() argument
46 struct ufs_bsg_request *bsg_request = job->request; in ufs_bsg_alloc_desc_buffer()
60 if (*desc_len > job->request_payload.payload_len) { in ufs_bsg_alloc_desc_buffer()
70 sg_copy_to_buffer(job->request_payload.sg_list, in ufs_bsg_alloc_desc_buffer()
71 job->request_payload.sg_cnt, descp, in ufs_bsg_alloc_desc_buffer()
80 static int ufs_bsg_request(struct bsg_job *job) in ufs_bsg_request() argument
82 struct ufs_bsg_request *bsg_request = job->request; in ufs_bsg_request()
83 struct ufs_bsg_reply *bsg_reply = job->reply; in ufs_bsg_request()
84 struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent)); in ufs_bsg_request()
85 unsigned int req_len = job->request_len; in ufs_bsg_request()
[all …]
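ufs_bsg_alloc_desc_buffer() linearizes the request payload out of the job's scatter-gather list with sg_copy_to_buffer(). A minimal sketch of that call pattern (function name hypothetical, mirroring the bsg usage above):

#include <linux/scatterlist.h>
#include <linux/slab.h>

static void *copy_payload(struct scatterlist *sg_list, unsigned int sg_cnt,
			  size_t payload_len)
{
	void *buf = kzalloc(payload_len, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* copies at most payload_len bytes out of the SG list */
	sg_copy_to_buffer(sg_list, sg_cnt, buf, payload_len);
	return buf;
}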
/kernel/linux/linux-5.10/drivers/scsi/libsas/
sas_host_smp.c
225 void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost) in sas_smp_host_handler() argument
233 if (job->request_payload.payload_len < 8 || in sas_smp_host_handler()
234 job->reply_payload.payload_len < 8) in sas_smp_host_handler()
238 req_data = kzalloc(job->request_payload.payload_len, GFP_KERNEL); in sas_smp_host_handler()
241 sg_copy_to_buffer(job->request_payload.sg_list, in sas_smp_host_handler()
242 job->request_payload.sg_cnt, req_data, in sas_smp_host_handler()
243 job->request_payload.payload_len); in sas_smp_host_handler()
247 resp_data = kzalloc(max(job->reply_payload.payload_len, 128U), in sas_smp_host_handler()
282 if (job->request_payload.payload_len < 16) in sas_smp_host_handler()
294 if (job->request_payload.payload_len < 16) in sas_smp_host_handler()
[all …]
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/powerpc/fsl/
raideng.txt
30 There must be a sub-node for each job queue present in RAID Engine
33 - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
34 This identifies the job queue interface
35 - reg: offset and length of the register set for job queue
42 compatible = "fsl,raideng-v1.0-job-queue";
48 There must be a sub-node for each job ring present in RAID Engine
49 This node must be a sub-node of job queue node
51 - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
52 This identifies job ring. Should contain either
55 - reg: offset and length of the register set for job ring
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/scheduler/
sched_main.c
283 struct drm_sched_job *job; in drm_sched_job_timedout() local
289 job = list_first_entry_or_null(&sched->ring_mirror_list, in drm_sched_job_timedout()
292 if (job) { in drm_sched_job_timedout()
298 list_del_init(&job->node); in drm_sched_job_timedout()
301 job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
308 job->sched->ops->free_job(job); in drm_sched_job_timedout()
548 int drm_sched_job_init(struct drm_sched_job *job, in drm_sched_job_init() argument
560 job->sched = sched; in drm_sched_job_init()
561 job->entity = entity; in drm_sched_job_init()
562 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_init()
[all …]
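drm_sched_job_init() binds a job to an entity's scheduler and derives its priority from the entity's run queue; once initialized, the driver hands the job over with drm_sched_entity_push_job(). A sketch of the usual driver-side sequence, assuming the linux-5.10 scheduler API (owner token hypothetical):

#include <drm/gpu_scheduler.h>

static int queue_job(struct drm_sched_job *job,
		     struct drm_sched_entity *entity, void *owner)
{
	int ret = drm_sched_job_init(job, entity, owner);

	if (ret)
		return ret;

	/* ownership passes to the scheduler, which will invoke
	 * ops->run_job() and, on hangs, ops->timedout_job() as above */
	drm_sched_entity_push_job(job, entity);
	return 0;
}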
/kernel/linux/linux-5.10/arch/powerpc/boot/dts/fsl/
qoriq-sec6.0-0.dtsi
42 compatible = "fsl,sec-v6.0-job-ring",
43 "fsl,sec-v5.2-job-ring",
44 "fsl,sec-v5.0-job-ring",
45 "fsl,sec-v4.4-job-ring",
46 "fsl,sec-v4.0-job-ring";
51 compatible = "fsl,sec-v6.0-job-ring",
52 "fsl,sec-v5.2-job-ring",
53 "fsl,sec-v5.0-job-ring",
54 "fsl,sec-v4.4-job-ring",
55 "fsl,sec-v4.0-job-ring";
/kernel/linux/linux-5.10/drivers/scsi/smartpqi/
smartpqi_sas_transport.c
448 struct bsg_job *job) in pqi_build_csmi_smp_passthru_buffer() argument
460 req_size = job->request_payload.payload_len; in pqi_build_csmi_smp_passthru_buffer()
461 resp_size = job->reply_payload.payload_len; in pqi_build_csmi_smp_passthru_buffer()
484 sg_copy_to_buffer(job->request_payload.sg_list, in pqi_build_csmi_smp_passthru_buffer()
485 job->reply_payload.sg_cnt, &parameters->request, in pqi_build_csmi_smp_passthru_buffer()
492 struct bmic_csmi_smp_passthru_buffer *smp_buf, struct bsg_job *job, in pqi_build_sas_smp_handler_reply() argument
495 sg_copy_from_buffer(job->reply_payload.sg_list, in pqi_build_sas_smp_handler_reply()
496 job->reply_payload.sg_cnt, &smp_buf->parameters.response, in pqi_build_sas_smp_handler_reply()
499 job->reply_len = le16_to_cpu(error_info->sense_data_length); in pqi_build_sas_smp_handler_reply()
500 memcpy(job->reply, error_info->data, in pqi_build_sas_smp_handler_reply()
[all …]
