| /kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/ |
| D | nouveau_sched.c |
      18:  * other jobs competing for the hardware. Otherwise we might end up with job
      20:  * want jobs to time out because of system load, but because of the job being
      30: nouveau_job_init(struct nouveau_job *job,
      36:     job->file_priv = args->file_priv;
      37:     job->cli = nouveau_cli(args->file_priv);
      38:     job->entity = entity;
      40:     job->sync = args->sync;
      41:     job->resv_usage = args->resv_usage;
      43:     job->ops = args->ops;
      45:     job->in_sync.count = args->in_sync.count;
      [all …]
|
| D | nouveau_exec.c |
      67:  * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
      69:  * A VM_BIND job can be executed either synchronously or asynchronously. If
      70:  * executed asynchronously, userspace may provide a list of syncobjs this job
      72:  * VM_BIND job finished execution. If executed synchronously the ioctl will
      73:  * block until the bind job is finished. For synchronous jobs the kernel will
      82:  * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
      89: nouveau_exec_job_submit(struct nouveau_job *job)
      91:     struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
      92:     struct nouveau_cli *cli = job->cli;
      94:     struct drm_exec *exec = &job->exec;
      [all …]
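The comment block above describes the two execution modes a job contract like this offers: a synchronous submit blocks the caller until the job retires, an asynchronous one returns immediately and lets userspace wait on syncobjs. As a rough, self-contained illustration of that contract (this is not nouveau code; the fence, queue, and all names below are invented for the sketch, with pthreads standing in for DMA fences):

    #include <pthread.h>
    #include <stdbool.h>

    /* Invented stand-in for a DMA fence: signalled once the job retires. */
    struct fence {
        pthread_mutex_t lock;
        pthread_cond_t cv;
        bool signalled;
    };

    static void fence_init(struct fence *f)
    {
        pthread_mutex_init(&f->lock, NULL);
        pthread_cond_init(&f->cv, NULL);
        f->signalled = false;
    }

    static void fence_signal(struct fence *f)
    {
        pthread_mutex_lock(&f->lock);
        f->signalled = true;
        pthread_cond_broadcast(&f->cv);
        pthread_mutex_unlock(&f->lock);
    }

    static void fence_wait(struct fence *f)
    {
        pthread_mutex_lock(&f->lock);
        while (!f->signalled)
            pthread_cond_wait(&f->cv, &f->lock);
        pthread_mutex_unlock(&f->lock);
    }

    struct job {
        struct fence done;
        bool sync; /* mirrors nouveau_job::sync: should submit block? */
    };

    /* Toy "hardware": runs the job and signals its fence immediately. */
    static void enqueue_for_hw(struct job *j)
    {
        fence_signal(&j->done);
    }

    static int job_submit(struct job *j)
    {
        enqueue_for_hw(j);
        /*
         * A synchronous submit blocks here until the job's fence
         * signals; an asynchronous one returns at once and userspace
         * waits on a syncobj wrapping the same fence instead.
         */
        if (j->sync)
            fence_wait(&j->done);
        return 0;
    }

Either way the completion signal is the same fence; sync/async only decides who waits on it and when.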
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/ |
| D | amdgpu_job.c |
      37:     struct amdgpu_job *job = to_amdgpu_job(s_job);
      47:     /* Effectively the job is aborted as the device is gone */
      55:         amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
      61:     amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
      63:           job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
      78:     r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
      95:              unsigned int num_ibs, struct amdgpu_job **job)
     100:     *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
     101:     if (!*job)
     108:     (*job)->base.sched = &adev->rings[0]->sched;
      [all …]
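The allocation at source line 100 sizes one buffer for the job struct plus its trailing array of IBs with struct_size(), which makes the multiply-and-add overflow-safe. A minimal userspace model of the same pattern (names invented; calloc stands in for kzalloc and the kernel helper is open-coded with compiler builtins):

    #include <stdint.h>
    #include <stdlib.h>

    struct job {
        unsigned int num_ibs;
        uint64_t ibs[]; /* flexible array member, sized at allocation */
    };

    /* Open-coded equivalent of the kernel's struct_size(): 0 on overflow. */
    static size_t job_size(unsigned int num_ibs)
    {
        size_t bytes;

        if (__builtin_mul_overflow(sizeof(uint64_t), (size_t)num_ibs, &bytes))
            return 0;
        if (__builtin_add_overflow(bytes, sizeof(struct job), &bytes))
            return 0;
        return bytes;
    }

    static struct job *job_alloc(unsigned int num_ibs)
    {
        size_t bytes = job_size(num_ibs);
        struct job *job;

        if (!bytes)
            return NULL;
        job = calloc(1, bytes); /* zeroed, like kzalloc(..., GFP_KERNEL) */
        if (job)
            job->num_ibs = num_ibs;
        return job;
    }

One allocation, one free, and a size computation that cannot silently wrap on a hostile num_ibs.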
|
| /kernel/linux/linux-6.6/drivers/gpu/host1x/ |
| D | job.c |
       3:  * Tegra host1x Job
      21: #include "job.h"
      30:     struct host1x_job *job = NULL;
      51:     mem = job = kzalloc(total, GFP_KERNEL);
      52:     if (!job)
      55:     job->enable_firewall = enable_firewall;
      57:     kref_init(&job->ref);
      58:     job->channel = ch;
      62:     job->relocs = num_relocs ? mem : NULL;
      64:     job->unpins = num_unpins ? mem : NULL;
      [all …]
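host1x_job_alloc() makes a single kzalloc() covering the job header and all of its variable-length tables, then walks a cursor (mem) through the block handing out each sub-array. A self-contained sketch of that carving pattern (field names invented; overflow checking and alignment padding omitted for brevity):

    #include <stdlib.h>

    struct reloc { int dummy; };
    struct unpin { int dummy; };

    struct job {
        struct reloc *relocs;
        struct unpin *unpins;
        unsigned int num_relocs, num_unpins;
    };

    static struct job *job_alloc(unsigned int num_relocs, unsigned int num_unpins)
    {
        size_t total = sizeof(struct job) +
                   num_relocs * sizeof(struct reloc) +
                   num_unpins * sizeof(struct unpin);
        void *mem;
        struct job *job;

        /* One zeroed allocation covers the header and both tables. */
        mem = job = calloc(1, total);
        if (!job)
            return NULL;

        /* Carve each table out of the tail, advancing the cursor. */
        mem = (char *)mem + sizeof(struct job);
        job->relocs = num_relocs ? mem : NULL;
        mem = (char *)mem + num_relocs * sizeof(struct reloc);
        job->unpins = num_unpins ? mem : NULL;

        job->num_relocs = num_relocs;
        job->num_unpins = num_unpins;
        return job;
    }

The payoff is teardown: a single free(job) releases the header and every table at once.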
|
| D | cdma.c |
      23: #include "job.h"
     270:  * Start timer that tracks the time spent by the job.
     274:                     struct host1x_job *job)
     281:     cdma->timeout.client = job->client;
     282:     cdma->timeout.syncpt = job->syncpt;
     283:     cdma->timeout.syncpt_val = job->syncpt_end;
     287:               msecs_to_jiffies(job->timeout));
     313:     struct host1x_job *job, *n;
     319:     list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
     320:         struct host1x_syncpt *sp = job->syncpt;
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/ |
| D | amdgpu_job.c |
      34:     struct amdgpu_job *job = to_amdgpu_job(s_job);
      41:         amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
      47:     amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
      49:           job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
      55:     amdgpu_device_gpu_recover(ring->adev, job);
      64:              struct amdgpu_job **job, struct amdgpu_vm *vm)
      73:     *job = kzalloc(size, GFP_KERNEL);
      74:     if (!*job)
      81:     (*job)->base.sched = &adev->rings[0]->sched;
      82:     (*job)->vm = vm;
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/host1x/ |
| D | job.c |
       3:  * Tegra host1x Job
      21: #include "job.h"
      29:     struct host1x_job *job = NULL;
      47:     mem = job = kzalloc(total, GFP_KERNEL);
      48:     if (!job)
      51:     kref_init(&job->ref);
      52:     job->channel = ch;
      56:     job->relocs = num_relocs ? mem : NULL;
      58:     job->unpins = num_unpins ? mem : NULL;
      60:     job->gathers = num_cmdbufs ? mem : NULL;
      [all …]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/v3d/ |
| D | v3d_sched.c |
      10:  * scheduler will round-robin between clients to submit the next job.
      13:  * jobs when bulk background jobs are queued up, we submit a new job
      60:     struct v3d_job *job = to_v3d_job(sched_job);
      62:     v3d_job_cleanup(job);
      66: v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
      68:     if (job->perfmon != v3d->active_perfmon)
      71:     if (job->perfmon && v3d->active_perfmon != job->perfmon)
      72:         v3d_perfmon_start(v3d, job->perfmon);
      77:     struct v3d_bin_job *job = to_bin_job(sched_job);
      78:     struct v3d_dev *v3d = job->base.v3d;
      [all …]
|
| D | v3d_gem.c |
     171:  * need to wait for completion before dispatching the job --
     175:  * synchronously clean after a job.
     188:  * signaling job completion. So, we synchronously wait before
     254: v3d_lock_bo_reservations(struct v3d_job *job,
     259:     ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
     263:     for (i = 0; i < job->bo_count; i++) {
     264:         ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
     268:         ret = drm_sched_job_add_implicit_dependencies(&job->base,
     269:                                                       job->bo[i], true);
     277:     drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
      [all …]
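v3d_lock_bo_reservations() locks every BO's reservation object up front, then fully unwinds if any later per-object step fails, so the job either holds everything or nothing. A generic model of that all-or-nothing shape (lock, unlock, and the per-object step are invented stand-ins; the real code uses ww-mutex-backed reservation locking for cross-process deadlock avoidance, which a plain mutex in fixed order only approximates):

    #include <pthread.h>

    struct obj {
        pthread_mutex_t lock;
    };

    /* Per-object setup that may fail; stands in for reserving fence slots. */
    static int obj_prepare(struct obj *o) { (void)o; return 0; }

    static void unlock_all(struct obj **objs, int n)
    {
        while (n--)
            pthread_mutex_unlock(&objs[n]->lock);
    }

    static int lock_all(struct obj **objs, int n)
    {
        int i, ret;

        for (i = 0; i < n; i++)
            pthread_mutex_lock(&objs[i]->lock);

        for (i = 0; i < n; i++) {
            ret = obj_prepare(objs[i]);
            if (ret) {
                /* Any failure releases every lock before returning. */
                unlock_all(objs, n);
                return ret;
            }
        }
        return 0;
    }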
|
| /kernel/linux/linux-5.10/block/ |
| D | bsg-lib.c |
      39:     struct bsg_job *job = blk_mq_rq_to_pdu(rq);
      42:     job->request_len = hdr->request_len;
      43:     job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
      44:     if (IS_ERR(job->request))
      45:         return PTR_ERR(job->request);
      48:         job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
      49:         if (IS_ERR(job->bidi_rq)) {
      50:             ret = PTR_ERR(job->bidi_rq);
      54:         ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
      60:         job->bidi_bio = job->bidi_rq->bio;
      [all …]
|
| /kernel/linux/linux-5.10/drivers/md/ |
| D | dm-kcopyd.c |
      40: MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
     347:  * Error state of the job.
     367:  * Set this to ensure you are notified when the job has
     374:  * These fields are only used if the job has been split
     408:  * Functions to push and pop a job onto the head of a given job
     414:     struct kcopyd_job *job;
     420:     list_for_each_entry(job, jobs, list) {
     421:         if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
     422:             list_del(&job->list);
     423:             return job;
      [all …]
|
| /kernel/linux/linux-6.6/drivers/md/ |
| D | dm-kcopyd.c |
      41: MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
     353:  * Error state of the job.
     373:  * Set this to ensure you are notified when the job has
     380:  * These fields are only used if the job has been split
     414:  * Functions to push and pop a job onto the head of a given job
     420:     struct kcopyd_job *job;
     426:     list_for_each_entry(job, jobs, list) {
     427:         if (job->op == REQ_OP_READ ||
     428:             !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
     429:             list_del(&job->list);
      [all …]
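In both kernel versions, pop_io_job() walks the queue and removes the first job that is safe to issue now: reads may go in any order, while writes must keep submission order when DM_KCOPYD_WRITE_SEQ is set. A toy, self-contained version of just that selection rule (list plumbing invented; the kernel uses list_for_each_entry over a list_head, and also has an extra branch, not shown in the excerpt, that releases in-order sequential writes):

    #include <stdbool.h>
    #include <stddef.h>

    enum op { OP_READ, OP_WRITE };

    struct job {
        enum op op;
        bool write_seq;     /* models the DM_KCOPYD_WRITE_SEQ flag */
        struct job *next;
    };

    /* Pop the first job that may be issued now: any read, or any write
     * that carries no sequential-ordering constraint. */
    static struct job *pop_io_job(struct job **head)
    {
        struct job **link, *job;

        for (link = head; (job = *link) != NULL; link = &job->next) {
            if (job->op == OP_READ || !job->write_seq) {
                *link = job->next;
                job->next = NULL;
                return job;
            }
        }
        return NULL;
    }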
|
| /kernel/linux/linux-6.6/drivers/accel/ivpu/ |
| D | ivpu_job.c |
     133:  * Mark the doorbell as unregistered and reset job queue pointers.
     135:  * and FW loses job queue state. The next time job queue is used it
     180: static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
     182:     struct ivpu_device *vdev = job->vdev;
     188:     /* Check if there is space left in job queue */
     190:         ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
     191:              job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
     195:     entry = &cmdq->jobq->job[tail];
     196:     entry->batch_buf_addr = job->cmd_buf_vpu_addr;
     197:     entry->job_id = job->job_id;
      [all …]
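ivpu_cmdq_push_job() appends a descriptor at the tail of a fixed-size job ring shared with firmware and rejects the push when the ring is full. A minimal single-producer model of that head/tail arithmetic (ring size and fields invented; the real queue lives in memory shared with the NPU firmware, which advances head as it consumes entries):

    #include <errno.h>
    #include <stdint.h>

    #define RING_ENTRIES 64 /* must be a power of two for the mask below */

    struct job_entry {
        uint64_t batch_buf_addr;
        uint32_t job_id;
    };

    struct job_ring {
        uint32_t head; /* consumer (firmware) advances this */
        uint32_t tail; /* producer (driver) advances this */
        struct job_entry job[RING_ENTRIES];
    };

    static int ring_push(struct job_ring *ring, uint64_t addr, uint32_t id)
    {
        uint32_t next = (ring->tail + 1) & (RING_ENTRIES - 1);

        /* Full when advancing tail would collide with head. */
        if (next == ring->head)
            return -EBUSY;

        ring->job[ring->tail].batch_buf_addr = addr;
        ring->job[ring->tail].job_id = id;
        ring->tail = next;
        return 0;
    }

Keeping one slot empty distinguishes "full" from "empty" without a separate count, at the cost of RING_ENTRIES - 1 usable slots.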
|
| /kernel/linux/linux-6.6/drivers/gpu/host1x/hw/ |
| D | channel_hw.c |
      17: #include "../job.h"
      50: static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
      53:     struct host1x_cdma *cdma = &job->channel->cdma;
      63:     if (job->memory_context)
      64:         stream_id = job->memory_context->stream_id;
      66:         stream_id = job->engine_fallback_streamid;
      79:     host1x_cdma_push_wide(&job->channel->cdma,
      80:                   host1x_opcode_setclass(job->class, 0, 0),
      82:                   host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
     113: static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
      [all …]
|
| /kernel/linux/linux-6.6/block/ |
| D | bsg-lib.c |
      31:     struct bsg_job *job;
      49:     job = blk_mq_rq_to_pdu(rq);
      50:     reply = job->reply;
      51:     memset(job, 0, sizeof(*job));
      52:     job->reply = reply;
      53:     job->reply_len = SCSI_SENSE_BUFFERSIZE;
      54:     job->dd_data = job + 1;
      56:     job->request_len = hdr->request_len;
      57:     job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
      58:     if (IS_ERR(job->request)) {
      [all …]
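Line 54's job->dd_data = job + 1 points the driver-private area at the bytes immediately after the bsg_job struct itself; this works because the block layer sized the whole per-request PDU as sizeof(struct bsg_job) plus the transport's dd_job_size. A standalone illustration of the struct+1 idiom (names invented; note "job + 1" gets only the alignment of struct job, which suffices for most private data):

    #include <stdlib.h>
    #include <string.h>

    struct job {
        void *dd_data; /* driver-private area, lives right after this struct */
        int reply_len;
    };

    /* Allocate the header and a dd_size-byte private area as one block. */
    static struct job *job_alloc(size_t dd_size)
    {
        struct job *job = malloc(sizeof(*job) + dd_size);

        if (!job)
            return NULL;
        memset(job, 0, sizeof(*job) + dd_size);
        /* "job + 1" is the first byte past the struct: the private area. */
        job->dd_data = job + 1;
        return job;
    }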
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/v3d/ |
| D | v3d_sched.c |
      10:  * scheduler will round-robin between clients to submit the next job.
      13:  * jobs when bulk background jobs are queued up, we submit a new job
      60:     struct v3d_job *job = to_v3d_job(sched_job);
      63:     v3d_job_put(job);
      67:  * Returns the fences that the job depends on, one by one.
      76:     struct v3d_job *job = to_v3d_job(sched_job);
      82:     if (!xa_empty(&job->deps))
      83:         return xa_erase(&job->deps, job->last_dep++);
      90:     struct v3d_bin_job *job = to_bin_job(sched_job);
      91:     struct v3d_dev *v3d = job->base.v3d;
      [all …]
|
| D | v3d_gem.c |
     167:  * need to wait for completion before dispatching the job --
     171:  * synchronously clean after a job.
     184:  * signaling job completion. So, we synchronously wait before
     250: v3d_lock_bo_reservations(struct v3d_job *job,
     255:     ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
     259:     for (i = 0; i < job->bo_count; i++) {
     260:         ret = drm_gem_fence_array_add_implicit(&job->deps,
     261:                                job->bo[i], true);
     263:             drm_gem_unlock_reservations(job->bo, job->bo_count,
     273:  * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/ |
| D | panfrost_job.c |
     112: static int panfrost_job_get_slot(struct panfrost_job *job)
     118:     if (job->requirements & PANFROST_JD_REQ_FS)
     123:     if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
     124:         if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
     125:             (job->pfdev->features.nr_core_groups == 2))
     127:         if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
     151: static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
     153:     struct panfrost_device *pfdev = job->pfdev;
     155:     u64 jc_head = job->jc;
     168:     cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
      [all …]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/panfrost/ |
| D | panfrost_job.c |
     106: int panfrost_job_get_slot(struct panfrost_job *job)
     112:     if (job->requirements & PANFROST_JD_REQ_FS)
     117:     if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
     118:         if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
     119:             (job->pfdev->features.nr_core_groups == 2))
     121:         if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
     146: panfrost_get_job_chain_flag(const struct panfrost_job *job)
     148:     struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
     150:     if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
     159:     struct panfrost_job *job = pfdev->jobs[slot][0];
      [all …]
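panfrost_job_get_slot(), unchanged between the two versions shown, routes a job to one of the Mali job slots from its requirement flags: fragment work goes to slot 0, everything else defaults to slot 1, and compute-only jobs are steered to slot 2 on two-core-group parts or when an errata demands it. A compact standalone rendering of that decision table (the flag values and feature struct below are invented for the sketch; the real bits are the PANFROST_JD_REQ_* uAPI flags):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical flag values, for illustration only. */
    #define REQ_FS            (1u << 0) /* fragment shader job */
    #define REQ_ONLY_COMPUTE  (1u << 1)
    #define REQ_CORE_GRP_MASK (1u << 2)

    struct device_features {
        unsigned int nr_core_groups;
        bool has_issue_8987; /* errata that forces compute onto slot 2 */
    };

    static int job_get_slot(uint32_t requirements,
                            const struct device_features *feat)
    {
        /* Slot 0: fragment, slot 1: vertex/tiler, slot 2: compute. */
        if (requirements & REQ_FS)
            return 0;

        if (requirements & REQ_ONLY_COMPUTE) {
            if ((requirements & REQ_CORE_GRP_MASK) &&
                feat->nr_core_groups == 2)
                return 2;
            if (feat->has_issue_8987)
                return 2;
        }

        return 1;
    }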
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/scheduler/ |
| D | sched_main.c |
      32:  * backend operations to the scheduler like submitting a job to hardware run queue,
      33:  * returning the dependencies of a job etc.
      46:  * Note that once a job was taken from the entities queue and pushed to the
     188:  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
     232:  * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
     260:  * drm_sched_job_done - complete a job
     261:  * @s_job: pointer to the job which is done
     263:  * Finish the job's fence and wake up the worker thread.
     282:  * drm_sched_job_done_cb - the callback for a done job
     322:  * drm_sched_suspend_timeout - Suspend scheduler job timeout
      [all …]
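The two selectors named above embody the DRM scheduler's two policies: the _rr variant resumes scanning from just past the entity it picked last time, while the _fifo variant picks the entity whose queued job was submitted earliest. A toy round-robin picker over a fixed array (invented model; the real code walks a linked run queue under a spinlock and tests drm_sched_entity_is_ready()):

    #include <stdbool.h>
    #include <stddef.h>

    struct entity {
        bool has_ready_job; /* stands in for drm_sched_entity_is_ready() */
    };

    struct run_queue {
        struct entity *entities;
        size_t count;
        size_t last; /* index of the entity picked on the previous call */
    };

    /* Round-robin: start just past the last pick, so one busy entity
     * cannot starve the others. Returns NULL when nothing is ready. */
    static struct entity *select_entity_rr(struct run_queue *rq)
    {
        size_t i;

        if (!rq->count)
            return NULL;

        for (i = 1; i <= rq->count; i++) {
            size_t idx = (rq->last + i) % rq->count;

            if (rq->entities[idx].has_ready_job) {
                rq->last = idx;
                return &rq->entities[idx];
            }
        }
        return NULL;
    }

Round-robin is fair by position; the FIFO policy instead orders by job age, which favors latency over strict per-entity fairness.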
|
| /kernel/linux/linux-5.10/drivers/misc/habanalabs/common/ |
| D | command_submission.c |
     123: static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
     129:     return (job->queue_type == QUEUE_TYPE_EXT ||
     130:         (job->queue_type == QUEUE_TYPE_HW &&
     131:          job->is_kernel_allocated_cb &&
     139:  * @job : pointer to the job that holds the command submission info
     146: static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
     152:     parser.ctx_id = job->cs->ctx->asid;
     153:     parser.cs_sequence = job->cs->sequence;
     154:     parser.job_id = job->id;
     156:     parser.hw_queue_id = job->hw_queue_id;
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/host1x/hw/ |
| D | channel_hw.c |
      17: #include "../job.h"
      50: static void submit_gathers(struct host1x_job *job)
      52:     struct host1x_cdma *cdma = &job->channel->cdma;
      54:     struct device *dev = job->channel->dev;
      58:     for (i = 0; i < job->num_gathers; i++) {
      59:         struct host1x_job_gather *g = &job->gathers[i];
      87: static inline void synchronize_syncpt_base(struct host1x_job *job)
      89:     struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
      90:     struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
      97:     host1x_cdma_push(&job->channel->cdma,
      [all …]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/tegra/ |
| D | submit.c |
      28:     "%s: job submission failed: " fmt "\n", \
     327: static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
     337:     /* Syncpt ref will be dropped on job release */
     344:     job->syncpt = host1x_syncpt_get(sp);
     345:     job->syncpt_incrs = args->syncpt.increments;
     350: static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
     370:         SUBMIT_ERR(context, "too many total words in job");
     381:         SUBMIT_ERR(context, "job was rejected by firewall");
     385:     host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
     399:     struct host1x_job *job;
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/ |
| D | drm_writeback.c |
     262: int drm_writeback_prepare_job(struct drm_writeback_job *job)
     264:     struct drm_writeback_connector *connector = job->connector;
     270:         ret = funcs->prepare_writeback_job(connector, job);
     275:     job->prepared = true;
     281:  * drm_writeback_queue_job - Queue a writeback job for later signalling
     282:  * @wb_connector: The writeback connector to queue a job on
     283:  * @conn_state: The connector state containing the job to queue
     285:  * This function adds the job contained in @conn_state to the job_queue for a
     286:  * writeback connector. It takes ownership of the writeback job and sets the
     287:  * @conn_state->writeback_job to NULL, and so no access to the job may be
      [all …]
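The kerneldoc above spells out an ownership hand-off: queueing steals the job pointer out of the connector state and NULLs it, so the caller can no longer touch the job once it is on the queue. A minimal sketch of that move-then-queue idiom (types and the list below are invented for illustration; the kernel uses a list_head under the connector's job_lock):

    #include <stddef.h>

    struct job {
        struct job *next;
    };

    struct state {
        struct job *job; /* NULL once ownership moves to the queue */
    };

    struct connector {
        struct job *queue_head, *queue_tail;
    };

    /* Take the job out of @state and append it to @conn's queue; after
     * this returns, @state no longer owns (or may access) the job. */
    static void queue_job(struct connector *conn, struct state *state)
    {
        struct job *job = state->job;

        state->job = NULL;
        if (!job)
            return;

        job->next = NULL;
        if (conn->queue_tail)
            conn->queue_tail->next = job;
        else
            conn->queue_head = job;
        conn->queue_tail = job;
    }

Clearing the source pointer in the same step is what makes the transfer unambiguous: exactly one owner exists at every point.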
|
| /kernel/linux/linux-6.6/drivers/scsi/lpfc/ |
| D | lpfc_bsg.c |
      71: /* driver data associated with the job */
      96:     struct bsg_job *set_job; /* job waiting for this iocb to finish */
     295:     struct bsg_job *job;
     306:     /* Determine if job has been aborted */
     308:     job = dd_data->set_job;
     309:     if (job) {
     310:         bsg_reply = job->reply;
     311:         /* Prevent timeout handling from trying to abort job */
     312:         job->dd_data = NULL;
     332:     if (job) {
      [all …]
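This completion path races against the BSG timeout handler, and the paired pointers mediate the race: the completion side reads dd_data->set_job to learn whether the job still exists, then clears job->dd_data so the timeout path will back off. A simplified single-threaded illustration of that handshake (invented types; the driver performs these steps under a spinlock, which the sketch elides):

    #include <stddef.h>

    struct job;

    struct dd_data {
        struct job *set_job; /* NULL once the job was aborted or timed out */
    };

    struct job {
        struct dd_data *dd_data; /* NULL once completion has claimed the job */
        int result;
    };

    /* Normal completion: finish the job only if the timeout path has not
     * already torn it down, then detach so the timeout path backs off. */
    static void complete_job(struct dd_data *dd)
    {
        struct job *job = dd->set_job;

        if (job) {
            job->dd_data = NULL; /* stop timeout handling from aborting */
            job->result = 0;
        }
        dd->set_job = NULL;
    }

Each side nulls the pointer the other side checks, so whichever runs first wins and the job is finalized exactly once.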
|