
Searched refs:sched (Results 1 – 25 of 105) sorted by relevance

/drivers/gpu/drm/scheduler/
sched_main.c 71 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
77 rq->sched = sched; in drm_sched_rq_init()
171 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
179 if (s_fence && s_fence->sched == sched) in drm_sched_dependency_optimized()
193 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) in drm_sched_start_timeout() argument
195 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_start_timeout()
196 !list_empty(&sched->ring_mirror_list)) in drm_sched_start_timeout()
197 schedule_delayed_work(&sched->work_tdr, sched->timeout); in drm_sched_start_timeout()
207 void drm_sched_fault(struct drm_gpu_scheduler *sched) in drm_sched_fault() argument
209 mod_delayed_work(system_wq, &sched->work_tdr, 0); in drm_sched_fault()
[all …]
sched_entity.c 140 struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched; in drm_sched_entity_get_free_sched() local
142 if (!entity->rq_list[i]->sched->ready) { in drm_sched_entity_get_free_sched()
143 DRM_WARN("sched%s is not ready, skipping", sched->name); in drm_sched_entity_get_free_sched()
147 num_jobs = atomic_read(&sched->num_jobs); in drm_sched_entity_get_free_sched()
171 struct drm_gpu_scheduler *sched; in drm_sched_entity_flush() local
178 sched = entity->rq->sched; in drm_sched_entity_flush()
186 sched->job_scheduled, in drm_sched_entity_flush()
190 wait_event_killable(sched->job_scheduled, in drm_sched_entity_flush()
224 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_cb()
277 struct drm_gpu_scheduler *sched = NULL; in drm_sched_entity_fini() local
[all …]
gpu_scheduler_trace.h 51 __entry->name = sched_job->sched->name;
54 &sched_job->sched->hw_rq_count);
87 __entry->name = sched_job->sched->name;
Makefile 23 gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
25 obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
sched_fence.c 83 return (const char *)fence->sched->name; in drm_sched_fence_get_timeline_name()
166 fence->sched = entity->rq->sched; in drm_sched_fence_create()
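
The drm/scheduler hits above are the shared GPU scheduler core that the driver results further down (amdgpu, panfrost, v3d, etnaviv, lima) are built on: each driver embeds a struct drm_gpu_scheduler per hardware ring and supplies backend callbacks, while sched_main.c and sched_entity.c own the run queues, the job_scheduled wait queue and the work_tdr timeout work seen in the matches. As a rough orientation only, a minimal sketch of the driver side for this kernel generation could look like the code below; every my_-prefixed name is made up for illustration, and the drm_sched_init() argument list has changed across kernel versions, so treat it as an approximation rather than a reference.

    #include <drm/gpu_scheduler.h>
    #include <linux/jiffies.h>

    /* Hypothetical driver ring embedding the shared scheduler. */
    struct my_ring {
        struct drm_gpu_scheduler sched;
        /* ... hardware queue state ... */
    };

    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
    {
        /* Push the job to the hardware and return its hardware fence;
         * NULL only keeps the sketch short. */
        return NULL;
    }

    static void my_timedout_job(struct drm_sched_job *sched_job)
    {
        /* Reset the hardware; see the panfrost/v3d/etnaviv hits below for
         * the stop/resubmit/start sequence typically used here. */
    }

    static void my_free_job(struct drm_sched_job *sched_job)
    {
        /* Release the driver's job wrapper. */
    }

    static const struct drm_sched_backend_ops my_sched_ops = {
        .run_job      = my_run_job,
        .timedout_job = my_timedout_job,
        .free_job     = my_free_job,
    };

    static int my_ring_sched_init(struct my_ring *ring)
    {
        /* hw_submission = 2, hang_limit = 0, 500 ms timeout and the ring
         * name are illustrative values, not taken from any driver above. */
        return drm_sched_init(&ring->sched, &my_sched_ops, 2, 0,
                              msecs_to_jiffies(500), "my_ring");
    }

drm_sched_fini(), visible in the panfrost and v3d results below, is the matching teardown call.
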
/drivers/slimbus/
sched.c 29 struct slim_sched *sched = &ctrl->sched; in slim_ctrl_clk_pause() local
38 mutex_lock(&sched->m_reconf); in slim_ctrl_clk_pause()
40 if (sched->clk_state == SLIM_CLK_ACTIVE) { in slim_ctrl_clk_pause()
41 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
49 ret = wait_for_completion_timeout(&sched->pause_comp, in slim_ctrl_clk_pause()
52 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
63 if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup) in slim_ctrl_clk_pause()
66 sched->clk_state = SLIM_CLK_ACTIVE; in slim_ctrl_clk_pause()
67 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
73 if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) { in slim_ctrl_clk_pause()
[all …]
messaging.c 120 if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE && in slim_do_transfer()
128 if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) { in slim_do_transfer()
130 ctrl->sched.clk_state, ret); in slim_do_transfer()
core.c 270 mutex_init(&ctrl->sched.m_reconf); in slim_register_controller()
271 init_completion(&ctrl->sched.pause_comp); in slim_register_controller()
500 if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) { in slim_device_report_present()
502 ctrl->sched.clk_state, ret); in slim_device_report_present()
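
The slimbus matches are a bus clock scheduler rather than a job scheduler: slim_register_controller() initialises a mutex (m_reconf) and a completion (pause_comp), and slim_ctrl_clk_pause() serialises clock-state transitions on that mutex, waiting on the completion while a pause is still in flight. A condensed sketch of that lock / check-state / wait-with-timeout pattern is below; it assumes the private drivers/slimbus/slimbus.h definitions, is not the actual driver function, and the 100 ms timeout is illustrative.

    /* Resume-side sketch of slim_ctrl_clk_pause(): take m_reconf, wait for
     * any in-flight pause to complete, then wake the bus back up. */
    static int example_clk_resume(struct slim_controller *ctrl)
    {
        struct slim_sched *sched = &ctrl->sched;
        unsigned long left;

        mutex_lock(&sched->m_reconf);
        if (sched->clk_state == SLIM_CLK_ACTIVE) {
            mutex_unlock(&sched->m_reconf);
            return 0;                               /* already running */
        }

        left = wait_for_completion_timeout(&sched->pause_comp,
                                           msecs_to_jiffies(100));
        if (!left) {
            mutex_unlock(&sched->m_reconf);
            return -ETIMEDOUT;
        }

        if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
            ctrl->wakeup(ctrl);                     /* controller-provided hook */

        sched->clk_state = SLIM_CLK_ACTIVE;
        mutex_unlock(&sched->m_reconf);
        return 0;
    }
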
/drivers/net/wireless/ath/ath9k/
channel.c 261 if (likely(sc->sched.channel_switch_time)) in ath_chanctx_check_active()
263 usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
311 ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
382 mod_timer(&sc->sched.timer, jiffies + timeout); in ath_chanctx_setup_timer()
399 if (ctx->active && sc->sched.extend_absence) { in ath_chanctx_handle_bmiss()
401 sc->sched.extend_absence = false; in ath_chanctx_handle_bmiss()
408 if (ctx->active && sc->sched.beacon_miss >= 2) { in ath_chanctx_handle_bmiss()
410 sc->sched.extend_absence = true; in ath_chanctx_handle_bmiss()
423 avp->offchannel_duration = sc->sched.offchannel_duration; in ath_chanctx_offchannel_noa()
451 if (sc->sched.extend_absence) in ath_chanctx_set_periodic_noa()
[all …]
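
In ath9k, sched is the channel-context scheduling state embedded in struct ath_softc, not a job scheduler: channel_switch_time is kept in microseconds and converted with usecs_to_jiffies() before it is used as a flush timeout or to arm sc->sched.timer. A two-line illustration of that idiom, condensed from the ath_chanctx_check_active() and ath_chanctx_setup_timer() hits rather than copied from the driver:

    /* Convert the channel-switch time (in microseconds) to jiffies and
     * (re)arm the channel-context timer with it. */
    unsigned long timeout = usecs_to_jiffies(sc->sched.channel_switch_time);

    mod_timer(&sc->sched.timer, jiffies + timeout);
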
/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c 33 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); in amdgpu_job_timedout()
41 s_job->sched->name); in amdgpu_job_timedout()
47 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
55 drm_sched_suspend_timeout(&ring->sched); in amdgpu_job_timedout()
76 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
107 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); in amdgpu_job_free_resources()
120 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); in amdgpu_job_free_cb()
162 ring = to_amdgpu_ring(entity->rq->sched); in amdgpu_job_submit()
173 job->base.sched = &ring->sched; in amdgpu_job_submit_direct()
186 struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched); in amdgpu_job_dependency()
[all …]
amdgpu_debugfs.c 869 if (!ring || !ring->sched.thread) in amdgpu_debugfs_test_ib()
871 kthread_park(ring->sched.thread); in amdgpu_debugfs_test_ib()
885 if (!ring || !ring->sched.thread) in amdgpu_debugfs_test_ib()
887 kthread_unpark(ring->sched.thread); in amdgpu_debugfs_test_ib()
977 static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) in amdgpu_ib_preempt_job_recovery() argument
982 spin_lock(&sched->job_list_lock); in amdgpu_ib_preempt_job_recovery()
983 list_for_each_entry(s_job, &sched->ring_mirror_list, node) { in amdgpu_ib_preempt_job_recovery()
984 fence = sched->ops->run_job(s_job); in amdgpu_ib_preempt_job_recovery()
987 spin_unlock(&sched->job_list_lock); in amdgpu_ib_preempt_job_recovery()
997 struct drm_gpu_scheduler *sched = &ring->sched; in amdgpu_ib_preempt_mark_partial_job() local
[all …]
amdgpu_trace.h 154 __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
157 to_amdgpu_ring(p->entity->rq->sched));
173 __string(ring, to_amdgpu_ring(job->base.sched)->name)
182 __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
198 __string(ring, to_amdgpu_ring(job->base.sched)->name)
207 __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
471 __string(ring, sched_job->base.sched->name);
479 __assign_str(ring, sched_job->base.sched->name)
amdgpu_sync.c 72 ring = container_of(s_fence->sched, struct amdgpu_ring, sched); in amdgpu_sync_same_dev()
279 if (s_fence->sched == &ring->sched) { in amdgpu_sync_peek_fence()
amdgpu_ib.c 153 if (!ring->sched.ready) { in amdgpu_ib_schedule()
364 if (!ring->sched.ready || !ring->funcs->test_ib) in amdgpu_ib_ring_tests()
385 ring->sched.ready = false; in amdgpu_ib_ring_tests()
sdma_v2_4.c 358 sdma0->sched.ready = false; in sdma_v2_4_gfx_stop()
359 sdma1->sched.ready = false; in sdma_v2_4_gfx_stop()
480 ring->sched.ready = true; in sdma_v2_4_gfx_resume()
1101 drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); in sdma_v2_4_process_illegal_inst_irq()
1263 struct drm_gpu_scheduler *sched; in sdma_v2_4_set_vm_pte_funcs() local
1268 sched = &adev->sdma.instance[i].ring.sched; in sdma_v2_4_set_vm_pte_funcs()
1270 &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; in sdma_v2_4_set_vm_pte_funcs()
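
Several amdgpu hits use the embedded scheduler as a handle on the ring itself: ring->sched.ready gates submission (amdgpu_ib.c, sdma_v2_4.c), drm_sched_fault() kicks the timeout handler from the SDMA illegal-instruction interrupt, and the debugfs IB test parks each ring's scheduler kthread so nothing new reaches the hardware while the test runs. A condensed sketch of that park/unpark pattern follows; it mirrors the amdgpu_debugfs_test_ib() hits rather than reproducing them, and the loop bound is an assumption.

    /* Quiesce all initialised rings around a debugfs-triggered test, in the
     * spirit of amdgpu_debugfs_test_ib(); 'adev' is the amdgpu device. */
    static void example_quiesce_and_test(struct amdgpu_device *adev)
    {
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
            struct amdgpu_ring *ring = adev->rings[i];

            if (!ring || !ring->sched.thread)
                continue;                         /* ring not brought up */
            kthread_park(ring->sched.thread);     /* stop feeding the HW */
        }

        /* ... run the IB tests while the schedulers are parked ... */

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
            struct amdgpu_ring *ring = adev->rings[i];

            if (!ring || !ring->sched.thread)
                continue;
            kthread_unpark(ring->sched.thread);   /* resume submission */
        }
    }
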
/drivers/gpu/drm/panfrost/
panfrost_job.c 27 struct drm_gpu_scheduler sched; member
399 struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; in panfrost_job_timedout() local
401 drm_sched_stop(sched, sched_job); in panfrost_job_timedout()
404 cancel_delayed_work_sync(&sched->work_tdr); in panfrost_job_timedout()
424 drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); in panfrost_job_timedout()
428 drm_sched_start(&pfdev->js->queue[i].sched, true); in panfrost_job_timedout()
470 drm_sched_fault(&pfdev->js->queue[j].sched); in panfrost_job_irq_handler()
522 ret = drm_sched_init(&js->queue[j].sched, in panfrost_job_init()
538 drm_sched_fini(&js->queue[j].sched); in panfrost_job_init()
551 drm_sched_fini(&js->queue[j].sched); in panfrost_job_fini()
[all …]
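
panfrost, v3d and etnaviv all drive the same timeout-recovery protocol exported by the scheduler core: stop the scheduler with the offending job, make sure no further timeout work fires, reset the hardware, resubmit the jobs that had already been handed to the hardware, then restart scheduling. Below is a generic sketch of that sequence; my_hw_reset() is a hypothetical stand-in for the driver-specific reset, everything else is the drm_sched API visible in the hits above.

    static void my_hw_reset(void)
    {
        /* Driver-specific GPU reset would go here. */
    }

    /* Generic drm_sched timeout recovery, as used by the timedout_job
     * callbacks of panfrost_job.c, v3d_sched.c and etnaviv_sched.c above. */
    static void example_timedout_job(struct drm_sched_job *bad_job)
    {
        struct drm_gpu_scheduler *sched = bad_job->sched;

        drm_sched_stop(sched, bad_job);             /* park the scheduler */
        cancel_delayed_work_sync(&sched->work_tdr); /* no more TDR work   */

        my_hw_reset();

        drm_sched_resubmit_jobs(sched);             /* requeue pending jobs  */
        drm_sched_start(sched, true);               /* resume, full recovery */
    }
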
/drivers/gpu/drm/i915/
i915_request.c 214 list_del(&rq->sched.link); in remove_from_engine()
311 i915_sched_node_fini(&rq->sched); in i915_request_retire()
431 if (request->sched.semaphores && in __i915_request_submit()
433 engine->saturated |= request->sched.semaphores; in __i915_request_submit()
446 list_move_tail(&request->sched.link, &engine->active.requests); in __i915_request_submit()
502 if (request->sched.semaphores && i915_request_started(request)) { in __i915_request_unsubmit()
503 request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE; in __i915_request_unsubmit()
504 request->sched.semaphores = 0; in __i915_request_unsubmit()
696 i915_sched_node_init(&rq->sched); in __i915_request_create()
745 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); in __i915_request_create()
[all …]
i915_scheduler.h 18 list_for_each_entry(it, &(plist)->requests[idx], sched.link)
26 sched.link)
i915_scheduler.c 25 return container_of(node, const struct i915_request, sched); in node_to_request()
177 return rq->sched.attr.priority | __NO_PREEMPTION; in rq_prio()
363 __i915_schedule(&rq->sched, attr); in i915_schedule()
380 if (READ_ONCE(rq->sched.attr.priority) & bump) in i915_schedule_bump_priority()
384 __bump_priority(&rq->sched, bump); in i915_schedule_bump_priority()
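
Note that the i915 hits are a different scheduler entirely: sched here is the i915_sched_node embedded in every i915_request (hence the container_of() in node_to_request() and the sched.link list threading), carrying the priority attribute, semaphore bookkeeping and signaler/waiter dependency lists that i915's own execlists scheduling uses; none of this goes through the drm/scheduler core matched above.
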
/drivers/gpu/drm/v3d/
v3d_sched.c 271 drm_sched_stop(&v3d->queue[q].sched, sched_job); in v3d_gpu_reset_for_timeout()
280 drm_sched_resubmit_jobs(&v3d->queue[q].sched); in v3d_gpu_reset_for_timeout()
284 drm_sched_start(&v3d->queue[q].sched, true); in v3d_gpu_reset_for_timeout()
400 ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, in v3d_sched_init()
410 ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, in v3d_sched_init()
422 ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, in v3d_sched_init()
435 ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, in v3d_sched_init()
447 ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, in v3d_sched_init()
469 if (v3d->queue[q].sched.ready) in v3d_sched_fini()
470 drm_sched_fini(&v3d->queue[q].sched); in v3d_sched_fini()
/drivers/gpu/drm/etnaviv/
etnaviv_sched.c 112 drm_sched_stop(&gpu->sched, sched_job); in etnaviv_sched_timedout_job()
121 drm_sched_resubmit_jobs(&gpu->sched); in etnaviv_sched_timedout_job()
124 drm_sched_start(&gpu->sched, true); in etnaviv_sched_timedout_job()
185 ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, in etnaviv_sched_init()
196 drm_sched_fini(&gpu->sched); in etnaviv_sched_fini()
/drivers/gpu/drm/lima/
lima_sched.c 103 static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched) in to_lima_pipe() argument
105 return container_of(sched, struct lima_sched_pipe, base); in to_lima_pipe()
196 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_run_job()
289 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_timedout_job()
300 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_free_job()
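
lima shows the embed-and-recover idiom most directly: the driver wraps the core scheduler in its own pipe structure and converts back with container_of() inside the backend callbacks (panfrost's "struct drm_gpu_scheduler sched; member" hit is the same idea). A minimal illustration with hypothetical names:

    /* Hypothetical driver queue embedding the core scheduler. */
    struct example_pipe {
        struct drm_gpu_scheduler base;   /* core scheduler, embedded */
        /* ... driver-specific queue state ... */
    };

    /* Recover the containing pipe from the scheduler pointer the core hands
     * back to the backend callbacks. */
    static inline struct example_pipe *to_example_pipe(struct drm_gpu_scheduler *sched)
    {
        return container_of(sched, struct example_pipe, base);
    }
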
/drivers/usb/host/
ehci-sched.c 1259 struct ehci_iso_sched *sched; in itd_urb_transaction() local
1262 sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in itd_urb_transaction()
1263 if (unlikely(sched == NULL)) in itd_urb_transaction()
1266 itd_sched_init(ehci, sched, stream, urb); in itd_urb_transaction()
1269 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
1295 iso_sched_free(stream, sched); in itd_urb_transaction()
1304 list_add(&itd->itd_list, &sched->td_list); in itd_urb_transaction()
1309 urb->hcpriv = sched; in itd_urb_transaction()
1397 struct ehci_iso_sched *sched, in sitd_slot_ok() argument
1490 struct ehci_iso_sched *sched = urb->hcpriv; in iso_stream_schedule() local
[all …]
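
For the EHCI isochronous hit at line 1269: sched->span is counted in microframes and each iTD covers at most one frame (eight microframes), so the allocation is the span rounded up to whole frames plus one extra iTD of slack. For example, an isochronous URB spanning 64 microframes (8 ms) allocates 1 + (64 + 7) / 8 = 9 iTDs.
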
/drivers/infiniband/sw/rxe/
rxe_task.c 154 void rxe_run_task(struct rxe_task *task, int sched) in rxe_run_task() argument
159 if (sched) in rxe_run_task()
/drivers/gpu/drm/i915/gt/
intel_lrc.c 271 return rq->sched.attr.priority; in rq_prio()
350 if (!list_is_last(&rq->sched.link, &engine->active.requests) && in need_preempt()
351 rq_prio(list_next_entry(rq, sched.link)) > last_prio) in need_preempt()
491 sched.link) { in __unwind_incomplete_requests()
516 list_move(&rq->sched.link, pl); in __unwind_incomplete_requests()
937 list_move_tail(&rq->sched.link, pl); in defer_request()
939 list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { in defer_request()
941 container_of(p->waiter, typeof(*w), sched); in defer_request()
952 if (list_empty(&w->sched.link)) in defer_request()
959 list_move_tail(&w->sched.link, &list); in defer_request()
[all …]
