
Searched refs:sched (Results 1 – 25 of 116) sorted by relevance


/drivers/gpu/drm/scheduler/
sched_main.c
71 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
77 rq->sched = sched; in drm_sched_rq_init()
94 atomic_inc(rq->sched->score); in drm_sched_rq_add_entity()
113 atomic_dec(rq->sched->score); in drm_sched_rq_remove_entity()
173 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_job_done() local
175 atomic_dec(&sched->hw_rq_count); in drm_sched_job_done()
176 atomic_dec(sched->score); in drm_sched_job_done()
183 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_job_done()
209 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
217 if (s_fence && s_fence->sched == sched) in drm_sched_dependency_optimized()
[all …]
sched_entity.c
158 struct drm_gpu_scheduler *sched; in drm_sched_entity_flush() local
165 sched = entity->rq->sched; in drm_sched_entity_flush()
173 sched->job_scheduled, in drm_sched_entity_flush()
177 wait_event_killable(sched->job_scheduled, in drm_sched_entity_flush()
211 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_cb()
232 while ((f = job->sched->ops->dependency(job, entity))) in drm_sched_entity_kill_jobs()
269 struct drm_gpu_scheduler *sched = NULL; in drm_sched_entity_fini() local
272 sched = entity->rq->sched; in drm_sched_entity_fini()
280 if (sched) { in drm_sched_entity_fini()
341 drm_sched_wakeup(entity->rq->sched); in drm_sched_entity_wakeup()
[all …]
gpu_scheduler_trace.h
51 __entry->name = sched_job->sched->name;
54 &sched_job->sched->hw_rq_count);
78 __entry->name = sched_job->sched->name;
81 &sched_job->sched->hw_rq_count);
114 __entry->name = sched_job->sched->name;
Makefile
23 gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
25 obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
sched_fence.c
83 return (const char *)fence->sched->name; in drm_sched_fence_get_timeline_name()
166 fence->sched = entity->rq->sched; in drm_sched_fence_create()
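The scheduler-core hits above all funnel into a driver-supplied backend ops table: sched_entity.c calls ops->free_job() and ops->dependency(), and the amdgpu debugfs hit further down calls ops->run_job(). A minimal sketch of such a table follows; the my_* callbacks are hypothetical and the prototypes are an assumption for roughly this kernel generation (the timedout_job return type in particular has changed across releases).

#include <drm/gpu_scheduler.h>

/* Hypothetical driver callbacks; prototypes assumed for this kernel
 * generation, not taken from any file in the results. */
static struct dma_fence *my_job_dependency(struct drm_sched_job *sched_job,
                                           struct drm_sched_entity *s_entity);
static struct dma_fence *my_job_run(struct drm_sched_job *sched_job);
static enum drm_gpu_sched_stat my_job_timedout(struct drm_sched_job *sched_job);
static void my_job_free(struct drm_sched_job *sched_job);

/* The hooks the scheduler core invokes in the hits above. */
static const struct drm_sched_backend_ops my_sched_ops = {
        .dependency   = my_job_dependency,
        .run_job      = my_job_run,
        .timedout_job = my_job_timedout,
        .free_job     = my_job_free,
};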
/drivers/slimbus/
sched.c
29 struct slim_sched *sched = &ctrl->sched; in slim_ctrl_clk_pause() local
38 mutex_lock(&sched->m_reconf); in slim_ctrl_clk_pause()
40 if (sched->clk_state == SLIM_CLK_ACTIVE) { in slim_ctrl_clk_pause()
41 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
49 ret = wait_for_completion_timeout(&sched->pause_comp, in slim_ctrl_clk_pause()
52 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
63 if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup) in slim_ctrl_clk_pause()
66 sched->clk_state = SLIM_CLK_ACTIVE; in slim_ctrl_clk_pause()
67 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
73 if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) { in slim_ctrl_clk_pause()
[all …]
/drivers/net/wireless/ath/ath9k/
channel.c
261 if (likely(sc->sched.channel_switch_time)) in ath_chanctx_check_active()
263 usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
311 ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
382 mod_timer(&sc->sched.timer, jiffies + timeout); in ath_chanctx_setup_timer()
399 if (ctx->active && sc->sched.extend_absence) { in ath_chanctx_handle_bmiss()
401 sc->sched.extend_absence = false; in ath_chanctx_handle_bmiss()
408 if (ctx->active && sc->sched.beacon_miss >= 2) { in ath_chanctx_handle_bmiss()
410 sc->sched.extend_absence = true; in ath_chanctx_handle_bmiss()
423 avp->offchannel_duration = sc->sched.offchannel_duration; in ath_chanctx_offchannel_noa()
451 if (sc->sched.extend_absence) in ath_chanctx_set_periodic_noa()
[all …]
/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
35 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); in amdgpu_job_timedout()
43 __func__, s_job->sched->name); in amdgpu_job_timedout()
54 s_job->sched->name); in amdgpu_job_timedout()
60 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
68 drm_sched_suspend_timeout(&ring->sched); in amdgpu_job_timedout()
96 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
128 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); in amdgpu_job_free_resources()
197 job->base.sched = &ring->sched; in amdgpu_job_submit_direct()
213 struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched); in amdgpu_job_dependency()
241 struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched); in amdgpu_job_run()
[all …]
amdgpu_debugfs.c
1150 if (!ring || !ring->sched.thread) in amdgpu_debugfs_test_ib_show()
1152 kthread_park(ring->sched.thread); in amdgpu_debugfs_test_ib_show()
1166 if (!ring || !ring->sched.thread) in amdgpu_debugfs_test_ib_show()
1168 kthread_unpark(ring->sched.thread); in amdgpu_debugfs_test_ib_show()
1302 static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) in amdgpu_ib_preempt_job_recovery() argument
1307 spin_lock(&sched->job_list_lock); in amdgpu_ib_preempt_job_recovery()
1308 list_for_each_entry(s_job, &sched->pending_list, list) { in amdgpu_ib_preempt_job_recovery()
1309 fence = sched->ops->run_job(s_job); in amdgpu_ib_preempt_job_recovery()
1312 spin_unlock(&sched->job_list_lock); in amdgpu_ib_preempt_job_recovery()
1322 struct drm_gpu_scheduler *sched = &ring->sched; in amdgpu_ib_preempt_mark_partial_job() local
[all …]
amdgpu_sync.c
73 ring = container_of(s_fence->sched, struct amdgpu_ring, sched); in amdgpu_sync_same_dev()
326 if (s_fence->sched == &ring->sched) { in amdgpu_sync_peek_fence()
amdgpu_trace.h
154 __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
157 to_amdgpu_ring(p->entity->rq->sched));
173 __string(ring, to_amdgpu_ring(job->base.sched)->name)
182 __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
198 __string(ring, to_amdgpu_ring(job->base.sched)->name)
207 __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
520 __string(ring, sched_job->base.sched->name)
528 __assign_str(ring, sched_job->base.sched->name);
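The amdgpu_debugfs.c hit shows the recovery path re-running everything still on a scheduler's pending list. A hedged sketch of that loop under the same locking, reusing the field and callback names visible above:

#include <linux/spinlock.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Mirrors the loop in amdgpu_ib_preempt_job_recovery() above: walk the
 * pending list under job_list_lock and hand each job back to the driver's
 * run_job() callback, dropping the returned fence reference. */
static void requeue_pending_jobs(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry(s_job, &sched->pending_list, list) {
                struct dma_fence *fence = sched->ops->run_job(s_job);

                dma_fence_put(fence);
        }
        spin_unlock(&sched->job_list_lock);
}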
/drivers/gpu/drm/etnaviv/
etnaviv_sched.c
94 drm_sched_stop(&gpu->sched, sched_job); in etnaviv_sched_timedout_job()
124 drm_sched_resubmit_jobs(&gpu->sched); in etnaviv_sched_timedout_job()
126 drm_sched_start(&gpu->sched, true); in etnaviv_sched_timedout_job()
131 drm_sched_start(&gpu->sched, true); in etnaviv_sched_timedout_job()
193 ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, in etnaviv_sched_init()
205 drm_sched_fini(&gpu->sched); in etnaviv_sched_fini()
/drivers/net/wireless/mediatek/mt76/mt7615/
sdio_txrx.c
49 sdio->sched.pse_mcu_quota += pse_mcu_quota; in mt7663s_refill_sched_quota()
50 sdio->sched.pse_data_quota += pse_data_quota; in mt7663s_refill_sched_quota()
51 sdio->sched.ple_data_quota += ple_data_quota; in mt7663s_refill_sched_quota()
173 pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ); in mt7663s_tx_pick_quota()
176 if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz) in mt7663s_tx_pick_quota()
179 if (sdio->sched.pse_data_quota < *pse_size + pse_sz || in mt7663s_tx_pick_quota()
180 sdio->sched.ple_data_quota < *ple_size + 1) in mt7663s_tx_pick_quota()
194 sdio->sched.pse_mcu_quota -= pse_size; in mt7663s_tx_update_quota()
196 sdio->sched.pse_data_quota -= pse_size; in mt7663s_tx_update_quota()
197 sdio->sched.ple_data_quota -= ple_size; in mt7663s_tx_update_quota()
sdio_mcu.c
24 sdio->sched.pse_data_quota = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, in mt7663s_mcu_init_sched()
26 sdio->sched.pse_mcu_quota = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, in mt7663s_mcu_init_sched()
28 sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, in mt7663s_mcu_init_sched()
32 sdio->sched.deficit = txdwcnt << 2; in mt7663s_mcu_init_sched()
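The mt7663s hits keep per-path page quotas (PSE/PLE) that are initialized from hardware registers, refilled on completion events, and consumed as frames are queued. A self-contained sketch of the accumulate-or-refuse step from mt7663s_tx_pick_quota(); the tx_sched_quota struct is a hypothetical stand-in for sdio->sched, MT_PSE_PAGE_SZ is assumed to be the 128-byte PSE page size, and the MCU quota handled in the original is left out. The accumulated totals are what mt7663s_tx_update_quota() later subtracts from the quotas.

#include <linux/kernel.h>        /* DIV_ROUND_UP() */

#define MT_PSE_PAGE_SZ  128      /* assumption; see the mt7615 sdio headers */

/* Hypothetical stand-in for the quota state kept in sdio->sched above. */
struct tx_sched_quota {
        int pse_data_quota;
        int ple_data_quota;
        int deficit;
};

/* Convert the frame size to PSE pages, refuse the frame if either quota
 * would be exceeded, otherwise add it to the running totals. */
static bool tx_pick_quota(struct tx_sched_quota *q, int buf_sz,
                          int *pse_size, int *ple_size)
{
        int pse_sz = DIV_ROUND_UP(buf_sz + q->deficit, MT_PSE_PAGE_SZ);

        if (q->pse_data_quota < *pse_size + pse_sz ||
            q->ple_data_quota < *ple_size + 1)
                return false;

        *pse_size += pse_sz;
        *ple_size += 1;
        return true;
}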
/drivers/gpu/drm/v3d/
v3d_sched.c
287 drm_sched_stop(&v3d->queue[q].sched, sched_job); in v3d_gpu_reset_for_timeout()
296 drm_sched_resubmit_jobs(&v3d->queue[q].sched); in v3d_gpu_reset_for_timeout()
300 drm_sched_start(&v3d->queue[q].sched, true); in v3d_gpu_reset_for_timeout()
418 ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, in v3d_sched_init()
428 ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, in v3d_sched_init()
440 ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, in v3d_sched_init()
453 ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, in v3d_sched_init()
465 ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, in v3d_sched_init()
487 if (v3d->queue[q].sched.ready) in v3d_sched_fini()
488 drm_sched_fini(&v3d->queue[q].sched); in v3d_sched_fini()
v3d_drv.c
111 struct drm_gpu_scheduler *sched; in v3d_open() local
121 sched = &v3d->queue[i].sched; in v3d_open()
123 DRM_SCHED_PRIORITY_NORMAL, &sched, in v3d_open()
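v3d_open() above (and lima_sched_context_init() further down) binds a per-client entity to a single scheduler. A minimal sketch of that call, assuming the five-argument drm_sched_entity_init() (priority, scheduler list, list length, guilty pointer) used by those hits; my_file_priv is a hypothetical per-client struct.

#include <drm/gpu_scheduler.h>

/* Hypothetical per-client state: one entity per hardware queue. */
struct my_file_priv {
        struct drm_sched_entity entity[4];
};

static int my_open_queue_entity(struct my_file_priv *priv,
                                struct drm_gpu_scheduler *sched, int i)
{
        /* One scheduler in the list, normal priority, no guilty flag. */
        return drm_sched_entity_init(&priv->entity[i],
                                     DRM_SCHED_PRIORITY_NORMAL,
                                     &sched, 1, NULL);
}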
/drivers/gpu/drm/i915/
i915_request.c
363 i915_sched_node_fini(&rq->sched); in i915_request_retire()
582 list_del_init(&request->sched.link); in __i915_request_submit()
608 if (request->sched.semaphores && in __i915_request_submit()
610 engine->saturated |= request->sched.semaphores; in __i915_request_submit()
687 if (request->sched.semaphores && __i915_request_has_started(request)) in __i915_request_unsubmit()
688 request->sched.semaphores = 0; in __i915_request_unsubmit()
828 i915_sched_node_init(&rq->sched); in __i915_request_ctor()
925 i915_sched_node_reinit(&rq->sched); in __i915_request_create()
971 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); in __i915_request_create()
972 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); in __i915_request_create()
[all …]
i915_scheduler_types.h
86 &(rq__)->sched.waiters_list, \
91 &(rq__)->sched.signalers_list, \
/drivers/gpu/drm/panfrost/
panfrost_job.c
30 struct drm_gpu_scheduler sched; member
469 drm_sched_fault(&pfdev->js->queue[js].sched); in panfrost_job_handle_err()
649 drm_sched_stop(&pfdev->js->queue[i].sched, bad); in panfrost_reset()
714 drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); in panfrost_reset()
719 drm_sched_start(&pfdev->js->queue[i].sched, true); in panfrost_reset()
836 ret = drm_sched_init(&js->queue[j].sched, in panfrost_job_init()
854 drm_sched_fini(&js->queue[j].sched); in panfrost_job_init()
868 drm_sched_fini(&js->queue[j].sched); in panfrost_job_fini()
879 struct drm_gpu_scheduler *sched; in panfrost_job_open() local
883 sched = &js->queue[i].sched; in panfrost_job_open()
[all …]
/drivers/gpu/drm/lima/
lima_sched.c
107 static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched) in to_lima_pipe() argument
109 return container_of(sched, struct lima_sched_pipe, base); in to_lima_pipe()
166 struct drm_gpu_scheduler *sched = &pipe->base; in lima_sched_context_init() local
169 &sched, 1, guilty); in lima_sched_context_init()
224 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_run_job()
291 struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched); in lima_sched_build_error_task_list()
420 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_timedout_job()
459 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_free_job()
lima_trace.h
20 __string(pipe, task->base.sched->name)
27 __assign_str(pipe, task->base.sched->name);
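to_lima_pipe() above is the usual embed-and-container_of idiom: the driver wraps the core drm_gpu_scheduler in its own pipe/queue struct and recovers the wrapper inside the backend callbacks. A sketch with a hypothetical my_pipe wrapper:

#include <linux/kernel.h>        /* container_of() */
#include <drm/gpu_scheduler.h>

/* Hypothetical driver pipe embedding the base scheduler. */
struct my_pipe {
        struct drm_gpu_scheduler base;
        /* driver-private queue state ... */
};

static inline struct my_pipe *to_my_pipe(struct drm_gpu_scheduler *sched)
{
        return container_of(sched, struct my_pipe, base);
}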
/drivers/gpu/drm/i915/gt/
intel_engine_heartbeat.c
83 rq->sched.attr.priority); in show_heartbeat()
156 rq->sched.attr.priority < I915_PRIORITY_BARRIER) { in heartbeat()
164 if (rq->sched.attr.priority >= attr.priority) in heartbeat()
166 if (rq->sched.attr.priority >= attr.priority) in heartbeat()
263 GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); in __intel_engine_pulse()
intel_execlists_submission.c
253 return READ_ONCE(rq->sched.attr.priority); in rq_prio()
326 if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) && in need_preempt()
327 rq_prio(list_next_entry(rq, sched.link)) > last_prio) in need_preempt()
372 sched.link) { in __unwind_incomplete_requests()
374 list_del_init(&rq->sched.link); in __unwind_incomplete_requests()
388 list_move(&rq->sched.link, pl); in __unwind_incomplete_requests()
1073 list_move_tail(&rq->sched.link, pl); in defer_request()
1077 container_of(p->waiter, typeof(*w), sched); in defer_request()
1099 list_move_tail(&w->sched.link, &list); in defer_request()
1102 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); in defer_request()
[all …]
/drivers/usb/host/
ehci-sched.c
1245 struct ehci_iso_sched *sched; in itd_urb_transaction() local
1248 sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in itd_urb_transaction()
1249 if (unlikely(sched == NULL)) in itd_urb_transaction()
1252 itd_sched_init(ehci, sched, stream, urb); in itd_urb_transaction()
1255 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
1281 iso_sched_free(stream, sched); in itd_urb_transaction()
1290 list_add(&itd->itd_list, &sched->td_list); in itd_urb_transaction()
1295 urb->hcpriv = sched; in itd_urb_transaction()
1389 struct ehci_iso_sched *sched, in sitd_slot_ok() argument
1482 struct ehci_iso_sched *sched = urb->hcpriv; in iso_stream_schedule() local
[all …]
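In the ehci-sched.c hit, the number of iTDs allocated for an isochronous URB comes from the schedule span in microframes: num_itds = 1 + (sched->span + 7) / 8, i.e. one iTD per started frame of 8 microframes plus one spare, presumably to cover a span that does not start on a frame boundary. For example, a 19-microframe span gives 1 + (19 + 7) / 8 = 1 + 3 = 4 iTDs.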
/drivers/gpu/drm/i915/gem/
i915_gem_context_types.h
188 struct i915_sched_attr sched; member
316 struct i915_sched_attr sched; member
