| /kernel/linux/linux-5.10/include/drm/ |
| D | gpu_scheduler.h |
|   55   * @rq: runqueue on which this entity is currently scheduled.
|   57   * Jobs from this entity can be scheduled on any scheduler
|   67   * The &drm_sched_fence.scheduled uses the
|   75   * @last_scheduled: points to the finished fence of the last scheduled job.
|   107  * struct drm_sched_rq - queue of entities to be scheduled.
|   111  * @entities: list of the entities to be scheduled.
|   112  * @current_entity: the entity which is to be scheduled.
|   130  * @scheduled: this fence is what will be signaled by the scheduler
|   131  * when the job is scheduled.
|   133  struct dma_fence scheduled; member
|   [all …]
|
| /kernel/linux/linux-4.19/include/drm/ |
| D | gpu_scheduler.h |
|   60   * The &drm_sched_fence.scheduled uses the
|   68   * @last_scheduled: points to the finished fence of the last scheduled job.
|   93   * struct drm_sched_rq - queue of entities to be scheduled.
|   97   * @entities: list of the entities to be scheduled.
|   98   * @current_entity: the entity which is to be scheduled.
|   116  * @scheduled: this fence is what will be signaled by the scheduler
|   117  * when the job is scheduled.
|   119  struct dma_fence scheduled; member
|   149  * @lock: the lock used by the scheduled and the finished fences.
|   164  * @sched: the scheduler instance on which this job is scheduled.
|   [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/scheduler/ |
| D | sched_fence.c |
|   53   int ret = dma_fence_signal(&fence->scheduled); in drm_sched_fence_scheduled()
|   56   DMA_FENCE_TRACE(&fence->scheduled, in drm_sched_fence_scheduled()
|   59   DMA_FENCE_TRACE(&fence->scheduled, in drm_sched_fence_scheduled()
|   122  * Drop the extra reference from the scheduled fence to the base fence.
|   128  dma_fence_put(&fence->scheduled); in drm_sched_fence_release_finished()
|   146  return container_of(f, struct drm_sched_fence, scheduled); in to_drm_sched_fence()
|   170  dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, in drm_sched_fence_create()
|
| /kernel/linux/linux-4.19/drivers/gpu/drm/scheduler/ |
| D | sched_fence.c |
|   51   int ret = dma_fence_signal(&fence->scheduled); in drm_sched_fence_scheduled()
|   54   DMA_FENCE_TRACE(&fence->scheduled, in drm_sched_fence_scheduled()
|   57   DMA_FENCE_TRACE(&fence->scheduled, in drm_sched_fence_scheduled()
|   120  * Drop the extra reference from the scheduled fence to the base fence.
|   126  dma_fence_put(&fence->scheduled); in drm_sched_fence_release_finished()
|   144  return container_of(f, struct drm_sched_fence, scheduled); in to_drm_sched_fence()
|   168  dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, in drm_sched_fence_create()
|
| D | gpu_scheduler.c |
|   28   * into software queues which are then scheduled on a hardware run queue.
|   41   * 4. Entities themselves maintain a queue of jobs that will be scheduled on
|   44   * The jobs in an entity are always scheduled in the order that they were pushed.
|   445  * Fence is a scheduled/finished fence from a job in drm_sched_entity_add_dependency_cb()
|   458  * it to be scheduled in drm_sched_entity_add_dependency_cb()
|   460  fence = dma_fence_get(&s_fence->scheduled); in drm_sched_entity_add_dependency_cb()
|   467  /* Ignore it when it is already scheduled */ in drm_sched_entity_add_dependency_cb()
|   646  if (bad->s_fence->scheduled.context == entity->fence_context) { in drm_sched_hw_job_reset()
|   686  guilty_context = s_job->s_fence->scheduled.context; in drm_sched_job_recovery()
|   689  if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) in drm_sched_job_recovery()
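
Taken together, the gpu_scheduler.h and sched_fence.c/gpu_scheduler.c hits above describe the two dma_fences attached to every scheduler job: "scheduled" signals when the scheduler moves the job onto a hardware run queue, "finished" signals when the hardware has completed it. The following is only a rough driver-side sketch of how those fences are reached; struct my_job and my_submit are made-up names, and only the drm_sched_* and dma_fence_* calls are real API.

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

struct my_job {
        struct drm_sched_job base;
        /* driver-specific payload would go here */
};

static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
                     void *owner)
{
        struct dma_fence *scheduled, *finished;
        int ret;

        ret = drm_sched_job_init(&job->base, entity, owner);
        if (ret)
                return ret;

        /*
         * job->base.s_fence carries the two dma_fences from
         * struct drm_sched_fence: "scheduled" signals when the scheduler
         * picks the job for a hardware run queue, "finished" when the
         * hardware is done with it.
         */
        scheduled = dma_fence_get(&job->base.s_fence->scheduled);
        finished = dma_fence_get(&job->base.s_fence->finished);

        drm_sched_entity_push_job(&job->base, entity);

        /* For illustration only: real drivers usually attach callbacks. */
        dma_fence_wait(finished, false);

        dma_fence_put(scheduled);
        dma_fence_put(finished);
        return 0;
}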
|
| /kernel/linux/linux-5.10/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
| D | pno.h |
|   17   * brcmf_pno_start_sched_scan - initiate scheduled scan on device.
|   20   * @req: configuration parameters for scheduled scan.
|   26   * brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
|   34   * brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
|
| /kernel/linux/linux-4.19/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
| D | pno.h |
|   28   * brcmf_pno_start_sched_scan - initiate scheduled scan on device.
|   31   * @req: configuration parameters for scheduled scan.
|   37   * brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
|   45   * brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
|
| /kernel/linux/linux-5.10/Documentation/powerpc/ |
| D | pmu-ebb.rst |
|   44   user process. This means once an EBB event is scheduled on the PMU, no non-EBB
|   56   first will be scheduled and the other will be put in error state. See the
|   84   userspace is able to reliably determine which PMC the event is scheduled on.
|   95   guarantee that it has been scheduled on the PMU. To ensure that the EBB event
|   96   has been scheduled on the PMU, you must perform a read() on the event. If the
|   97   read() returns EOF, then the event has not been scheduled and EBBs are not
|
| /kernel/linux/linux-4.19/Documentation/powerpc/ |
| D | pmu-ebb.txt |
|   43   user process. This means once an EBB event is scheduled on the PMU, no non-EBB
|   55   first will be scheduled and the other will be put in error state. See the
|   83   userspace is able to reliably determine which PMC the event is scheduled on.
|   94   guarantee that it has been scheduled on the PMU. To ensure that the EBB event
|   95   has been scheduled on the PMU, you must perform a read() on the event. If the
|   96   read() returns EOF, then the event has not been scheduled and EBBs are not
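
Both versions of the EBB document make the same practical point: a successful perf_event_open() does not guarantee the event ever made it onto the PMU, and a read() that returns EOF (0 bytes) is how userspace detects that. Below is a minimal sketch of that check only; the attr is a generic pinned hardware counter, and the PowerPC-specific EBB configuration bits a real EBB event needs are deliberately omitted.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .size = sizeof(attr),
                .config = PERF_COUNT_HW_INSTRUCTIONS,
                .pinned = 1,    /* goes to error state if it cannot be scheduled */
        };
        uint64_t count;
        ssize_t n;
        int fd = perf_event_open(&attr, 0, -1, -1, 0);

        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        n = read(fd, &count, sizeof(count));
        if (n == 0)
                fprintf(stderr, "EOF: event was not scheduled on the PMU\n");
        else if (n < 0)
                perror("read");
        else
                printf("event scheduled, count=%llu\n",
                       (unsigned long long)count);

        close(fd);
        return 0;
}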
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/ |
| D | drm_vblank_work.c |
|   99   * If @work is already scheduled, this function will reschedule said work
|   103  * %1 if @work was successfully (re)scheduled, %0 if it was either already
|   104  * scheduled or cancelled, or a negative error code on failure.
|   131  /* Already scheduled w/ same vbl count */ in drm_vblank_work_schedule()
|   172  * Cancel an already scheduled vblank work and wait for its
|   175  * On return, @work is guaranteed to no longer be scheduled or running, even
|   212  * drm_vblank_work_flush - wait for a scheduled vblank work to finish
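
These hits are the kernel-doc for drm_vblank_work_schedule() and its cancel/flush helpers (present in the 5.10 tree only). As a hedged sketch of the intended usage, with made-up driver names (my_state, my_flip_work, my_arm) around the real drm_vblank_work_* calls:

#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <linux/printk.h>

struct my_state {
        struct drm_crtc *crtc;
        struct drm_vblank_work flip_work;
};

static void my_flip_work(struct kthread_work *base)
{
        struct drm_vblank_work *work = to_drm_vblank_work(base);

        /* Runs on the CRTC's vblank worker once the target count is reached. */
        (void)work;
}

static void my_arm(struct my_state *s)
{
        int ret;

        drm_vblank_work_init(&s->flip_work, s->crtc, my_flip_work);

        /*
         * Target the vblank after the current one; nextonmiss = true means
         * "run on the next vblank anyway" if that count has already passed.
         */
        ret = drm_vblank_work_schedule(&s->flip_work,
                        drm_crtc_vblank_count(s->crtc) + 1, true);
        if (ret < 0)
                pr_err("vblank work not scheduled: %d\n", ret);
        /* ret == 0 means it was already scheduled for this count or cancelled */
}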
|
| /kernel/linux/linux-5.10/net/sctp/ |
| D | stream_sched_prio.c |
|   51   /* Look into scheduled priorities first, as they are sorted and in sctp_sched_prio_get_head()
|   52   * we can find it fast IF it's scheduled. in sctp_sched_prio_get_head()
|   92   bool scheduled = false; in sctp_sched_prio_unsched() local
|   97   /* Scheduled */ in sctp_sched_prio_unsched()
|   98   scheduled = true; in sctp_sched_prio_unsched()
|   114  return scheduled; in sctp_sched_prio_unsched()
|   124  /* Nothing to do if already scheduled */ in sctp_sched_prio_sched()
|
| /kernel/linux/linux-4.19/net/sctp/ |
| D | stream_sched_prio.c |
|   66   /* Look into scheduled priorities first, as they are sorted and in sctp_sched_prio_get_head()
|   67   * we can find it fast IF it's scheduled. in sctp_sched_prio_get_head()
|   107  bool scheduled = false; in sctp_sched_prio_unsched() local
|   112  /* Scheduled */ in sctp_sched_prio_unsched()
|   113  scheduled = true; in sctp_sched_prio_unsched()
|   129  return scheduled; in sctp_sched_prio_unsched()
|   139  /* Nothing to do if already scheduled */ in sctp_sched_prio_sched()
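
stream_sched_prio.c implements the priority variant of the SCTP stream scheduler; the scheduled/unscheduled bookkeeping above tracks which priority heads currently have data queued. From userspace the scheduler is selected per association through socket options. The following is a sketch, assuming the SCTP_STREAM_SCHEDULER and SCTP_STREAM_SCHEDULER_VALUE options and struct sctp_stream_value from the uapi <linux/sctp.h>; the function name and the particular stream/priority values are made up.

#include <linux/sctp.h>
#include <netinet/in.h>
#include <sys/socket.h>

/*
 * On an existing association, switch to the priority stream scheduler
 * (SCTP_SS_PRIO, backed by stream_sched_prio.c) and give stream 1 its
 * own priority value.
 */
static int use_prio_scheduler(int fd, sctp_assoc_t assoc_id)
{
        struct sctp_assoc_value sched = {
                .assoc_id = assoc_id,
                .assoc_value = SCTP_SS_PRIO,
        };
        struct sctp_stream_value prio = {
                .assoc_id = assoc_id,
                .stream_id = 1,
                .stream_value = 1,      /* priority used by this scheduler */
        };

        if (setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
                       &sched, sizeof(sched)) < 0)
                return -1;
        return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
                          &prio, sizeof(prio));
}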
|
| /kernel/linux/linux-4.19/drivers/soc/fsl/dpio/ |
| D | qbman-portal.h |
|   287  * qbman_swp_fq_schedule() - Move the fq to the scheduled state
|   289  * @fqid: the index of frame queue to be scheduled
|   302  * qbman_swp_fq_force() - Force the FQ to fully scheduled state
|   306  * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
|   339  * XOFF FQs will remain in the tentatively-scheduled state, even when
|   340  * non-empty, meaning they won't be selected for scheduled dequeuing.
|   341  * If a FQ is changed to XOFF after it had already become truly-scheduled
|
| /kernel/linux/linux-4.19/drivers/usb/host/ |
| D | xhci-mtk.h |
|   41   * (@repeat==1) scheduled within the interval
|   46   * scheduled first time within the interval
|   48   * scheduled within an interval. In the simple algorithm, only
|   52   * @pkts: number of packets to be transferred in the scheduled uframes
|
| /kernel/linux/linux-4.19/drivers/net/wireless/intel/iwlwifi/fw/api/ |
| D | time-event.h |
|   106  * the first fragment is scheduled.
|   108  * the first 2 fragments are scheduled.
|   114  * scheduled.
|   172  * the first fragment is scheduled.
|   174  * the first 2 fragments are scheduled.
|   180  * scheduled.
|   309  * @status: true if scheduled, false otherwise (not executed)
|
| /kernel/linux/linux-5.10/drivers/usb/host/ |
| D | xhci-mtk.h |
|   56   * (@repeat==1) scheduled within the interval
|   66   * scheduled first time within the interval
|   68   * scheduled within an interval. In the simple algorithm, only
|   72   * @pkts: number of packets to be transferred in the scheduled uframes
|
| /kernel/linux/linux-5.10/drivers/net/wireless/intel/iwlwifi/fw/api/ |
| D | time-event.h |
|   108  * the first fragment is scheduled.
|   110  * the first 2 fragments are scheduled.
|   116  * scheduled.
|   174  * the first fragment is scheduled.
|   176  * the first 2 fragments are scheduled.
|   182  * scheduled.
|   311  * @status: true if scheduled, false otherwise (not executed)
|   444  * Note: the session protection will always be scheduled to start as
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | posix-timers.h |
|   131  * @work: The task work to be scheduled
|   132  * @scheduled: @work has been scheduled already, no further processing
|   136  unsigned int scheduled; member
|
| /kernel/linux/linux-5.10/drivers/soc/fsl/dpio/ |
| D | qbman-portal.h |
|   391  * qbman_swp_fq_schedule() - Move the fq to the scheduled state
|   393  * @fqid: the index of frame queue to be scheduled
|   406  * qbman_swp_fq_force() - Force the FQ to fully scheduled state
|   410  * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
|   443  * XOFF FQs will remain in the tentatively-scheduled state, even when
|   444  * non-empty, meaning they won't be selected for scheduled dequeuing.
|   445  * If a FQ is changed to XOFF after it had already become truly-scheduled
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
| D | i915_priolist_types.h |
|   23   /* Interactive workload, scheduled for immediate pageflipping */
|   38   * another context. They get scheduled with their default priority and
|
| /kernel/linux/linux-4.19/arch/alpha/kernel/ |
| D | perf_event.c |
|   36   /* Number of events scheduled; also number entries valid in arrays below. */
|   40   /* Events currently scheduled. */
|   42   /* Event type of each scheduled event. */
|   44   /* Current index of each scheduled event; if not yet determined
|   149  * Check that a group of events can be simultaneously scheduled on to the
|   369  * Check that a group of events can be simultaneously scheduled on to the PMU.
|   387  * If new events have been scheduled then update cpuc with the new
|   643  * scheduled on to the PMU. At that point the code to programme the in __hw_perf_event_init()
|   652  * be scheduled on to the PMU. in __hw_perf_event_init()
|   737  /* Update cpuc with information from any new scheduled events. */ in alpha_pmu_enable()
|
| /kernel/linux/linux-5.10/arch/alpha/kernel/ |
| D | perf_event.c |
|   36   /* Number of events scheduled; also number entries valid in arrays below. */
|   40   /* Events currently scheduled. */
|   42   /* Event type of each scheduled event. */
|   44   /* Current index of each scheduled event; if not yet determined
|   149  * Check that a group of events can be simultaneously scheduled on to the
|   369  * Check that a group of events can be simultaneously scheduled on to the PMU.
|   387  * If new events have been scheduled then update cpuc with the new
|   637  * scheduled on to the PMU. At that point the code to programme the in __hw_perf_event_init()
|   646  * be scheduled on to the PMU. in __hw_perf_event_init()
|   731  /* Update cpuc with information from any new scheduled events. */ in alpha_pmu_enable()
|
| /kernel/linux/linux-4.19/sound/soc/ |
| D | soc-io.c |
|   137  * operation is scheduled asynchronously. This means it may not be completed
|   138  * when the function returns. To make sure that all scheduled updates have been
|   169  * scheduled using snd_soc_component_update_bits_async() has completed.
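
The soc-io.c hits document the asynchronous register-update path: snd_soc_component_update_bits_async() only schedules the write, and snd_soc_component_async_complete() waits for everything previously scheduled. A hedged sketch of that pattern; MY_REG_CTRL/MY_REG_GAIN and the mask/value pairs are placeholders, not a real codec's registers.

#include <sound/soc.h>

#define MY_REG_CTRL     0x02    /* placeholder register addresses */
#define MY_REG_GAIN     0x03

static void my_codec_apply(struct snd_soc_component *component)
{
        /*
         * Each call only schedules the update; it may return before the
         * bus write has actually happened.
         */
        snd_soc_component_update_bits_async(component, MY_REG_CTRL, 0x01, 0x01);
        snd_soc_component_update_bits_async(component, MY_REG_GAIN, 0xf0, 0x30);

        /* Block until every update scheduled above has reached the hardware. */
        snd_soc_component_async_complete(component);
}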
|
| /kernel/linux/linux-5.10/arch/s390/pci/ |
| D | pci_irq.c |
|   147  atomic_t scheduled; member
|   153  atomic_t *scheduled = data; in zpci_handle_remote_irq() local
|   157  } while (atomic_dec_return(scheduled)); in zpci_handle_remote_irq()
|   179  if (atomic_inc_return(&cpu_data->scheduled) > 1) in zpci_handle_fallback_irq()
|   183  cpu_data->csd.info = &cpu_data->scheduled; in zpci_handle_fallback_irq()
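
The pci_irq.c hits show a compact coalescing idiom built around that atomic "scheduled" counter: only the caller that raises it from 0 to 1 fires the cross-CPU call, and the remote handler keeps looping until atomic_dec_return() hits zero, so later requests piggyback on the run already in flight. A generic sketch of the same pattern with made-up names (my_percpu, my_kick, my_remote_handler):

#include <linux/atomic.h>
#include <linux/smp.h>

struct my_percpu {
        atomic_t scheduled;
        call_single_data_t csd;
};

static void my_remote_handler(void *data)
{
        atomic_t *scheduled = data;

        do {
                /* ... drain whatever work is pending for this CPU ... */
        } while (atomic_dec_return(scheduled));
}

static void my_kick(struct my_percpu *pc, int cpu)
{
        /* A handler run is already scheduled: just ride along with it. */
        if (atomic_inc_return(&pc->scheduled) > 1)
                return;

        pc->csd.func = my_remote_handler;
        pc->csd.info = &pc->scheduled;
        smp_call_function_single_async(cpu, &pc->csd);
}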
|
| /kernel/linux/linux-5.10/kernel/ |
| D | workqueue_internal.h |
|   34   struct list_head scheduled; /* L: scheduled works */ member
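
For context, the "scheduled" list in workqueue_internal.h is per-worker bookkeeping: items queued through the ordinary workqueue API end up on a worker's ->scheduled list shortly before their functions run. The internal header is not meant to be used directly; a minimal, hedged illustration of the public side (module and function names made up) looks like this:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        pr_info("work executed by a worker thread\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
        schedule_work(&my_work);        /* queue on the system workqueue */
        return 0;
}

static void __exit my_exit(void)
{
        flush_work(&my_work);           /* wait until it is no longer scheduled or running */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");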
|