/kernel/linux/linux-5.10/drivers/gpu/drm/ |
D | drm_flip_work.c |
      31   * drm_flip_work_allocate_task - allocate a flip-work task
      51   * @work: the flip-work
      55   * func) on a work queue after drm_flip_work_commit() is called.
      57  void drm_flip_work_queue_task(struct drm_flip_work *work,  in drm_flip_work_queue_task() argument
      62  spin_lock_irqsave(&work->lock, flags);  in drm_flip_work_queue_task()
      63  list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
      64  spin_unlock_irqrestore(&work->lock, flags);  in drm_flip_work_queue_task()
      69   * drm_flip_work_queue - queue work
      70   * @work: the flip-work
      73   * Queues work, that will later be run (passed back to drm_flip_func_t
      [all …]
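
A minimal usage sketch of the drm_flip_work API documented above (not taken from the indexed file): values queued with drm_flip_work_queue() are handed back to the drm_flip_func_t callback from workqueue context once drm_flip_work_commit() runs. The unref_fb() callback and the choice of system_unbound_wq are illustrative assumptions.

#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

/* Hypothetical callback: drop a framebuffer reference later, from wq context. */
static void unref_fb(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_put(val);
}

static struct drm_flip_work unref_work;

static void example_init(void)
{
	drm_flip_work_init(&unref_work, "fb unref", unref_fb);
}

/* Queue values now (e.g. the framebuffer being replaced by a page flip)... */
static void example_queue(struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&unref_work, old_fb);
}

/* ...and commit them from the flip-done/vblank path. */
static void example_commit(void)
{
	drm_flip_work_commit(&unref_work, system_unbound_wq);
}
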
|
D | drm_vblank_work.c |
      38   * generic delayed work implementation which delays work execution until a
      39   * particular vblank has passed, and then executes the work at realtime
      43   * re-arming work items can be easily implemented.
      48  struct drm_vblank_work *work, *next;  in drm_handle_vblank_works() local
      54  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  in drm_handle_vblank_works()
      55  if (!drm_vblank_passed(count, work->count))  in drm_handle_vblank_works()
      58  list_del_init(&work->node);  in drm_handle_vblank_works()
      60  kthread_queue_work(vblank->worker, &work->base);  in drm_handle_vblank_works()
      67  /* Handle cancelling any pending vblank work items and drop respective vblank
      72  struct drm_vblank_work *work, *next;  in drm_vblank_cancel_pending_works() local
      [all …]
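
A hedged sketch of the caller side of this mechanism, assuming the drm_vblank_work API from include/drm/drm_vblank_work.h (drm_vblank_work_init(), drm_vblank_work_schedule(), to_drm_vblank_work()); my_vblank_func() and my_crtc are illustrative placeholders.

#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

static struct drm_vblank_work my_work;

static void my_vblank_func(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);

	/* Runs from the per-CRTC realtime kthread once the target vblank passed. */
}

static void example_arm(struct drm_crtc *my_crtc)
{
	u64 target = drm_crtc_vblank_count(my_crtc) + 1;

	drm_vblank_work_init(&my_work, my_crtc, my_vblank_func);

	/* nextonmiss=false: do not defer to the following vblank if @target was missed. */
	drm_vblank_work_schedule(&my_work, target, false);
}
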
|
/kernel/linux/linux-5.10/include/trace/events/ |
D | workqueue.h |
      14   * workqueue_queue_work - called when a work gets queued
      17   * @work: pointer to struct work_struct
      19   * This event occurs when a work is queued immediately or once a
      20   * delayed work is actually queued on a workqueue (ie: once the delay
      26  struct work_struct *work),
      28  TP_ARGS(req_cpu, pwq, work),
      31  __field( void *, work )
      39  __entry->work = work;
      40  __entry->function = work->func;
      46  TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u",
      [all …]
|
/kernel/linux/linux-5.10/virt/kvm/ |
D | async_pf.c |
      45  static void async_pf_execute(struct work_struct *work)  in async_pf_execute() argument
      48  container_of(work, struct kvm_async_pf, work);  in async_pf_execute()
      59  * This work is run asynchronously to the task which owns  in async_pf_execute()
      98  /* cancel outstanding work queue item */  in kvm_clear_async_pf_completion_queue()
     100  struct kvm_async_pf *work =  in kvm_clear_async_pf_completion_queue() local
     102  typeof(*work), queue);  in kvm_clear_async_pf_completion_queue()
     103  list_del(&work->queue);  in kvm_clear_async_pf_completion_queue()
     109  if (!work->vcpu)  in kvm_clear_async_pf_completion_queue()
     114  flush_work(&work->work);  in kvm_clear_async_pf_completion_queue()
     116  if (cancel_work_sync(&work->work)) {  in kvm_clear_async_pf_completion_queue()
     [all …]
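
A hedged, generic sketch of the cancellation pattern visible above (not KVM-specific): cancel_work_sync() returns true when the work item was still pending and never ran, and in either case guarantees the callback is no longer running when it returns. struct my_item and its names are illustrative.

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_item {
	struct work_struct work;
	int payload;
};

static void my_item_fn(struct work_struct *work)
{
	struct my_item *item = container_of(work, struct my_item, work);

	pr_info("processing %d\n", item->payload);
}

static void my_item_teardown(struct my_item *item)
{
	/*
	 * Returns true if @work was pending and never executed; either way,
	 * no callback is running once it returns, so freeing is safe.
	 */
	if (cancel_work_sync(&item->work))
		pr_debug("work cancelled before it could run\n");

	kfree(item);
}
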
|
/kernel/linux/linux-5.10/fs/btrfs/ |
D | async-thread.c |
      30  /* List head pointing to ordered work list */
      61  struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)  in btrfs_work_owner() argument
      63  return work->wq->fs_info;  in btrfs_work_owner()
     173  * Hook for threshold which will be called before executing the work,
     224  struct btrfs_work *work;  in run_ordered_work() local
     233  work = list_entry(list->next, struct btrfs_work,  in run_ordered_work()
     235  if (!test_bit(WORK_DONE_BIT, &work->flags))  in run_ordered_work()
     241  * updates from ordinary work function.  in run_ordered_work()
     247  * we leave the work item on the list as a barrier so  in run_ordered_work()
     248  * that later work items that are done don't have their  in run_ordered_work()
     [all …]
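
A hedged, self-contained sketch of the ordered-list barrier described above: ordered completion hooks run only for the leading prefix of the list whose normal work has already finished, and the first not-yet-done entry stops the scan so later completed items cannot jump ahead. struct my_ordered_work, MY_WORK_DONE_BIT and ordered_fn are illustrative stand-ins for the btrfs_work internals.

#include <linux/bitops.h>
#include <linux/list.h>

#define MY_WORK_DONE_BIT 0

struct my_ordered_work {
	struct list_head node;
	unsigned long flags;
	void (*ordered_fn)(struct my_ordered_work *w);
};

static void run_ordered_prefix(struct list_head *ordered_list)
{
	struct my_ordered_work *w;

	while (!list_empty(ordered_list)) {
		w = list_first_entry(ordered_list, struct my_ordered_work, node);

		/* Barrier: stop at the first entry whose work has not completed. */
		if (!test_bit(MY_WORK_DONE_BIT, &w->flags))
			break;

		list_del_init(&w->node);
		w->ordered_fn(w);
	}
}
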
|
/kernel/linux/linux-5.10/kernel/ |
D | task_work.c |
       9   * task_work_add - ask the @task to execute @work->func()
      11   * @work: the callback to run
      14   * Queue @work for task_work_run() below and notify the @task if @notify
      17   * work is run only when the task exits the kernel and returns to user mode,
      19   * it can't process this @work. Otherwise @work->func() will be called when the
      22   * If the targeted task is exiting, then an error is returned and the work item
      32  int task_work_add(struct task_struct *task, struct callback_head *work,  in task_work_add() argument
      42  work->next = head;  in task_work_add()
      43  } while (cmpxchg(&task->task_works, head, work) != head);  in task_work_add()
      73   * task_work_cancel - cancel a pending work added by task_work_add()
      [all …]
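
A minimal sketch of the task_work_add() API documented above; my_twork_fn() and the use of TWA_RESUME are illustrative. The callback runs in the context of @task when it next returns to user mode.

#include <linux/sched.h>
#include <linux/task_work.h>

static struct callback_head my_cb;

static void my_twork_fn(struct callback_head *cb)
{
	/* Runs in the context of the task the work was queued to. */
}

static int example_queue_to(struct task_struct *task)
{
	init_task_work(&my_cb, my_twork_fn);

	/* Fails with -ESRCH if @task is already exiting. */
	return task_work_add(task, &my_cb, TWA_RESUME);
}
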
|
D | irq_work.c |
      30  static bool irq_work_claim(struct irq_work *work)  in irq_work_claim() argument
      34  oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);  in irq_work_claim()
      36  * If the work is already pending, no need to raise the IPI.  in irq_work_claim()
      52  /* Enqueue on current CPU, work must already be claimed and preempt disabled */
      53  static void __irq_work_queue_local(struct irq_work *work)  in __irq_work_queue_local() argument
      55  /* If the work is "lazy", handle it from next tick if any */  in __irq_work_queue_local()
      56  if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {  in __irq_work_queue_local()
      57  if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&  in __irq_work_queue_local()
      61  if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))  in __irq_work_queue_local()
      66  /* Enqueue the irq work @work on the current CPU */
      [all …]
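
A minimal sketch of typical irq_work usage from the caller's side: queue a callback from a context that cannot sleep or take ordinary locks (e.g. NMI or hard IRQ) and have it run shortly afterwards from the IRQ-work path. my_irq_work_fn() is an illustrative placeholder.

#include <linux/irq_work.h>

static struct irq_work my_iw;

static void my_irq_work_fn(struct irq_work *work)
{
	/* Runs in hard interrupt context, shortly after being raised. */
}

static void example_setup(void)
{
	init_irq_work(&my_iw, my_irq_work_fn);
}

static void example_raise(void)
{
	/* Returns false if the work was already claimed/pending. */
	irq_work_queue(&my_iw);
}
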
|
D | kthread.c |
     710   * when they finish. There is defined a safe point for freezing when one work
     719  struct kthread_work *work;  in kthread_worker_fn() local
     742  work = NULL;  in kthread_worker_fn()
     745  work = list_first_entry(&worker->work_list,  in kthread_worker_fn()
     747  list_del_init(&work->node);  in kthread_worker_fn()
     749  worker->current_work = work;  in kthread_worker_fn()
     752  if (work) {  in kthread_worker_fn()
     754  work->func(work);  in kthread_worker_fn()
     855   * Returns true when the work could not be queued at the moment.
     860  struct kthread_work *work)  in queuing_blocked() argument
     [all …]
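
A minimal caller-side sketch of the worker loop shown above: create a dedicated worker thread and feed it kthread_work items, which it executes one at a time. my_kwork_fn() and the "my-worker" name are illustrative.

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker *worker;
static struct kthread_work my_kwork;

static void my_kwork_fn(struct kthread_work *work)
{
	/* Executed by the dedicated worker thread, one item at a time. */
}

static int example_start(void)
{
	worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&my_kwork, my_kwork_fn);
	kthread_queue_work(worker, &my_kwork);
	return 0;
}

static void example_stop(void)
{
	kthread_flush_work(&my_kwork);
	kthread_destroy_worker(worker);
}
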
|
D | workqueue.c |
      18   * This is the generic async execution mechanism. Work items as are
      21   * normal work items and the other for high priority ones) and some extra
     236   * The externally visible workqueue. It relays the issued work items to
     244  int work_color; /* WQ: current work color */
     311  /* PL: allowable cpus for unbound wqs and work items */
     314  /* CPU where unbound work was last round robin scheduled from this CPU */
     318   * Local execution of unbound work items is no longer guaranteed. The
     319   * following always forces round-robin CPU selection on unbound work items
     440  struct work_struct *work = addr;  in work_is_static_object() local
     442  return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));  in work_is_static_object()
     [all …]
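
A hedged sketch of allocating dedicated workqueues on top of the machinery above: one served by the high-priority per-CPU pools and one unbound queue whose execution CPU is chosen by the workqueue core. The names are illustrative; most users can simply use schedule_work() and system_wq.

#include <linux/workqueue.h>

static struct workqueue_struct *hipri_wq;
static struct workqueue_struct *unbound_wq;

static int example_create(void)
{
	/* Serviced by the high-priority per-CPU worker pools. */
	hipri_wq = alloc_workqueue("my_hipri", WQ_HIGHPRI, 0);
	if (!hipri_wq)
		return -ENOMEM;

	/* Not bound to any CPU; the wq core picks where items run. */
	unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
	if (!unbound_wq) {
		destroy_workqueue(hipri_wq);
		return -ENOMEM;
	}
	return 0;
}

static void example_destroy(void)
{
	destroy_workqueue(hipri_wq);
	destroy_workqueue(unbound_wq);
}
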
|
/kernel/linux/linux-5.10/include/linux/ |
D | workqueue.h |
       3   * workqueue.h --- work queue handling for Linux.
      21  typedef void (*work_func_t)(struct work_struct *work);
      25   * The first word is the work queue pointer and the flags rolled into
      28  #define work_data_bits(work) ((unsigned long *)(&(work)->data))  argument
      31  WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
      32  WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
      34  WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
      79   * When a work item is off queue, its high bits point to the last
     116  struct work_struct work;  member
     119  /* target workqueue and CPU ->timer uses to queue ->work */
     [all …]
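
A minimal sketch of the delayed-work type declared in this header, whose embedded timer queues the work once the delay expires; my_dwork_fn() and the delays are illustrative.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct delayed_work my_dwork;

static void my_dwork_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* Runs in workqueue context once the timer in @dwork has fired. */
}

static void example(void)
{
	INIT_DELAYED_WORK(&my_dwork, my_dwork_fn);
	schedule_delayed_work(&my_dwork, 2 * HZ);

	/* Reprogram the pending delay, or cancel and wait. */
	mod_delayed_work(system_wq, &my_dwork, HZ);
	cancel_delayed_work_sync(&my_dwork);
}
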
|
D | completion.h |
      36  #define COMPLETION_INITIALIZER(work) \  argument
      37  { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
      39  #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \  argument
      40  (*({ init_completion_map(&(work), &(map)); &(work); }))
      42  #define COMPLETION_INITIALIZER_ONSTACK(work) \  argument
      43  (*({ init_completion(&work); &work; }))
      47   * @work: identifier for the completion structure
      53  #define DECLARE_COMPLETION(work) \  argument
      54  struct completion work = COMPLETION_INITIALIZER(work)
      63   * @work: identifier for the completion structure
      [all …]
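
A minimal sketch of the completion API these initializers back: one context sleeps in wait_for_completion() until another calls complete(). Here the signaller is a work item; done_work_fn() is an illustrative placeholder.

#include <linux/completion.h>
#include <linux/workqueue.h>

static DECLARE_COMPLETION(setup_done);

static void done_work_fn(struct work_struct *work)
{
	/* ...finish the asynchronous part... */
	complete(&setup_done);		/* wake up one waiter */
}

static DECLARE_WORK(done_work, done_work_fn);

static void example_wait(void)
{
	schedule_work(&done_work);
	wait_for_completion(&setup_done);	/* sleeps until complete() */
}
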
|
D | jump_label_ratelimit.h |
      12  struct delayed_work work;  member
      18  struct delayed_work work;  member
      24  struct delayed_work work;  member
      28  __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
      30  __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
      33  __static_key_deferred_flush((x), &(x)->work)
      37  struct delayed_work *work,
      39  extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
      43  extern void jump_label_update_timeout(struct work_struct *work);
      49  .work = __DELAYED_WORK_INITIALIZER((name).work, \
      [all …]
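
A hedged sketch of the deferred static-key helpers this header declares: increments patch the key immediately, while decrements are batched through the embedded delayed work so the expensive code patching happens at most once per rate-limit window. my_feature_key and the one-second limit are illustrative assumptions.

#include <linux/jiffies.h>
#include <linux/jump_label.h>
#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred my_feature_key;

static void example_init(void)
{
	/* Defer and batch decrements for up to one second. */
	jump_label_rate_limit(&my_feature_key, HZ);
}

static void example_enable(void)
{
	static_key_slow_inc(&my_feature_key.key);	/* takes effect now */
}

static void example_disable(void)
{
	/* Queued on the delayed work; the key flips back after the rate limit. */
	static_key_slow_dec_deferred(&my_feature_key);
}
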
|
D | kthread.h |
      77   * Simple work processor based on kthread.
      85  typedef void (*kthread_work_func_t)(struct kthread_work *work);
     110  struct kthread_work work;  member
     120  #define KTHREAD_WORK_INIT(work, fn) { \  argument
     121  .node = LIST_HEAD_INIT((work).node), \
     126  .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
     134  #define DEFINE_KTHREAD_WORK(work, fn) \  argument
     135  struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
     163  #define kthread_init_work(work, fn) \  argument
     165  memset((work), 0, sizeof(struct kthread_work)); \
     [all …]
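
A hedged sketch using the static initializers declared here for the delayed variant: a kthread_delayed_work defined at compile time and queued onto an existing worker. The worker pointer, my_dwork_fn() and the delays are illustrative.

#include <linux/jiffies.h>
#include <linux/kthread.h>

static void my_dwork_fn(struct kthread_work *work);

static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_dwork_fn);

static void my_dwork_fn(struct kthread_work *work)
{
	/* Runs on the worker thread once the programmed delay has elapsed. */
}

static void example_arm(struct kthread_worker *worker)
{
	kthread_queue_delayed_work(worker, &my_dwork, 100);

	/* Or push the timeout back if the item is already pending. */
	kthread_mod_delayed_work(worker, &my_dwork, msecs_to_jiffies(500));
}
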
|
/kernel/linux/linux-5.10/Documentation/core-api/ |
D | workqueue.rst |
      17  When such an asynchronous execution context is needed, a work item
      22  While there are work items on the workqueue the worker executes the
      23  functions associated with the work items one after the other. When
      24  there is no work item left on the workqueue the worker becomes idle.
      25  When a new work item gets queued, the worker begins executing again.
      43  while an ST wq one for the whole system. Work items had to compete for
      72  abstraction, the work item, is introduced.
      74  A work item is a simple struct that holds a pointer to the function
      76  wants a function to be executed asynchronously it has to set up a work
      77  item pointing to that function and queue that work item on a
      [all …]
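
A minimal example of the concept the documentation describes: set up a work item pointing at a function and queue it; a worker thread later executes it asynchronously. my_work_fn() is an illustrative callback.

#include <linux/workqueue.h>

static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* Executed asynchronously by a worker thread from the shared pools. */
}

static void example(void)
{
	INIT_WORK(&my_work, my_work_fn);
	schedule_work(&my_work);	/* queue on the default system_wq */
}
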
|
/kernel/linux/linux-5.10/fs/ |
D | io-wq.c |
      34  IO_WORKER_F_BOUND = 16, /* is doing bounded work */
      39  IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
     202  struct io_wq_work *work)  in io_work_get_acct() argument
     204  if (work->flags & IO_WQ_WORK_UNBOUND)  in io_work_get_acct()
     299  * Most likely an attempt to queue unbounded work on an io_wq that  in io_wqe_wake_worker()
     343  * Worker will start processing some work. Move it to the busy list, if
     347  struct io_wq_work *work)  in __io_worker_busy() argument
     362  work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;  in __io_worker_busy()
     381  * No work, worker going to sleep. Move to freelist, and unuse mm if we
     398  static inline unsigned int io_get_work_hash(struct io_wq_work *work)  in io_get_work_hash() argument
     [all …]
|
/kernel/linux/linux-5.10/drivers/staging/octeon/ |
D | ethernet-rx.c |
      59   * @work: Work queue entry pointing to the packet.
      63  static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)  in cvm_oct_check_rcv_error() argument
      68  port = work->word0.pip.cn68xx.pknd;  in cvm_oct_check_rcv_error()
      70  port = work->word1.cn38xx.ipprt;  in cvm_oct_check_rcv_error()
      72  if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))  in cvm_oct_check_rcv_error()
      81  if (work->word2.snoip.err_code == 5 ||  in cvm_oct_check_rcv_error()
      82  work->word2.snoip.err_code == 7) {  in cvm_oct_check_rcv_error()
      99  cvmx_phys_to_ptr(work->packet_ptr.s.addr);  in cvm_oct_check_rcv_error()
     102  while (i < work->word1.len - 1) {  in cvm_oct_check_rcv_error()
     111  work->packet_ptr.s.addr += i + 1;  in cvm_oct_check_rcv_error()
     [all …]
|
D | ethernet-tx.c |
     516  /* Get a work queue entry */  in cvm_oct_xmit_pow()
     517  struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);  in cvm_oct_xmit_pow() local
     519  if (unlikely(!work)) {  in cvm_oct_xmit_pow()
     520  printk_ratelimited("%s: Failed to allocate a work queue entry\n",  in cvm_oct_xmit_pow()
     532  cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);  in cvm_oct_xmit_pow()
     558  * Fill in some of the work queue fields. We may need to add  in cvm_oct_xmit_pow()
     562  work->word0.pip.cn38xx.hw_chksum = skb->csum;  in cvm_oct_xmit_pow()
     563  work->word1.len = skb->len;  in cvm_oct_xmit_pow()
     564  cvmx_wqe_set_port(work, priv->port);  in cvm_oct_xmit_pow()
     565  cvmx_wqe_set_qos(work, priv->port & 0x7);  in cvm_oct_xmit_pow()
     [all …]
|
/kernel/linux/linux-5.10/LICENSES/dual/ |
D | Apache-2.0 |
      49  "Work" shall mean the work of authorship, whether in Source or Object form,
      51  is included in or attached to the work (an example is provided in the
      54  "Derivative Works" shall mean any work, whether in Source or Object form,
      55  that is based on (or derived from) the Work and for which the editorial
      57  a whole, an original work of authorship. For the purposes of this License,
      59  merely link (or bind by name) to the interfaces of, the Work and Derivative
      62  "Contribution" shall mean any work of authorship, including the original
      63  version of the Work and any modifications or additions to that Work or
      65  inclusion in the Work by the copyright owner or by an individual or Legal
      72  and improving the Work, but excluding communication that is conspicuously
      [all …]
|
/kernel/linux/linux-5.10/LICENSES/preferred/ |
D | LGPL-2.1 |
      88  work, a derivative of the original library. The ordinary General Public
     121  follow. Pay close attention to the difference between a "work based on the
     122  library" and a "work that uses the library". The former contains code
     138  The "Library", below, refers to any such software library or work which
     139  has been distributed under these terms. A "work based on the Library"
     140  means either the Library or any derivative work under copyright law:
     141  that is to say, a work containing the Library or a portion of it, either
     146  "Source code" for a work means the preferred form of the work for making
     155  program is covered only if its contents constitute a work based on the
     171  thus forming a work based on the Library, and copy and distribute such
     [all …]
|
D | LGPL-2.0 |
      88  a textual and legal sense, the linked executable is a combined work, a
     108  follow. Pay close attention to the difference between a "work based on the
     109  library" and a "work that uses the library". The former contains code
     128  The "Library", below, refers to any such software library or work which
     129  has been distributed under these terms. A "work based on the Library"
     130  means either the Library or any derivative work under copyright law:
     131  that is to say, a work containing the Library or a portion of it, either
     136  "Source code" for a work means the preferred form of the work for making
     145  program is covered only if its contents constitute a work based on the
     161  thus forming a work based on the Library, and copy and distribute such
     [all …]
|
/kernel/linux/linux-5.10/drivers/accessibility/speakup/ |
D | selection.c |
      20  struct work_struct work;  member
      25  static void __speakup_set_selection(struct work_struct *work)  in __speakup_set_selection() argument
      28  container_of(work, struct speakup_selection_work, work);  in __speakup_set_selection()
      58  .work = __WORK_INITIALIZER(speakup_sel_work.work,
      65  * cancelling selection work. getting kref first establishes the  in speakup_set_selection()
      87  schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);  in speakup_set_selection()
      96  cancel_work_sync(&speakup_sel_work.work);  in speakup_cancel_selection()
      97  /* setting to null so that if work fails to run and we cancel it,  in speakup_cancel_selection()
     106  static void __speakup_paste_selection(struct work_struct *work)  in __speakup_paste_selection() argument
     109  container_of(work, struct speakup_selection_work, work);  in __speakup_paste_selection()
     [all …]
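
A hedged sketch of the pattern used above: embed a work_struct in a state structure, initialize it statically with __WORK_INITIALIZER(), recover the outer structure with container_of() in the handler, and cancel synchronously on teardown. struct my_sel_work and its fields are illustrative.

#include <linux/printk.h>
#include <linux/workqueue.h>

struct my_sel_work {
	struct work_struct work;
	int payload;
};

static void my_sel_handler(struct work_struct *work)
{
	struct my_sel_work *sw = container_of(work, struct my_sel_work, work);

	pr_info("handling payload %d\n", sw->payload);
}

static struct my_sel_work my_sel = {
	.work = __WORK_INITIALIZER(my_sel.work, my_sel_handler),
};

static void example_trigger(void)
{
	my_sel.payload = 42;
	schedule_work_on(WORK_CPU_UNBOUND, &my_sel.work);
}

static void example_cancel(void)
{
	cancel_work_sync(&my_sel.work);	/* waits for a running handler, if any */
}
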
|
/kernel/linux/linux-5.10/drivers/infiniband/core/ |
D | cm.c |
      91  struct cm_work *work);
     228  struct delayed_work work;  member
     239  struct cm_work work;  member
     299  static void cm_work_handler(struct work_struct *work);
     718  __be32 remote_id = timewait_info->work.remote_id;  in cm_insert_remote_id()
     724  if (be32_lt(remote_id, cur_timewait_info->work.remote_id))  in cm_insert_remote_id()
     726  else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))  in cm_insert_remote_id()
     752  if (be32_lt(remote_id, timewait_info->work.remote_id))  in cm_find_remote_id()
     754  else if (be32_gt(remote_id, timewait_info->work.remote_id))  in cm_find_remote_id()
     761  res = cm_acquire_id(timewait_info->work.local_id,  in cm_find_remote_id()
     [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/ |
D | i915_gem_client_blt.c |
      98  struct work_struct work;  member
     133  static void clear_pages_signal_irq_worker(struct irq_work *work)  in clear_pages_signal_irq_worker() argument
     135  struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);  in clear_pages_signal_irq_worker()
     156  static void clear_pages_worker(struct work_struct *work)  in clear_pages_worker() argument
     158  struct clear_pages_work *w = container_of(work, typeof(*w), work);  in clear_pages_worker()
     298  schedule_work(&w->work);  in clear_pages_work_notify()
     318  struct clear_pages_work *work;  in i915_gem_schedule_fill_pages_blt() local
     326  work = kmalloc(sizeof(*work), GFP_KERNEL);  in i915_gem_schedule_fill_pages_blt()
     327  if (!work) {  in i915_gem_schedule_fill_pages_blt()
     332  work->value = value;  in i915_gem_schedule_fill_pages_blt()
     [all …]
|
/kernel/linux/linux-5.10/include/drm/ |
D | drm_flip_work.h |
      34   * Util to queue up work to run from work-queue context after flip/vblank.
      46   * @work: the flip work
      49   * Callback function to be called for each of the queue'd work items after
      52  typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
      55   * struct drm_flip_task - flip work task
      65   * struct drm_flip_work - flip work queue
      83  void drm_flip_work_queue_task(struct drm_flip_work *work,
      85  void drm_flip_work_queue(struct drm_flip_work *work, void *val);
      86  void drm_flip_work_commit(struct drm_flip_work *work,
      88  void drm_flip_work_init(struct drm_flip_work *work,
      [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_display.c |
      46  struct amdgpu_flip_work *work =  in amdgpu_display_flip_callback() local
      50  schedule_work(&work->flip_work.work);  in amdgpu_display_flip_callback()
      53  static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,  in amdgpu_display_flip_handle_fence() argument
      63  if (!dma_fence_add_callback(fence, &work->cb,  in amdgpu_display_flip_handle_fence()
      74  container_of(__work, struct delayed_work, work);  in amdgpu_display_flip_work_func()
      75  struct amdgpu_flip_work *work =  in amdgpu_display_flip_work_func() local
      77  struct amdgpu_device *adev = work->adev;  in amdgpu_display_flip_work_func()
      78  struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];  in amdgpu_display_flip_work_func()
      85  if (amdgpu_display_flip_handle_fence(work, &work->excl))  in amdgpu_display_flip_work_func()
      88  for (i = 0; i < work->shared_count; ++i)  in amdgpu_display_flip_work_func()
      [all …]
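
A hedged, generic sketch of the fence-driven deferral pattern visible above (not the amdgpu code itself): install a dma_fence callback that reschedules a work item, so the work only proceeds once the fence has signalled. struct my_flip and its names are illustrative.

#include <linux/dma-fence.h>
#include <linux/workqueue.h>

struct my_flip {
	struct work_struct work;
	struct dma_fence_cb cb;
};

static void my_flip_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_flip *flip = container_of(cb, struct my_flip, cb);

	schedule_work(&flip->work);	/* fence signalled: run the flip work */
}

static void my_flip_work_fn(struct work_struct *work)
{
	/* Perform the flip; any fence it waited on has signalled by now. */
}

static void my_flip_init(struct my_flip *flip)
{
	INIT_WORK(&flip->work, my_flip_work_fn);
}

/* Returns true if the callback was armed (must wait), false if already done. */
static bool my_flip_wait_on(struct my_flip *flip, struct dma_fence *fence)
{
	if (!fence)
		return false;

	/* dma_fence_add_callback() returns -ENOENT if @fence already signalled. */
	return dma_fence_add_callback(fence, &flip->cb, my_flip_fence_cb) == 0;
}
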
|