| /kernel/linux/linux-6.6/fs/smb/server/ |
| D | ksmbd_work.c |
|   21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL); in ksmbd_alloc_work_struct() local
|   23 if (work) { in ksmbd_alloc_work_struct()
|   24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
|   25 work->compound_pfid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
|   26 INIT_LIST_HEAD(&work->request_entry); in ksmbd_alloc_work_struct()
|   27 INIT_LIST_HEAD(&work->async_request_entry); in ksmbd_alloc_work_struct()
|   28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct()
|   29 INIT_LIST_HEAD(&work->aux_read_list); in ksmbd_alloc_work_struct()
|   30 work->iov_alloc_cnt = 4; in ksmbd_alloc_work_struct()
|   31 work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec), in ksmbd_alloc_work_struct()
|   [all …]
|
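The ksmbd_work.c hits above show ksmbd_alloc_work_struct() pulling a zeroed work object from a slab cache and initialising its list heads and iov array. Below is a minimal sketch of that allocation pattern only; the names `demo_work` and `demo_work_cache` are invented for illustration and are not the real ksmbd types.

```c
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/uio.h>

struct demo_work {
	struct list_head request_entry;
	struct kvec *iov;
	unsigned int iov_alloc_cnt;
};

/* created once at init time with kmem_cache_create("demo_work", ...) */
static struct kmem_cache *demo_work_cache;

static struct demo_work *demo_alloc_work(void)
{
	/* zeroed object from the dedicated slab cache */
	struct demo_work *w = kmem_cache_zalloc(demo_work_cache, GFP_KERNEL);

	if (!w)
		return NULL;

	INIT_LIST_HEAD(&w->request_entry);

	/* pre-size the iov array; ksmbd starts with 4 entries and grows it later */
	w->iov_alloc_cnt = 4;
	w->iov = kcalloc(w->iov_alloc_cnt, sizeof(struct kvec), GFP_KERNEL);
	if (!w->iov) {
		kmem_cache_free(demo_work_cache, w);
		return NULL;
	}
	return w;
}
```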
| D | server.c |
|   88 * @work: smb work containing server thread information
|   92 static inline int check_conn_state(struct ksmbd_work *work) in check_conn_state() argument
|   96 if (ksmbd_conn_exiting(work->conn) || in check_conn_state()
|   97 ksmbd_conn_need_reconnect(work->conn)) { in check_conn_state()
|   98 rsp_hdr = work->response_buf; in check_conn_state()
|   108 static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn, in __process_request() argument
|   115 if (check_conn_state(work)) in __process_request()
|   118 if (ksmbd_verify_smb_message(work)) { in __process_request()
|   119 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); in __process_request()
|   123 command = conn->ops->get_cmd_val(work); in __process_request()
|   [all …]
|
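server.c's __process_request() follows a common dispatch shape: refuse work on a dying connection, validate the PDU, then resolve the command value to a handler. A heavily simplified sketch of that shape is below; every type, field, and constant is invented for illustration and is not the ksmbd definition.

```c
#include <linux/types.h>

struct demo_work {
	bool conn_exiting;	/* connection is being torn down */
	bool msg_valid;		/* PDU passed basic sanity checks */
	u16 command;
	u32 status;
};

static int demo_handle_command(struct demo_work *w)
{
	/* the real code indexes a per-command handler table here */
	return 0;
}

static int demo_process_request(struct demo_work *w)
{
	if (w->conn_exiting)
		return 0;			/* drop the request, connection is dying */

	if (!w->msg_valid) {
		w->status = 0xC000000DU;	/* e.g. an INVALID_PARAMETER status */
		return 0;			/* error status already recorded */
	}

	return demo_handle_command(w);		/* dispatch on the command value */
}
```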
| D | smb2pdu.c |
|   42 static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) in __wbuf() argument
|   44 if (work->next_smb2_rcv_hdr_off) { in __wbuf()
|   45 *req = ksmbd_req_buf_next(work); in __wbuf()
|   46 *rsp = ksmbd_resp_buf_next(work); in __wbuf()
|   48 *req = smb2_get_msg(work->request_buf); in __wbuf()
|   49 *rsp = smb2_get_msg(work->response_buf); in __wbuf()
|   85 * @work: smb work
|   90 int smb2_get_ksmbd_tcon(struct ksmbd_work *work) in smb2_get_ksmbd_tcon() argument
|   92 struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); in smb2_get_ksmbd_tcon()
|   103 if (xa_empty(&work->sess->tree_conns)) { in smb2_get_ksmbd_tcon()
|   [all …]
|
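smb2pdu.c's __wbuf() picks the request/response pointers differently depending on whether the work is partway through a compound (chained) SMB2 request; the ksmbd_req_buf_next()/ksmbd_resp_buf_next() helpers shown in ksmbd_work.h below do the offset arithmetic. A hedged sketch of that selection logic follows, with invented types and the assumption (matching the helpers above) that the buffers start with a 4-byte transport length header.

```c
/* Invented types; mirrors the "first PDU vs. next PDU in a compound" choice. */
struct demo_work {
	void *request_buf;          /* whole receive buffer, 4-byte transport header first */
	void *response_buf;
	unsigned int next_rcv_off;  /* offset of the next chained request header, 0 if none */
	unsigned int next_rsp_off;
};

static void demo_wbuf(struct demo_work *w, void **req, void **rsp)
{
	if (w->next_rcv_off) {
		/* continue a compound request: point past the previous PDU */
		*req = (char *)w->request_buf + w->next_rcv_off + 4;
		*rsp = (char *)w->response_buf + w->next_rsp_off + 4;
	} else {
		/* first (or only) PDU: skip just the 4-byte transport header */
		*req = (char *)w->request_buf + 4;
		*rsp = (char *)w->response_buf + 4;
	}
}
```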
| D | ksmbd_work.h |
|   86 struct work_struct work; member
|   96 * @work: smb work containing response buffer
|   98 static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work) in ksmbd_resp_buf_next() argument
|   100 return work->response_buf + work->next_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_next()
|   105 * @work: smb work containing response buffer
|   107 static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work) in ksmbd_resp_buf_curr() argument
|   109 return work->response_buf + work->curr_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_curr()
|   114 * @work: smb work containing response buffer
|   116 static inline void *ksmbd_req_buf_next(struct ksmbd_work *work) in ksmbd_req_buf_next() argument
|   118 return work->request_buf + work->next_smb2_rcv_hdr_off + 4; in ksmbd_req_buf_next()
|   [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/ |
| D | drm_flip_work.c |
|   31 * drm_flip_work_allocate_task - allocate a flip-work task
|   51 * @work: the flip-work
|   55 * func) on a work queue after drm_flip_work_commit() is called.
|   57 void drm_flip_work_queue_task(struct drm_flip_work *work, in drm_flip_work_queue_task() argument
|   62 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_queue_task()
|   63 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
|   64 spin_unlock_irqrestore(&work->lock, flags); in drm_flip_work_queue_task()
|   69 * drm_flip_work_queue - queue work
|   70 * @work: the flip-work
|   73 * Queues work, that will later be run (passed back to drm_flip_func_t
|   [all …]
|
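drm_flip_work_queue_task() simply appends a task to the work's queued list under a spinlock; actual execution is deferred until drm_flip_work_commit(). Here is a minimal sketch of that queue-under-spinlock pattern with invented types (`demo_flip_work`, `demo_task`), not the DRM structures themselves.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_task {
	struct list_head node;
	void *item;
};

struct demo_flip_work {
	spinlock_t lock;          /* protects @queued */
	struct list_head queued;  /* tasks waiting for commit */
};

static void demo_flip_work_init(struct demo_flip_work *work)
{
	spin_lock_init(&work->lock);
	INIT_LIST_HEAD(&work->queued);
}

static void demo_queue_task(struct demo_flip_work *work, struct demo_task *task)
{
	unsigned long flags;

	/* may be called from atomic context, hence the irqsave variant */
	spin_lock_irqsave(&work->lock, flags);
	list_add_tail(&task->node, &work->queued);
	spin_unlock_irqrestore(&work->lock, flags);
}
```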
| D | drm_vblank_work.c |
|   38 * generic delayed work implementation which delays work execution until a
|   39 * particular vblank has passed, and then executes the work at realtime
|   43 * re-arming work items can be easily implemented.
|   48 struct drm_vblank_work *work, *next; in drm_handle_vblank_works() local
|   54 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_handle_vblank_works()
|   55 if (!drm_vblank_passed(count, work->count)) in drm_handle_vblank_works()
|   58 list_del_init(&work->node); in drm_handle_vblank_works()
|   60 kthread_queue_work(vblank->worker, &work->base); in drm_handle_vblank_works()
|   67 /* Handle cancelling any pending vblank work items and drop respective vblank
|   72 struct drm_vblank_work *work, *next; in drm_vblank_cancel_pending_works() local
|   [all …]
|
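drm_handle_vblank_works() walks a pending list, skips items whose target vblank count has not been reached, and hands the rest to a kthread worker. The sketch below shows that "run when a sequence counter passes a threshold" pattern; the types are invented, and the locking and drm_vblank_passed() details of the real code are elided.

```c
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/types.h>

struct demo_vblank_work {
	struct kthread_work base;   /* executed by the dedicated kthread worker */
	struct list_head node;      /* entry on the pending list */
	u64 count;                  /* sequence number to wait for */
};

/* has @seq reached @target, tolerating wraparound? */
static bool demo_seq_passed(u64 seq, u64 target)
{
	return (s64)(seq - target) >= 0;
}

static void demo_handle_vblank(struct list_head *pending,
			       struct kthread_worker *worker, u64 seq)
{
	struct demo_vblank_work *work, *next;

	list_for_each_entry_safe(work, next, pending, node) {
		if (!demo_seq_passed(seq, work->count))
			continue;               /* not due yet, leave it queued */

		list_del_init(&work->node);     /* take it off the pending list */
		kthread_queue_work(worker, &work->base);
	}
}
```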
| /kernel/linux/linux-6.6/drivers/gpu/drm/ |
| D | drm_flip_work.c |
|   31 * drm_flip_work_allocate_task - allocate a flip-work task
|   51 * @work: the flip-work
|   55 * func) on a work queue after drm_flip_work_commit() is called.
|   57 void drm_flip_work_queue_task(struct drm_flip_work *work, in drm_flip_work_queue_task() argument
|   62 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_queue_task()
|   63 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
|   64 spin_unlock_irqrestore(&work->lock, flags); in drm_flip_work_queue_task()
|   69 * drm_flip_work_queue - queue work
|   70 * @work: the flip-work
|   73 * Queues work, that will later be run (passed back to drm_flip_func_t
|   [all …]
|
| D | drm_vblank_work.c |
|   38 * generic delayed work implementation which delays work execution until a
|   39 * particular vblank has passed, and then executes the work at realtime
|   43 * re-arming work items can be easily implemented.
|   48 struct drm_vblank_work *work, *next; in drm_handle_vblank_works() local
|   54 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_handle_vblank_works()
|   55 if (!drm_vblank_passed(count, work->count)) in drm_handle_vblank_works()
|   58 list_del_init(&work->node); in drm_handle_vblank_works()
|   60 kthread_queue_work(vblank->worker, &work->base); in drm_handle_vblank_works()
|   67 /* Handle cancelling any pending vblank work items and drop respective vblank
|   72 struct drm_vblank_work *work, *next; in drm_vblank_cancel_pending_works() local
|   [all …]
|
| /kernel/linux/linux-5.10/include/trace/events/ |
| D | workqueue.h |
|   14 * workqueue_queue_work - called when a work gets queued
|   17 * @work: pointer to struct work_struct
|   19 * This event occurs when a work is queued immediately or once a
|   20 * delayed work is actually queued on a workqueue (ie: once the delay
|   26 struct work_struct *work),
|   28 TP_ARGS(req_cpu, pwq, work),
|   31 __field( void *, work )
|   39 __entry->work = work;
|   40 __entry->function = work->func;
|   46 TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u",
|   [all …]
|
| /kernel/linux/linux-6.6/virt/kvm/ |
| D | async_pf.c |
|   45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
|   48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
|   59 * This work is run asynchronously to the task which owns in async_pf_execute()
|   92 static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work) in kvm_flush_and_free_async_pf_work() argument
|   95 * The async #PF is "done", but KVM must wait for the work item itself, in kvm_flush_and_free_async_pf_work()
|   98 * after the last call to module_put(). Note, flushing the work item in kvm_flush_and_free_async_pf_work()
|   104 * need to be flushed (but sanity check that the work wasn't queued). in kvm_flush_and_free_async_pf_work()
|   106 if (work->wakeup_all) in kvm_flush_and_free_async_pf_work()
|   107 WARN_ON_ONCE(work->work.func); in kvm_flush_and_free_async_pf_work()
|   109 flush_work(&work->work); in kvm_flush_and_free_async_pf_work()
|   [all …]
|
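async_pf_execute() recovers the enclosing kvm_async_pf from its embedded work_struct with container_of(), and kvm_flush_and_free_async_pf_work() later flush_work()s the item before freeing it. Below is a generic sketch of that embed-and-recover pattern; the `demo_async_op` type and handler are invented, not the KVM code.

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct demo_async_op {
	struct work_struct work;   /* embedded work item */
	unsigned long token;       /* payload for the handler */
};

static void demo_async_execute(struct work_struct *work)
{
	/* recover the containing object from the embedded member */
	struct demo_async_op *op = container_of(work, struct demo_async_op, work);

	pr_info("handling token %lu\n", op->token);
}

static void demo_submit_and_reap(void)
{
	struct demo_async_op *op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (!op)
		return;

	op->token = 42;
	INIT_WORK(&op->work, demo_async_execute);
	schedule_work(&op->work);

	/* wait until the handler has finished before freeing, as the KVM code does */
	flush_work(&op->work);
	kfree(op);
}
```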
| /kernel/linux/linux-6.6/include/trace/events/ |
| D | workqueue.h |
|   14 * workqueue_queue_work - called when a work gets queued
|   17 * @work: pointer to struct work_struct
|   19 * This event occurs when a work is queued immediately or once a
|   20 * delayed work is actually queued on a workqueue (ie: once the delay
|   26 struct work_struct *work),
|   28 TP_ARGS(req_cpu, pwq, work),
|   31 __field( void *, work )
|   39 __entry->work = work;
|   40 __entry->function = work->func;
|   46 TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
|   [all …]
|
| /kernel/linux/linux-5.10/virt/kvm/ |
| D | async_pf.c |
|   45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
|   48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
|   59 * This work is run asynchronously to the task which owns in async_pf_execute()
|   98 /* cancel outstanding work queue item */ in kvm_clear_async_pf_completion_queue()
|   100 struct kvm_async_pf *work = in kvm_clear_async_pf_completion_queue() local
|   102 typeof(*work), queue); in kvm_clear_async_pf_completion_queue()
|   103 list_del(&work->queue); in kvm_clear_async_pf_completion_queue()
|   109 if (!work->vcpu) in kvm_clear_async_pf_completion_queue()
|   114 flush_work(&work->work); in kvm_clear_async_pf_completion_queue()
|   116 if (cancel_work_sync(&work->work)) { in kvm_clear_async_pf_completion_queue()
|   [all …]
|
| /kernel/linux/linux-5.10/kernel/ |
| D | task_work.c |
|   9 * task_work_add - ask the @task to execute @work->func()
|   11 * @work: the callback to run
|   14 * Queue @work for task_work_run() below and notify the @task if @notify
|   17 * work is run only when the task exits the kernel and returns to user mode,
|   19 * it can't process this @work. Otherwise @work->func() will be called when the
|   22 * If the targeted task is exiting, then an error is returned and the work item
|   32 int task_work_add(struct task_struct *task, struct callback_head *work, in task_work_add() argument
|   41 work->next = head; in task_work_add()
|   42 } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
|   62 * task_work_cancel_match - cancel a pending work added by task_work_add()
|   [all …]
|
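task_work_add() pushes the callback onto the task's singly linked task_works list with a lock-free cmpxchg loop, retrying if another CPU raced in. A stripped-down sketch of that push follows, using an invented global list head instead of the real task_struct field; the exit-sentinel check the real code performs is omitted.

```c
#include <linux/atomic.h>
#include <linux/compiler.h>

struct demo_cb {
	struct demo_cb *next;
	void (*func)(struct demo_cb *);
};

/* head of a lock-free LIFO list of pending callbacks */
static struct demo_cb *demo_pending;

static void demo_cb_add(struct demo_cb *work)
{
	struct demo_cb *head;

	do {
		head = READ_ONCE(demo_pending);
		work->next = head;	/* link in front of the current head */
	} while (cmpxchg(&demo_pending, head, work) != head);	/* retry if we raced */
}
```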
| D | irq_work.c |
|   30 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
|   34 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags); in irq_work_claim()
|   36 * If the work is already pending, no need to raise the IPI. in irq_work_claim()
|   52 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
|   53 static void __irq_work_queue_local(struct irq_work *work) in __irq_work_queue_local() argument
|   55 /* If the work is "lazy", handle it from next tick if any */ in __irq_work_queue_local()
|   56 if (atomic_read(&work->flags) & IRQ_WORK_LAZY) { in __irq_work_queue_local()
|   57 if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && in __irq_work_queue_local()
|   61 if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) in __irq_work_queue_local()
|   66 /* Enqueue the irq work @work on the current CPU */
|   [all …]
|
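irq_work_claim() sets the CLAIMED flag with atomic_fetch_or() and uses the returned old flags to detect whether the item was already pending, so a second queue attempt does not raise another IPI. The snippet below is a generic sketch of that claim-bit idiom; the flag value and type are invented.

```c
#include <linux/atomic.h>
#include <linux/bits.h>

#define DEMO_PENDING	BIT(0)

struct demo_item {
	atomic_t flags;
};

/* Returns true if the caller claimed the item and must queue it. */
static bool demo_claim(struct demo_item *item)
{
	int oflags = atomic_fetch_or(DEMO_PENDING, &item->flags);

	/* bit already set: whoever set it first owns queueing and execution */
	return !(oflags & DEMO_PENDING);
}
```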
| D | kthread.c |
|   710 * when they finish. There is defined a safe point for freezing when one work
|   719 struct kthread_work *work; in kthread_worker_fn() local
|   742 work = NULL; in kthread_worker_fn()
|   745 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
|   747 list_del_init(&work->node); in kthread_worker_fn()
|   749 worker->current_work = work; in kthread_worker_fn()
|   752 if (work) { in kthread_worker_fn()
|   754 work->func(work); in kthread_worker_fn()
|   855 * Returns true when the work could not be queued at the moment.
|   860 struct kthread_work *work) in queuing_blocked() argument
|   [all …]
|
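kthread_worker_fn() is the loop behind the kthread worker API: it pops the first kthread_work off the worker's list and calls its func. From the caller's side the same machinery is used roughly as in this hedged usage sketch (handler and names invented, error handling trimmed).

```c
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>

static void demo_work_fn(struct kthread_work *work)
{
	pr_info("running on the dedicated worker thread\n");
}

static struct kthread_work demo_work;

static int demo_use_worker(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "demo_worker");  /* spawns the backing kthread */
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&demo_work, demo_work_fn);
	kthread_queue_work(worker, &demo_work);    /* demo_work_fn() runs on "demo_worker" */

	kthread_flush_worker(worker);              /* wait for queued work to finish */
	kthread_destroy_worker(worker);            /* stop and free the kthread */
	return 0;
}
```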
| /kernel/linux/linux-6.6/fs/btrfs/ |
| D | async-thread.c |
|   29 /* List head pointing to ordered work list */
|   55 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
|   57 return work->wq->fs_info; in btrfs_work_owner()
|   163 * Hook for threshold which will be called before executing the work,
|   214 struct btrfs_work *work; in run_ordered_work() local
|   223 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
|   225 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
|   231 * updates from ordinary work function. in run_ordered_work()
|   237 * we leave the work item on the list as a barrier so in run_ordered_work()
|   238 * that later work items that are done don't have their in run_ordered_work()
|   [all …]
|
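run_ordered_work() in btrfs walks the ordered list and only processes entries whose WORK_DONE_BIT has been set by the normal work function, stopping at the first unfinished entry so submission order is preserved. Here is a simplified sketch of that "done bit plus in-order drain" idea; the type, bit, and handler are invented, and the locking of the real code is omitted.

```c
#include <linux/list.h>
#include <linux/bitops.h>

#define DEMO_WORK_DONE_BIT	0

struct demo_ordered_work {
	struct list_head ordered_entry;   /* position in submission order */
	unsigned long flags;
	void (*ordered_func)(struct demo_ordered_work *w);
};

/* Called by the normal (unordered) handler when its part is finished. */
static void demo_mark_done(struct demo_ordered_work *w)
{
	set_bit(DEMO_WORK_DONE_BIT, &w->flags);
}

/* Drain completed entries strictly in submission order. */
static void demo_run_ordered(struct list_head *ordered_list)
{
	while (!list_empty(ordered_list)) {
		struct demo_ordered_work *w =
			list_first_entry(ordered_list, struct demo_ordered_work,
					 ordered_entry);

		/* an unfinished entry acts as a barrier for everything behind it */
		if (!test_bit(DEMO_WORK_DONE_BIT, &w->flags))
			break;

		list_del_init(&w->ordered_entry);
		w->ordered_func(w);
	}
}
```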
| /kernel/linux/linux-5.10/fs/btrfs/ |
| D | async-thread.c |
|   30 /* List head pointing to ordered work list */
|   61 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
|   63 return work->wq->fs_info; in btrfs_work_owner()
|   173 * Hook for threshold which will be called before executing the work,
|   224 struct btrfs_work *work; in run_ordered_work() local
|   233 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
|   235 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
|   241 * updates from ordinary work function. in run_ordered_work()
|   247 * we leave the work item on the list as a barrier so in run_ordered_work()
|   248 * that later work items that are done don't have their in run_ordered_work()
|   [all …]
|
| /kernel/linux/linux-6.6/kernel/ |
| D | task_work.c |
|   19 * task_work_add - ask the @task to execute @work->func()
|   21 * @work: the callback to run
|   24 * Queue @work for task_work_run() below and notify the @task if @notify
|   35 * @TWA_RESUME work is run only when the task exits the kernel and returns to
|   40 * Fails if the @task is exiting/exited and thus it can't process this @work.
|   41 * Otherwise @work->func() will be called when the @task goes through one of
|   44 * If the targeted task is exiting, then an error is returned and the work item
|   54 int task_work_add(struct task_struct *task, struct callback_head *work, in task_work_add() argument
|   68 * Record the work call stack in order to print it in KASAN in task_work_add()
|   75 kasan_record_aux_stack_noalloc(work); in task_work_add()
|   [all …]
|
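The 6.6 version of task_work_add() documents the notify modes (for example, TWA_RESUME runs the callback when the task next returns to user mode). From the caller's side the API looks roughly like this hedged sketch; the callback and its container struct are invented.

```c
#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct demo_deferred {
	struct callback_head cb;   /* what task_work_add() queues */
	int value;
};

static void demo_task_cb(struct callback_head *cb)
{
	struct demo_deferred *d = container_of(cb, struct demo_deferred, cb);

	pr_info("ran in task context, value=%d\n", d->value);
	kfree(d);
}

/* Ask @task to run demo_task_cb() the next time it returns to user mode. */
static int demo_defer_to_task(struct task_struct *task, int value)
{
	struct demo_deferred *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int ret;

	if (!d)
		return -ENOMEM;

	d->value = value;
	init_task_work(&d->cb, demo_task_cb);

	ret = task_work_add(task, &d->cb, TWA_RESUME);
	if (ret)
		kfree(d);	/* task is exiting; the callback will never run */
	return ret;
}
```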
| D | irq_work.c |
|   57 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
|   61 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); in irq_work_claim()
|   63 * If the work is already pending, no need to raise the IPI. in irq_work_claim()
|   79 static __always_inline void irq_work_raise(struct irq_work *work) in irq_work_raise() argument
|   82 trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func); in irq_work_raise()
|   87 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
|   88 static void __irq_work_queue_local(struct irq_work *work) in __irq_work_queue_local() argument
|   95 work_flags = atomic_read(&work->node.a_flags); in __irq_work_queue_local()
|   107 if (!llist_add(&work->node.llist, list)) in __irq_work_queue_local()
|   110 /* If the work is "lazy", handle it from next tick if any */ in __irq_work_queue_local()
|   [all …]
|
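The 6.6 irq_work code adds irq_work_raise() with an IPI tracepoint, but the caller-facing API is unchanged: initialise an irq_work with a callback and queue it from any context (including NMI); the callback then runs in hard-irq context soon after. A small usage sketch follows; the handler and names are invented.

```c
#include <linux/irq_work.h>
#include <linux/printk.h>

static void demo_irq_work_fn(struct irq_work *work)
{
	/* runs in hard-irq context shortly after it is queued */
	pr_info("irq_work callback ran\n");
}

static struct irq_work demo_iw;

static void demo_init(void)
{
	init_irq_work(&demo_iw, demo_irq_work_fn);
}

static void demo_poke(void)
{
	/* returns false if the work was already pending */
	if (!irq_work_queue(&demo_iw))
		pr_debug("demo irq_work was already queued\n");
}
```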
| /kernel/linux/linux-6.6/LICENSES/dual/ |
| D | copyleft-next-0.3.1 |
|   26 of, publicly perform and publicly display My Work.
|   40 Legal Notices contained in My Work (to the extent they remain
|   47 If You Distribute a Derived Work, You must license the entire Derived
|   48 Work as a whole under this License, with prominent notice of such
|   50 separate Distribution of portions of the Derived Work.
|   52 If the Derived Work includes material licensed under the GPL, You may
|   53 instead license the Derived Work under the GPL.
|   57 When Distributing a Covered Work, You may not impose further
|   58 restrictions on the exercise of rights in the Covered Work granted under
|   64 However, You may Distribute a Covered Work incorporating material
|   [all …]
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | workqueue.h |
|   3 * workqueue.h --- work queue handling for Linux.
|   21 typedef void (*work_func_t)(struct work_struct *work);
|   25 * The first word is the work queue pointer and the flags rolled into
|   28 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
|   31 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
|   32 WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */
|   34 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
|   78 * When a work item is off queue, its high bits point to the last
|   117 struct work_struct work; member
|   120 /* target workqueue and CPU ->timer uses to queue ->work */
|   [all …]
|
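workqueue.h is where work_func_t, the WORK_STRUCT_* flag bits, and the declaration helpers live. The usual caller-side idioms built on it look like this hedged sketch (handler and work-item names invented).

```c
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void demo_handler(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

/* compile-time initialisation of a static work item */
static DECLARE_WORK(demo_static_work, demo_handler);

/* delayed variant: same handler signature, fires after a delay */
static DECLARE_DELAYED_WORK(demo_delayed_work, demo_handler);

static void demo_kick(void)
{
	schedule_work(&demo_static_work);       /* run soon on the system workqueue */
	schedule_delayed_work(&demo_delayed_work, msecs_to_jiffies(100));
}
```

Dynamically embedded items use INIT_WORK()/INIT_DELAYED_WORK() on a struct member instead of the static DECLARE_* forms; the handler signature is the same either way.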
| D | completion.h |
|   36 #define COMPLETION_INITIALIZER(work) \ argument
|   37 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
|   39 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument
|   40 (*({ init_completion_map(&(work), &(map)); &(work); }))
|   42 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
|   43 (*({ init_completion(&work); &work; }))
|   47 * @work: identifier for the completion structure
|   53 #define DECLARE_COMPLETION(work) \ argument
|   54 struct completion work = COMPLETION_INITIALIZER(work)
|   63 * @work: identifier for the completion structure
|   [all …]
|
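completion.h defines the initializers matched above (COMPLETION_INITIALIZER, the ONSTACK variants, DECLARE_COMPLETION). Typical use is one side waiting while another side signals; here is a small hedged sketch with an invented producer work item.

```c
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* statically initialised completion, equivalent to COMPLETION_INITIALIZER */
static DECLARE_COMPLETION(demo_done);

static void demo_producer(struct work_struct *work)
{
	/* ... produce the result ... */
	complete(&demo_done);                  /* wake up one waiter */
}

static DECLARE_WORK(demo_producer_work, demo_producer);

static int demo_wait_for_result(void)
{
	schedule_work(&demo_producer_work);

	/* sleep until demo_producer() signals, but give up after 500 ms */
	if (!wait_for_completion_timeout(&demo_done, msecs_to_jiffies(500)))
		return -ETIMEDOUT;
	return 0;
}
```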
| /kernel/linux/linux-6.6/include/linux/ |
| D | workqueue.h |
|   3 * workqueue.h --- work queue handling for Linux.
|   21 typedef void (*work_func_t)(struct work_struct *work);
|   25 * The first word is the work queue pointer and the flags rolled into
|   28 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
|   31 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
|   32 WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */
|   34 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
|   73 * When a work item is off queue, its high bits point to the last
|   112 struct work_struct work; member
|   115 /* target workqueue and CPU ->timer uses to queue ->work */
|   [all …]
|
| D | completion.h |
|   35 #define COMPLETION_INITIALIZER(work) \ argument
|   36 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
|   38 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument
|   39 (*({ init_completion_map(&(work), &(map)); &(work); }))
|   41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
|   42 (*({ init_completion(&work); &work; }))
|   46 * @work: identifier for the completion structure
|   52 #define DECLARE_COMPLETION(work) \ argument
|   53 struct completion work = COMPLETION_INITIALIZER(work)
|   62 * @work: identifier for the completion structure
|   [all …]
|
| /kernel/linux/linux-5.10/Documentation/core-api/ |
| D | workqueue.rst |
|   17 When such an asynchronous execution context is needed, a work item
|   22 While there are work items on the workqueue the worker executes the
|   23 functions associated with the work items one after the other. When
|   24 there is no work item left on the workqueue the worker becomes idle.
|   25 When a new work item gets queued, the worker begins executing again.
|   43 while an ST wq one for the whole system. Work items had to compete for
|   72 abstraction, the work item, is introduced.
|   74 A work item is a simple struct that holds a pointer to the function
|   76 wants a function to be executed asynchronously it has to set up a work
|   77 item pointing to that function and queue that work item on a
|   [all …]
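workqueue.rst describes the model those lines summarise: a work item is a small struct pointing at a function, and queueing it hands the function to a worker-pool thread. The end-to-end shape looks like this hedged sketch, using an invented job struct and a private workqueue.

```c
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct demo_job {
	struct work_struct work;   /* the "work item" the document describes */
	int payload;
};

static struct workqueue_struct *demo_wq;

static void demo_job_fn(struct work_struct *work)
{
	struct demo_job *job = container_of(work, struct demo_job, work);

	pr_info("worker thread processed payload %d\n", job->payload);
	kfree(job);
}

static int demo_submit(int payload)
{
	struct demo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return -ENOMEM;

	job->payload = payload;
	INIT_WORK(&job->work, demo_job_fn);
	queue_work(demo_wq, &job->work);   /* a worker picks it up asynchronously */
	return 0;
}

/* module init would do: demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
 * module exit would do: destroy_workqueue(demo_wq);
 */
```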
|