Lines matching refs: work (kernel/kthread.c)
636 struct kthread_work *work; in kthread_worker_fn() local
659 work = NULL; in kthread_worker_fn()
662 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
664 list_del_init(&work->node); in kthread_worker_fn()
666 worker->current_work = work; in kthread_worker_fn()
669 if (work) { in kthread_worker_fn()
671 work->func(work); in kthread_worker_fn()
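
The fragments above are the core of the worker loop: kthread_worker_fn() pops the first entry off worker->work_list, unlinks it, records it as current_work, and invokes work->func(work). Below is a minimal sketch of driving that loop directly, in the classic pre-kthread_create_worker() style; the names example_worker, example_fn, example_work, and example_start are hypothetical, not from the listing.

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>

static DEFINE_KTHREAD_WORKER(example_worker);

static void example_fn(struct kthread_work *work)
{
	pr_info("ran on the worker thread\n");	/* invoked as work->func(work) */
}

static DEFINE_KTHREAD_WORK(example_work, example_fn);

static int example_start(void)
{
	struct task_struct *task;

	/* kthread_worker_fn() is the loop shown above; kthread_run()
	 * spawns a kthread that executes it against our worker. */
	task = kthread_run(kthread_worker_fn, &example_worker, "example");
	if (IS_ERR(task))
		return PTR_ERR(task);

	kthread_queue_work(&example_worker, &example_work);
	return 0;
}
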
777 struct kthread_work *work) in queuing_blocked() argument
781 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
785 struct kthread_work *work) in kthread_insert_work_sanity_check() argument
788 WARN_ON_ONCE(!list_empty(&work->node)); in kthread_insert_work_sanity_check()
790 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
795 struct kthread_work *work, in kthread_insert_work() argument
798 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
800 list_add_tail(&work->node, pos); in kthread_insert_work()
801 work->worker = worker; in kthread_insert_work()
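
queuing_blocked() (refusing to queue when the work is already linked or mid-cancel) and the sanity check both feed into kthread_insert_work(), the one place where a work item gets linked to a worker. A hedged reconstruction from the fragments above; the wake-up at the end is not among the matching lines and is an assumption from context.

static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	/* WARNs if the work is already queued or owned by another worker */
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);	/* link at the requested position */
	work->worker = worker;			/* record ownership for later WARNs */

	/* assumed from context: kick the worker thread if it is idle */
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}
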
819 struct kthread_work *work) in kthread_queue_work() argument
825 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
826 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
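
kthread_queue_work() is the public entry point: under the worker lock (not shown in the matching lines) it bails out via queuing_blocked() if the item is already pending or being cancelled, and otherwise appends it to work_list. A hypothetical caller follows; struct frob_ctx, frob_fn(), frob_setup(), and frob_submit() are illustrative names, not kernel API.

#include <linux/kthread.h>
#include <linux/printk.h>

struct frob_ctx {
	struct kthread_work work;
	int payload;
};

static void frob_fn(struct kthread_work *work)
{
	struct frob_ctx *ctx = container_of(work, struct frob_ctx, work);

	pr_info("payload=%d\n", ctx->payload);
}

static void frob_setup(struct frob_ctx *ctx)
{
	kthread_init_work(&ctx->work, frob_fn);	/* one-time init */
}

static bool frob_submit(struct kthread_worker *worker, struct frob_ctx *ctx)
{
	/* false means queuing_blocked(): already queued or canceling */
	return kthread_queue_work(worker, &ctx->work);
}
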
845 struct kthread_work *work = &dwork->work; in kthread_delayed_work_timer_fn() local
846 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn()
858 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
861 WARN_ON_ONCE(list_empty(&work->node)); in kthread_delayed_work_timer_fn()
862 list_del_init(&work->node); in kthread_delayed_work_timer_fn()
863 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
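
kthread_delayed_work_timer_fn() is the hand-off: when the timer armed by __kthread_queue_delayed_work() fires, it moves the item off the delayed list onto work_list. A hedged reconstruction; the from_timer() conversion and the locking are assumptions filling the gaps between the matching lines, and the lock is a raw spinlock only in newer trees.

void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	if (WARN_ON_ONCE(!worker))
		return;		/* pending work was re-initialised: misuse */

	raw_spin_lock_irqsave(&worker->lock, flags);
	WARN_ON_ONCE(work->worker != worker);	/* one worker per work item */

	/* move the work from worker->delayed_work_list onto the run list */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
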
874 struct kthread_work *work = &dwork->work; in __kthread_queue_delayed_work() local
885 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
890 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
892 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
893 work->worker = worker; in __kthread_queue_delayed_work()
917 struct kthread_work *work = &dwork->work; in kthread_queue_delayed_work() local
923 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
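
__kthread_queue_delayed_work() takes a fast path for a zero delay (insert straight onto work_list) and otherwise parks the item on delayed_work_list and arms the timer; kthread_queue_delayed_work() wraps that behind the same queuing_blocked() gate. A hypothetical caller; poll_dwork, poll_fn(), and poll_arm() are illustrative names.

#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static struct kthread_delayed_work poll_dwork;

static void poll_fn(struct kthread_work *work)
{
	pr_info("delayed work ran\n");
}

static void poll_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&poll_dwork, poll_fn);	/* one-time init */

	/* delay == 0 would queue immediately; here the item waits on
	 * delayed_work_list until the timer fires ~100ms from now */
	kthread_queue_delayed_work(worker, &poll_dwork,
				   msecs_to_jiffies(100));
}
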
934 struct kthread_work work; member
938 static void kthread_flush_work_fn(struct kthread_work *work) in kthread_flush_work_fn() argument
941 container_of(work, struct kthread_flush_work, work); in kthread_flush_work_fn()
951 void kthread_flush_work(struct kthread_work *work) in kthread_flush_work() argument
954 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_work()
960 worker = work->worker; in kthread_flush_work()
966 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
968 if (!list_empty(&work->node)) in kthread_flush_work()
969 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
970 else if (worker->current_work == work) in kthread_flush_work()
971 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
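
kthread_flush_work() works by inserting an on-stack kthread_flush_work (a work item plus a completion) directly behind the target: behind work->node.next if the target is still queued, or at the head of work_list if it is currently running, then sleeping on the completion that kthread_flush_work_fn() fires. A hypothetical synchronisation point reusing the frob_ctx example from above.

/* wait until a previously queued frob_ctx work item has finished */
static void frob_quiesce(struct frob_ctx *ctx)
{
	/* returns only after frob_fn() has completed,
	 * or immediately if the work was never queued */
	kthread_flush_work(&ctx->work);
}
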
993 static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, in __kthread_cancel_work() argument
999 container_of(work, struct kthread_delayed_work, work); in __kthread_cancel_work()
1000 struct kthread_worker *worker = work->worker; in __kthread_cancel_work()
1008 work->canceling++; in __kthread_cancel_work()
1012 work->canceling--; in __kthread_cancel_work()
1019 if (!list_empty(&work->node)) { in __kthread_cancel_work()
1020 list_del_init(&work->node); in __kthread_cancel_work()
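
__kthread_cancel_work() is the common dequeue step. For delayed work the timer must be stopped first, and del_timer_sync() cannot run under worker->lock because the timer callback takes that lock; so the lock is dropped around it while the canceling counter blocks any requeue in the window (see queuing_blocked() above). A hedged reconstruction; the lock juggling and the del_timer_sync() call are assumptions filling the gaps between the matching lines.

static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/* stop the timer with the lock dropped; work->canceling
		 * keeps queuing_blocked() true in the meantime */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/* dequeue from either work_list or delayed_work_list */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}
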
1054 struct kthread_work *work = &dwork->work; in kthread_mod_delayed_work() local
1061 if (!work->worker) in kthread_mod_delayed_work()
1065 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1068 if (work->canceling) in kthread_mod_delayed_work()
1071 ret = __kthread_cancel_work(work, true, &flags); in kthread_mod_delayed_work()
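
kthread_mod_delayed_work() is cancel-then-requeue: a never-queued item falls through to plain queuing, a mid-cancel item is left alone, and a pending one is dequeued via __kthread_cancel_work() and re-armed with the new delay. A hypothetical watchdog-style rearm; frob_kick() is an illustrative name.

/* push the deadline out by another 500ms each time we are kicked */
static void frob_kick(struct kthread_worker *worker,
		      struct kthread_delayed_work *dwork)
{
	kthread_mod_delayed_work(worker, dwork, msecs_to_jiffies(500));
}
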
1080 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) in __kthread_cancel_work_sync() argument
1082 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync()
1091 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1093 ret = __kthread_cancel_work(work, is_dwork, &flags); in __kthread_cancel_work_sync()
1095 if (worker->current_work != work) in __kthread_cancel_work_sync()
1102 work->canceling++; in __kthread_cancel_work_sync()
1104 kthread_flush_work(work); in __kthread_cancel_work_sync()
1106 work->canceling--; in __kthread_cancel_work_sync()
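
__kthread_cancel_work_sync() adds the "wait for a running callback" step: after the dequeue, if the worker is currently executing this item, the canceling counter is raised so nothing can requeue it, the lock is dropped, and kthread_flush_work() waits for the callback to return. A hedged reconstruction; the lock hand-over between the increment and decrement is an assumption matching the gaps in the source line numbers above.

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	bool ret = false;

	if (!worker)
		return false;	/* never queued: nothing to do */

	raw_spin_lock_irqsave(&worker->lock, flags);
	WARN_ON_ONCE(work->worker != worker);	/* one worker per work item */

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out;	/* not running: the dequeue was enough */

	/* block requeuing, drop the lock, and wait out the callback */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
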
1130 bool kthread_cancel_work_sync(struct kthread_work *work) in kthread_cancel_work_sync() argument
1132 return __kthread_cancel_work_sync(work, false); in kthread_cancel_work_sync()
1147 return __kthread_cancel_work_sync(&dwork->work, true); in kthread_cancel_delayed_work_sync()
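
The two public wrappers differ only in the is_dwork flag. A hypothetical teardown path; frob_teardown() is an illustrative name.

/* make sure neither item can run again before freeing their storage */
static void frob_teardown(struct frob_ctx *ctx,
			  struct kthread_delayed_work *dwork)
{
	/* both return true iff a pending instance was dequeued, and both
	 * wait for a running callback to finish before returning */
	kthread_cancel_work_sync(&ctx->work);
	kthread_cancel_delayed_work_sync(dwork);
}
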
1161 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_worker()
1165 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
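
kthread_flush_worker() reuses the same on-stack flush work as kthread_flush_work(), but queues it at the tail, so its return means every item queued before the call has finished. A hypothetical shutdown sequence; example_shutdown() is an illustrative name, and kthread_destroy_worker() is the usual partner for workers created with kthread_create_worker().

static void example_shutdown(struct kthread_worker *worker)
{
	kthread_flush_worker(worker);	/* drain everything queued so far */
	kthread_destroy_worker(worker);	/* flushes again and stops the thread */
}
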