• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/spinlock.h>
3 #include <linux/task_work.h>
4 #include <linux/tracehook.h>
5 
/*
 * Sentinel installed by task_work_run() once the task is exiting and its
 * list is empty; task_work_add() recognizes it and fails with -ESRCH.
 * It is never dequeued or executed, so all we need is ->next == NULL.
 */
static struct callback_head work_exited; /* all we need is ->next == NULL */
7 
8 /**
9  * task_work_add - ask the @task to execute @work->func()
10  * @task: the task which should run the callback
11  * @work: the callback to run
12  * @notify: how to notify the targeted task
13  *
14  * Queue @work for task_work_run() below and notify the @task if @notify
15  * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that the
16  * it will interrupt the targeted task and run the task_work. @TWA_RESUME
17  * work is run only when the task exits the kernel and returns to user mode,
18  * or before entering guest mode. Fails if the @task is exiting/exited and thus
19  * it can't process this @work. Otherwise @work->func() will be called when the
20  * @task goes through one of the aforementioned transitions, or exits.
21  *
22  * If the targeted task is exiting, then an error is returned and the work item
23  * is not queued. It's up to the caller to arrange for an alternative mechanism
24  * in that case.
25  *
26  * Note: there is no ordering guarantee on works queued here. The task_work
27  * list is LIFO.
28  *
29  * RETURNS:
30  * 0 if succeeds or -ESRCH.
31  */
task_work_add(struct task_struct * task,struct callback_head * work,enum task_work_notify_mode notify)32 int task_work_add(struct task_struct *task, struct callback_head *work,
33 		  enum task_work_notify_mode notify)
34 {
35 	struct callback_head *head;
36 
37 	do {
38 		head = READ_ONCE(task->task_works);
39 		if (unlikely(head == &work_exited))
40 			return -ESRCH;
41 		work->next = head;
42 	} while (cmpxchg(&task->task_works, head, work) != head);
43 
44 	switch (notify) {
45 	case TWA_NONE:
46 		break;
47 	case TWA_RESUME:
48 		set_notify_resume(task);
49 		break;
50 	case TWA_SIGNAL:
51 		set_notify_signal(task);
52 		break;
53 	default:
54 		WARN_ON_ONCE(1);
55 		break;
56 	}
57 
58 	return 0;
59 }
60 
61 /**
62  * task_work_cancel_match - cancel a pending work added by task_work_add()
63  * @task: the task which should execute the work
64  * @match: match function to call
65  *
66  * RETURNS:
67  * The found work or NULL if not found.
68  */
69 struct callback_head *
task_work_cancel_match(struct task_struct * task,bool (* match)(struct callback_head *,void * data),void * data)70 task_work_cancel_match(struct task_struct *task,
71 		       bool (*match)(struct callback_head *, void *data),
72 		       void *data)
73 {
74 	struct callback_head **pprev = &task->task_works;
75 	struct callback_head *work;
76 	unsigned long flags;
77 
78 	if (likely(!task->task_works))
79 		return NULL;
80 	/*
81 	 * If cmpxchg() fails we continue without updating pprev.
82 	 * Either we raced with task_work_add() which added the
83 	 * new entry before this work, we will find it again. Or
84 	 * we raced with task_work_run(), *pprev == NULL/exited.
85 	 */
86 	raw_spin_lock_irqsave(&task->pi_lock, flags);
87 	while ((work = READ_ONCE(*pprev))) {
88 		if (!match(work, data))
89 			pprev = &work->next;
90 		else if (cmpxchg(pprev, work, work->next) == work)
91 			break;
92 	}
93 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
94 
95 	return work;
96 }
97 
task_work_func_match(struct callback_head * cb,void * data)98 static bool task_work_func_match(struct callback_head *cb, void *data)
99 {
100 	return cb->func == data;
101 }
102 
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	/* Reuse the generic matcher; @func doubles as the match cookie. */
	return task_work_cancel_match(task, task_work_func_match, func);
}
119 
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				/*
				 * List is empty: if the task is exiting,
				 * install the sentinel so task_work_add()
				 * fails with -ESRCH from now on; otherwise
				 * there is nothing left to do.
				 */
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
			/* Atomically detach the whole list (or install head). */
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can not remove
		 * the first entry == work, cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		/*
		 * Run the detached batch in LIFO order.  ->next is loaded
		 * before calling ->func(), which presumably may free the
		 * entry itself -- @work must not be touched afterwards.
		 */
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
167