// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static void padata_free_pd(struct parallel_data *pd);
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
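
/*
 * Worked example (editor's note, values illustrative): with
 * pd->cpumask.pcpu = {0, 2, 5} and seq_nr = 7, cpumask_weight() is 3,
 * so cpu_index = 7 % 3 = 1, and padata_index_to_cpu() walks one step
 * from the first set bit, yielding target CPU 2.
 */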

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and, if
 *          none is found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if (pinst->flags & PADATA_RESET)
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
	target_cpu = padata_cpu_hash(pd, padata->seq_nr);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work(pinst->parallel_wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
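
/*
 * Usage sketch (editor's addition, not part of this file): a caller
 * embeds struct padata_priv in its own job structure and sets the
 * parallel/serial callbacks before submitting.  The names below are
 * hypothetical; see crypto/pcrypt.c for the in-tree user.
 *
 *	struct my_job {
 *		struct padata_priv padata;
 *		void *payload;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_job *job = container_of(padata, struct my_job,
 *						  padata);
 *
 *		do_heavy_work(job->payload);	// runs with BHs off
 *		padata_do_serial(padata);	// mandatory for every object
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// objects arrive here in submission order
 *	}
 *
 *	int my_submit(struct padata_shell *ps, struct my_job *job, int cb_cpu)
 *	{
 *		job->padata.parallel = my_parallel;
 *		job->padata.serial   = my_serial;
 *		return padata_do_parallel(ps, &job->padata, &cb_cpu);
 *	}
 */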

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if the next object that needs serialization is still being
 * processed in parallel by another cpu and is not yet present in
 * the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	next_queue = per_cpu_ptr(pd->pqueue, cpu);
	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}
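
/*
 * Worked example (editor's note): with three CPUs in pd->cpumask.pcpu,
 * sequence numbers hash round-robin, so pd->processed = 5 is expected
 * on the cpu with index 5 % 3 = 2, which is where pd->cpu points.  If
 * that cpu's reorder queue instead holds seq_nr 8 at its head (a later
 * job that hashed to the same cpu and finished first), the
 * seq_nr != pd->processed check above returns NULL and we keep waiting
 * for object 5.
 */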

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_parallel_queue *next_queue;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * take care of all the objects enqueued while the lock is held.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure the reorder queue is read after pd->lock is dropped so we
	 * see new objects from another task in padata_do_serial.  Pairs with
	 * smp_mb__after_atomic in padata_do_serial.
	 */
	smp_mb();

	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
	if (!list_empty(&next_queue->reorder.list) &&
	    padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}
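
/*
 * Lifetime note (editor's addition): pd->refcnt starts at 1 in
 * padata_alloc_pd(), is incremented once per object submitted in
 * padata_do_parallel(), and is decremented per serialized batch above.
 * The initial reference is dropped in padata_replace(), so a retired
 * parallel_data is freed only after its last object has been
 * serialized.
 */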

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
							   padata->cpu);
	struct padata_priv *cur;

	spin_lock(&pqueue->reorder.lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	atomic_inc(&pd->reorder_objects);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
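
/*
 * Ordering sketch (editor's addition): the smp_mb()/smp_mb__after_atomic()
 * pair closes the race between a task that just dropped pd->lock and a
 * task that just queued a new object:
 *
 *	CPU A (padata_reorder)          CPU B (padata_do_serial)
 *	----------------------          ------------------------
 *	spin_unlock_bh(&pd->lock)       list_add() to reorder queue
 *	smp_mb()                        smp_mb__after_atomic()
 *	re-check reorder queue          spin_trylock_bh(&pd->lock)
 *
 * Either A's re-check sees B's object, or B's trylock succeeds once A
 * has dropped the lock; either way the new object gets serialized.
 */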

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pinst->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static int pd_setup_cpumasks(struct parallel_data *pd,
			     const struct cpumask *pcpumask,
			     const struct cpumask *cbcpumask)
{
	int err = -ENOMEM;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto free_pcpu_mask;

	cpumask_copy(pd->cpumask.pcpu, pcpumask);
	cpumask_copy(pd->cpumask.cbcpu, cbcpumask);

	return 0;

free_pcpu_mask:
	free_cpumask_var(pd->cpumask.pcpu);
out:
	return err;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	const struct cpumask *cbcpumask;
	const struct cpumask *pcpumask;
	struct parallel_data *pd;

	cbcpumask = pinst->rcpumask.cbcpu;
	pcpumask = pinst->rcpumask.pcpu;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;

	pd->ps = ps;
	if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	int notification_mask = 0;
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	cpumask_copy(pinst->omask, pinst->rcpumask.pcpu);
	cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
		    cpu_online_mask);
	if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;

	cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu);
	cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
		    cpu_online_mask);
	if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (atomic_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pinst->cpumask);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);
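
/*
 * Usage sketch (editor's addition; names are hypothetical): the
 * notifier's val argument carries PADATA_CPU_PARALLEL and/or
 * PADATA_CPU_SERIAL, depending on which cpumask changed.
 *
 *	static int my_cpumask_change(struct notifier_block *nb,
 *				     unsigned long val, void *data)
 *	{
 *		if (val & PADATA_CPU_PARALLEL)
 *			resize_my_parallel_state();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nb);
 */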

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to data instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Sets the cpumask specified by @cpumask_type to
 *                      the value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, corresponding
 *                to the parallel and serial cpumasks respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
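
/*
 * Usage sketch (editor's addition; error handling abbreviated):
 * restrict parallel work to CPUs 0-3.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	cpumask_set_cpu(2, mask);
 *	cpumask_set_cpu(3, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */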

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	padata_stop(pinst);
	free_cpumask_var(pinst->omask);
	free_cpumask_var(pinst->rcpumask.cbcpu);
	free_cpumask_var(pinst->rcpumask.pcpu);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
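
/*
 * Usage note (editor's addition): the masks are parsed with
 * bitmap_parse(), so they are written as hex bitmaps.  For example,
 * assuming an instance whose kobject was added under
 * /sys/kernel/pcrypt/pencrypt (as the pcrypt user does):
 *
 *	# run parallel workers on CPUs 0 and 1 (mask 0x3)
 *	echo 3 > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */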

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)	/* check the store hook, not show */
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @name: used to identify the instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
static struct padata_instance *padata_alloc(const char *name,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	get_online_cpus();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
		goto err_free_masks;
	if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
		goto err_free_rcpumask_pcpu;
	if (!alloc_cpumask_var(&pinst->omask, GFP_KERNEL))
		goto err_free_rcpumask_cbcpu;

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
	cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
	cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_omask;

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	put_online_cpus();

	return pinst;

err_free_omask:
	free_cpumask_var(pinst->omask);
err_free_rcpumask_cbcpu:
	free_cpumask_var(pinst->rcpumask.cbcpu);
err_free_rcpumask_pcpu:
	free_cpumask_var(pinst->rcpumask.pcpu);
err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	put_online_cpus();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @name: used to identify the instance
 */
struct padata_instance *padata_alloc_possible(const char *name)
{
	return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	get_online_cpus();
	pd = padata_alloc_pd(ps);
	put_online_cpus();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
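
/*
 * Lifecycle sketch (editor's addition; "my_inst" is hypothetical and
 * error handling is abbreviated):
 *
 *	struct padata_instance *pinst;
 *	struct padata_shell *ps;
 *
 *	pinst = padata_alloc_possible("my_inst");
 *	ps = padata_alloc_shell(pinst);
 *	padata_start(pinst);
 *
 *	// ... submit jobs with padata_do_parallel(ps, ...) ...
 *
 *	padata_stop(pinst);
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */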

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;

	mutex_lock(&pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		return ret;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0) {
		cpuhp_remove_multi_state(hp_online);
		return ret;
	}
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif
1162