// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
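	/*
	 * Worked example (hypothetical mask, not taken from this file):
	 * with pd->cpumask.pcpu spanning CPUs {2, 4, 7} and seq_nr == 5,
	 * cpu_index below is 5 % 3 == 2, so padata_index_to_cpu() walks
	 * the mask to CPU 7.
	 */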
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
			     void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on. If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
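
/*
 * Usage sketch, not part of this file: "my_request", "my_parallel" and
 * "my_serial" are hypothetical caller-side names.  A user embeds
 * struct padata_priv in its own request, hooks up the callbacks, and
 * makes sure every object submitted here eventually reaches
 * padata_do_serial(), e.g. at the end of the parallel callback:
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *				struct my_request, padata);
 *
 *		do_the_expensive_work(req);
 *		padata_do_serial(padata);
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;	(runs in submission order)
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */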

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		if (cur->seq_nr < padata->seq_nr)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
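		/*
		 * Worked example (made-up numbers): with chunk_size == 8
		 * and start == 5, size below is roundup(6, 8) - 5 == 3 and
		 * this pass ends at 8; later passes then start on aligned
		 * boundaries and cover a full chunk each.
		 */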
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / job->min_chunk, 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function. Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);
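	/*
	 * Worked example (made-up numbers): size == 4096, nworks == 4,
	 * min_chunk == 128, align == 64.  4096 / (4 * 4) == 256, which
	 * already exceeds min_chunk and is 64-aligned, so each helper
	 * claims 256 units per pass and the 16 chunks spread across the
	 * 4 threads even if some finish early.
	 */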

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
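
/*
 * Usage sketch, not part of this file (the names below are hypothetical;
 * deferred struct page initialization is an in-tree user of this
 * interface).  The caller describes the job and padata splits it:
 *
 *	static void __init my_thread_fn(unsigned long start,
 *					unsigned long end, void *arg)
 *	{
 *		... process the half-open range [start, end) ...
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = my_thread_fn,
 *		.fn_arg      = my_arg,
 *		.start       = first_unit,
 *		.size        = nr_units,
 *		.align       = 1,
 *		.min_chunk   = 64,
 *		.max_threads = num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);
 */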

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value equivalent to @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
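
/*
 * Example from the shell, assuming an instance whose kobject was added
 * under /sys/kernel (pcrypt, for one, registers its "pencrypt" and
 * "pdecrypt" instances below /sys/kernel/pcrypt/):
 *
 *	echo 3 > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	cat /sys/kernel/pcrypt/pencrypt/serial_cpumask
 */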

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}
static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	get_online_cpus();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	put_online_cpus();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	put_online_cpus();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	get_online_cpus();
	pd = padata_alloc_pd(ps);
	put_online_cpus();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
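
/*
 * Lifecycle sketch, not part of this file: one instance can back
 * several shells, and each shell is an independent submission context
 * ("my_inst" and the omitted error handling are illustrative only):
 *
 *	pinst = padata_alloc("my_inst");
 *	ps = padata_alloc_shell(pinst);
 *
 *	... submit work with padata_do_parallel(ps, ...) ...
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */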

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	if (refcount_dec_and_test(&pd->refcnt))
		padata_free_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}