/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include <trace/events/sched.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

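/*
 * Illustrative sketch only (not part of this file's logic): code that updates
 * cpu_present_mask, e.g. a hypothetical arch hot-add path, is expected to
 * bracket the update with the pair above:
 *
 *	cpu_maps_update_begin();
 *	set_cpu_present(cpu, true);
 *	cpu_maps_update_done();
 *
 * Note that cpu_notifier_register_begin/done() appear to alias these two
 * functions in this kernel's <linux/cpu.h>, which is why the EXPORT_SYMBOL()s
 * above carry those names.
 */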
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);

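/*
 * Illustrative sketch only: a typical reader-side pattern for code that needs
 * a stable cpu_online_mask while it runs (the per-cpu helper below is
 * hypothetical):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something_per_cpu(cpu);
 *	put_online_cpus();
 *
 * The section may sleep, but it must not itself try to start a hotplug
 * operation, since the writer would then wait forever for the refcount
 * this reader is holding.
 */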
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to trigger in practice, since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
				break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

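/*
 * Note that cpu_hotplug_begin() returns with cpu_hotplug.lock still held, so
 * new readers stay blocked for the whole hotplug operation; cpu_hotplug_done()
 * is what finally releases it. In this file both are called only from
 * _cpu_down()/_cpu_up(), nested inside cpu_maps_update_begin/done().
 */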
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
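/*
 * Illustrative sketch only: cpu_hotplug_disable()/cpu_hotplug_enable() nest
 * via the counter above, so a caller that must keep the CPU topology fixed
 * across a sleeping section (e.g. the PM notifier further down in this file)
 * simply brackets it:
 *
 *	cpu_hotplug_disable();
 *	... cpu_up()/cpu_down() now fail with -EBUSY ...
 *	cpu_hotplug_enable();
 */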
#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

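/*
 * Illustrative sketch only: a typical subsystem-side notifier for this chain.
 * The callback name and the per-event work are hypothetical; the event
 * handling pattern mirrors smpboot_thread_call() later in this file:
 *
 *	static int foo_cpu_callback(struct notifier_block *nfb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		int cpu = (long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			foo_init_cpu(cpu);
 *			break;
 *		case CPU_DEAD:
 *			foo_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 * registered once with register_cpu_notifier(&foo_cpu_notifier).
 */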
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check without holding task_rq(p)->lock.
		 * Order the reads so that we do not warn about a task
		 * which was running on this cpu in the past but has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park((long)param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, so wait for all
	 * preempt-disabled and RCU users of this state to go away such that
	 * all new such users will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, err, 0);
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	arch_smt_update();
	return err;
}

int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
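/*
 * Illustrative note: cpu_down() is the exported entry point used, for
 * example, by the sysfs "online" attribute handler
 * (echo 0 > /sys/devices/system/cpu/cpuN/online). The last online CPU can
 * never be taken down, since _cpu_down() bails out with -EBUSY when only one
 * CPU remains.
 */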
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);

	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, ret, 1);
	arch_smt_update();
	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
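/*
 * Illustrative note: cpu_up() is the mirror of cpu_down() above and backs,
 * among other things, writes of 1 to /sys/devices/system/cpu/cpuN/online.
 * On a successful bring-up the notifiers see roughly
 * CPU_UP_PREPARE -> CPU_STARTING (from the new CPU, via notify_cpu_starting())
 * -> CPU_ONLINE, with CPU_UP_CANCELED sent to the already-notified callbacks
 * if anything fails in between.
 */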

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;
	struct device *cpu_device;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			cpu_device = get_cpu_device(cpu);
			if (!cpu_device)
				pr_err("%s: failed to get cpu%d device\n",
				       __func__, cpu);
			else
				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

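/*
 * Illustrative note: the suspend/hibernate core is the expected caller of the
 * pair above, offlining everything but the boot CPU with
 * disable_nonboot_cpus() before entering the sleep state and restoring the
 * CPUs recorded in frozen_cpus with enable_nonboot_cpus() on resume. The
 * tasks_frozen=1 argument makes the notifiers see the *_FROZEN variants of
 * the hotplug events.
 */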
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86's
	 * bsp_pm_callback, which relies on cpu_hotplug_pm_callback
	 * having disabled cpu hotplug first, to avoid a hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
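/*
 * Worked example (see get_cpu_mask() in <linux/cpumask.h>): for CPU n,
 * cpumask_of(n) takes row [1 + n % BITS_PER_LONG], which has exactly bit
 * (n % BITS_PER_LONG) set in its word 0, and then steps the pointer back by
 * n / BITS_PER_LONG longs so that the bit lands in the right word of the
 * returned mask. The all-zero row 0 is what makes that backwards step safe,
 * hence the "so we can back into it" comment above. E.g. on a 64-bit build
 * with NR_CPUS > 64, cpumask_of(65) resolves to row 2 stepped back one word,
 * i.e. bit 1 of word 1: CPU 65.
 */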

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

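/*
 * Note the asymmetry below: marking a CPU online also marks it active, but
 * marking it offline deliberately leaves the active bit alone. The active bit
 * is cleared separately through set_cpu_active(), typically by the
 * scheduler's hotplug callbacks before the CPU is actually taken down (see
 * the "By now we've cleared cpu_active_mask" comment in _cpu_down()).
 */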
void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}

enum cpu_mitigations cpu_mitigations = CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
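/*
 * Illustrative note: this backs the "mitigations=" kernel command line
 * parameter; only "off" and "auto" are recognized here. Booting with
 * mitigations=off asks arch code that consults cpu_mitigations (typically via
 * helpers such as cpu_mitigations_off(), if provided by this tree's headers)
 * to leave its optional speculative-execution mitigations disabled.
 */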

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
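/*
 * Illustrative sketch only: the idle notifier chain above (an extension
 * commonly carried in Android kernel trees rather than mainline) lets drivers
 * react to idle transitions signalled by the arch idle loop, which calls
 * idle_notifier_call_chain() with an event value. A hypothetical consumer:
 *
 *	static int foo_idle_notify(struct notifier_block *nb,
 *				   unsigned long event, void *unused)
 *	{
 *		// e.g. drop a vote on entering idle, restore it on exit
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_idle_nb = {
 *		.notifier_call = foo_idle_notify,
 *	};
 *
 * registered once with idle_notifier_register(&foo_idle_nb).
 */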
892