1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * SMP support for ppc.
4  *
5  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6  * deal of code from the sparc and intel versions.
7  *
8  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9  *
10  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12  */
13 
14 #undef DEBUG
15 
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task_stack.h>
20 #include <linux/sched/topology.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/spinlock.h>
26 #include <linux/cache.h>
27 #include <linux/err.h>
28 #include <linux/device.h>
29 #include <linux/cpu.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/profile.h>
33 #include <linux/processor.h>
34 #include <linux/random.h>
35 #include <linux/stackprotector.h>
36 #include <linux/pgtable.h>
37 
38 #include <asm/ptrace.h>
39 #include <linux/atomic.h>
40 #include <asm/irq.h>
41 #include <asm/hw_irq.h>
42 #include <asm/kvm_ppc.h>
43 #include <asm/dbell.h>
44 #include <asm/page.h>
45 #include <asm/prom.h>
46 #include <asm/smp.h>
47 #include <asm/time.h>
48 #include <asm/machdep.h>
49 #include <asm/cputhreads.h>
50 #include <asm/cputable.h>
51 #include <asm/mpic.h>
52 #include <asm/vdso_datapage.h>
53 #ifdef CONFIG_PPC64
54 #include <asm/paca.h>
55 #endif
56 #include <asm/vdso.h>
57 #include <asm/debug.h>
58 #include <asm/kexec.h>
59 #include <asm/asm-prototypes.h>
60 #include <asm/cpu_has_feature.h>
61 #include <asm/ftrace.h>
62 #include <asm/kup.h>
63 #include <asm/fadump.h>
64 
65 #ifdef DEBUG
66 #include <asm/udbg.h>
67 #define DBG(fmt...) udbg_printf(fmt)
68 #else
69 #define DBG(fmt...)
70 #endif
71 
72 #ifdef CONFIG_HOTPLUG_CPU
73 /* State of each CPU during hotplug phases */
74 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
75 #endif
76 
77 struct task_struct *secondary_current;
78 bool has_big_cores;
79 bool coregroup_enabled;
80 
81 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
82 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
83 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
84 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
85 DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
86 
87 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
88 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
89 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
90 EXPORT_SYMBOL_GPL(has_big_cores);
91 
92 enum {
93 #ifdef CONFIG_SCHED_SMT
94 	smt_idx,
95 #endif
96 	cache_idx,
97 	mc_idx,
98 	die_idx,
99 };
100 
101 #define MAX_THREAD_LIST_SIZE	8
102 #define THREAD_GROUP_SHARE_L1   1
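/*
 * In-kernel representation of one parsed "ibm,thread-groups" device-tree
 * property; see the comment above parse_thread_groups() below for the
 * exact layout of the property itself.
 */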
103 struct thread_groups {
104 	unsigned int property;
105 	unsigned int nr_groups;
106 	unsigned int threads_per_group;
107 	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
108 };
109 
110 /*
111  * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
112  * the set of its siblings that share the L1-cache.
113  */
114 DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
115 
116 /* SMP operations for this machine */
117 struct smp_ops_t *smp_ops;
118 
119 /* Can't be static due to PowerMac hackery */
120 volatile unsigned int cpu_callin_map[NR_CPUS];
121 
122 int smt_enabled_at_boot = 1;
123 
124 /*
125  * Returns 1 if the specified cpu should be brought up during boot.
126  * Used to inhibit booting threads if they've been disabled or
127  * limited on the command line
128  */
129 int smp_generic_cpu_bootable(unsigned int nr)
130 {
131 	/* Special case - we inhibit secondary thread startup
132 	 * during boot if the user requests it.
133 	 */
134 	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
135 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
136 			return 0;
137 		if (smt_enabled_at_boot
138 		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
139 			return 0;
140 	}
141 
142 	return 1;
143 }
144 
145 
146 #ifdef CONFIG_PPC64
147 int smp_generic_kick_cpu(int nr)
148 {
149 	if (nr < 0 || nr >= nr_cpu_ids)
150 		return -EINVAL;
151 
152 	/*
153 	 * The processor is currently spinning, waiting for the
154 	 * cpu_start field to become non-zero. After we set cpu_start,
155 	 * the processor will continue on to secondary_start.
156 	 */
157 	if (!paca_ptrs[nr]->cpu_start) {
158 		paca_ptrs[nr]->cpu_start = 1;
159 		smp_mb();
160 		return 0;
161 	}
162 
163 #ifdef CONFIG_HOTPLUG_CPU
164 	/*
165 	 * Ok it's not there, so it might be soft-unplugged, let's
166 	 * try to bring it back
167 	 */
168 	generic_set_cpu_up(nr);
169 	smp_wmb();
170 	smp_send_reschedule(nr);
171 #endif /* CONFIG_HOTPLUG_CPU */
172 
173 	return 0;
174 }
175 #endif /* CONFIG_PPC64 */
176 
177 static irqreturn_t call_function_action(int irq, void *data)
178 {
179 	generic_smp_call_function_interrupt();
180 	return IRQ_HANDLED;
181 }
182 
183 static irqreturn_t reschedule_action(int irq, void *data)
184 {
185 	scheduler_ipi();
186 	return IRQ_HANDLED;
187 }
188 
189 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
190 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
191 {
192 	timer_broadcast_interrupt();
193 	return IRQ_HANDLED;
194 }
195 #endif
196 
197 #ifdef CONFIG_NMI_IPI
198 static irqreturn_t nmi_ipi_action(int irq, void *data)
199 {
200 	smp_handle_nmi_ipi(get_irq_regs());
201 	return IRQ_HANDLED;
202 }
203 #endif
204 
205 static irq_handler_t smp_ipi_action[] = {
206 	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
207 	[PPC_MSG_RESCHEDULE] = reschedule_action,
208 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
209 	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
210 #endif
211 #ifdef CONFIG_NMI_IPI
212 	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
213 #endif
214 };
215 
216 /*
217  * The NMI IPI is a fallback and not truly non-maskable. It is simpler
218  * than going through the call function infrastructure, and strongly
219  * serialized, so it is more appropriate for debugging.
220  */
221 const char *smp_ipi_name[] = {
222 	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
223 	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
224 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
225 	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
226 #endif
227 #ifdef CONFIG_NMI_IPI
228 	[PPC_MSG_NMI_IPI] = "nmi ipi",
229 #endif
230 };
231 
232 /* optional function to request ipi, for controllers with >= 4 ipis */
233 int smp_request_message_ipi(int virq, int msg)
234 {
235 	int err;
236 
237 	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
238 		return -EINVAL;
239 #ifndef CONFIG_NMI_IPI
240 	if (msg == PPC_MSG_NMI_IPI)
241 		return 1;
242 #endif
243 
244 	err = request_irq(virq, smp_ipi_action[msg],
245 			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
246 			  smp_ipi_name[msg], NULL);
247 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
248 		virq, smp_ipi_name[msg], err);
249 
250 	return err;
251 }
252 
253 #ifdef CONFIG_PPC_SMP_MUXED_IPI
254 struct cpu_messages {
255 	long messages;			/* current messages */
256 };
257 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
258 
259 void smp_muxed_ipi_set_message(int cpu, int msg)
260 {
261 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
262 	char *message = (char *)&info->messages;
263 
264 	/*
265 	 * Order previous accesses before accesses in the IPI handler.
266 	 */
267 	smp_mb();
268 	message[msg] = 1;
269 }
270 
271 void smp_muxed_ipi_message_pass(int cpu, int msg)
272 {
273 	smp_muxed_ipi_set_message(cpu, msg);
274 
275 	/*
276 	 * cause_ipi functions are required to include a full barrier
277 	 * before doing whatever causes the IPI.
278 	 */
279 	smp_ops->cause_ipi(cpu);
280 }
281 
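/*
 * Each IPI message occupies one byte of the per-CPU 'messages' word:
 * smp_muxed_ipi_set_message() above stores a 1 into byte 'msg', and
 * IPI_MESSAGE(msg) yields the corresponding bit within the word (byte 0
 * is the most significant byte on big-endian, the least significant on
 * little-endian), so the demux loop below can test each message with a
 * simple AND.
 */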
282 #ifdef __BIG_ENDIAN__
283 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
284 #else
285 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
286 #endif
287 
288 irqreturn_t smp_ipi_demux(void)
289 {
290 	mb();	/* order any irq clear */
291 
292 	return smp_ipi_demux_relaxed();
293 }
294 
295 /* sync-free variant. Callers should ensure synchronization */
296 irqreturn_t smp_ipi_demux_relaxed(void)
297 {
298 	struct cpu_messages *info;
299 	unsigned long all;
300 
301 	info = this_cpu_ptr(&ipi_message);
302 	do {
303 		all = xchg(&info->messages, 0);
304 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
305 		/*
306 		 * Must check for PPC_MSG_RM_HOST_ACTION messages
307 		 * before PPC_MSG_CALL_FUNCTION messages because when
308 		 * a VM is destroyed, we call kick_all_cpus_sync()
309 		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
310 		 * messages have completed before we free any VCPUs.
311 		 */
312 		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
313 			kvmppc_xics_ipi_action();
314 #endif
315 		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
316 			generic_smp_call_function_interrupt();
317 		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
318 			scheduler_ipi();
319 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
320 		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
321 			timer_broadcast_interrupt();
322 #endif
323 #ifdef CONFIG_NMI_IPI
324 		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
325 			nmi_ipi_action(0, NULL);
326 #endif
327 	} while (info->messages);
328 
329 	return IRQ_HANDLED;
330 }
331 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
332 
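/*
 * Send one message-IPI to @cpu: platforms either provide a direct
 * message_pass() hook, or (with CONFIG_PPC_SMP_MUXED_IPI) all messages
 * are multiplexed over a single interrupt raised via cause_ipi().
 */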
333 static inline void do_message_pass(int cpu, int msg)
334 {
335 	if (smp_ops->message_pass)
336 		smp_ops->message_pass(cpu, msg);
337 #ifdef CONFIG_PPC_SMP_MUXED_IPI
338 	else
339 		smp_muxed_ipi_message_pass(cpu, msg);
340 #endif
341 }
342 
343 void smp_send_reschedule(int cpu)
344 {
345 	if (likely(smp_ops))
346 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
347 }
348 EXPORT_SYMBOL_GPL(smp_send_reschedule);
349 
350 void arch_send_call_function_single_ipi(int cpu)
351 {
352 	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
353 }
354 
355 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
356 {
357 	unsigned int cpu;
358 
359 	for_each_cpu(cpu, mask)
360 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
361 }
362 
363 #ifdef CONFIG_NMI_IPI
364 
365 /*
366  * "NMI IPI" system.
367  *
368  * NMI IPIs may not be recoverable, so should not be used as ongoing part of
369  * a running system. They can be used for crash, debug, halt/reboot, etc.
370  *
371  * The IPI call waits with interrupts disabled until all targets enter the
372  * NMI handler, then returns. Subsequent IPIs can be issued before targets
373  * have returned from their handlers, so there is no guarantee about
374  * concurrency or re-entrancy.
375  *
376  * A new NMI can be issued before all targets exit the handler.
377  *
378  * The IPI call may time out without all targets entering the NMI handler.
379  * In that case, there is some logic to recover (and ignore subsequent
380  * NMI interrupts that may eventually be raised), but the platform interrupt
381  * handler may not be able to distinguish this from other exception causes,
382  * which may cause a crash.
383  */
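/*
 * Typical use is smp_send_nmi_ipi() with a handler and a timeout in
 * microseconds; for example, smp_send_debugger_break() below passes
 * NMI_IPI_ALL_OTHERS and a one second (1000000us) timeout.
 */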
384 
385 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
386 static struct cpumask nmi_ipi_pending_mask;
387 static bool nmi_ipi_busy = false;
388 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
389 
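/*
 * __nmi_ipi_lock is a minimal test-and-set lock (0 = unlocked, 1 = locked)
 * built on atomic_cmpxchg(); it protects nmi_ipi_pending_mask, nmi_ipi_busy
 * and nmi_ipi_function, and is taken with interrupts hard-disabled so it
 * can also be acquired from the NMI handler itself.
 */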
390 static void nmi_ipi_lock_start(unsigned long *flags)
391 {
392 	raw_local_irq_save(*flags);
393 	hard_irq_disable();
394 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
395 		raw_local_irq_restore(*flags);
396 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
397 		raw_local_irq_save(*flags);
398 		hard_irq_disable();
399 	}
400 }
401 
402 static void nmi_ipi_lock(void)
403 {
404 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
405 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
406 }
407 
408 static void nmi_ipi_unlock(void)
409 {
410 	smp_mb();
411 	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
412 	atomic_set(&__nmi_ipi_lock, 0);
413 }
414 
415 static void nmi_ipi_unlock_end(unsigned long *flags)
416 {
417 	nmi_ipi_unlock();
418 	raw_local_irq_restore(*flags);
419 }
420 
421 /*
422  * Platform NMI handler calls this to ack
423  */
424 int smp_handle_nmi_ipi(struct pt_regs *regs)
425 {
426 	void (*fn)(struct pt_regs *) = NULL;
427 	unsigned long flags;
428 	int me = raw_smp_processor_id();
429 	int ret = 0;
430 
431 	/*
432 	 * Unexpected NMIs are possible here because the interrupt may not
433 	 * be able to distinguish NMI IPIs from other types of NMIs, or
434 	 * because the caller may have timed out.
435 	 */
436 	nmi_ipi_lock_start(&flags);
437 	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
438 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
439 		fn = READ_ONCE(nmi_ipi_function);
440 		WARN_ON_ONCE(!fn);
441 		ret = 1;
442 	}
443 	nmi_ipi_unlock_end(&flags);
444 
445 	if (fn)
446 		fn(regs);
447 
448 	return ret;
449 }
450 
451 static void do_smp_send_nmi_ipi(int cpu, bool safe)
452 {
453 	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
454 		return;
455 
456 	if (cpu >= 0) {
457 		do_message_pass(cpu, PPC_MSG_NMI_IPI);
458 	} else {
459 		int c;
460 
461 		for_each_online_cpu(c) {
462 			if (c == raw_smp_processor_id())
463 				continue;
464 			do_message_pass(c, PPC_MSG_NMI_IPI);
465 		}
466 	}
467 }
468 
469 /*
470  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
471  * - fn is the target callback function.
472  * - delay_us > 0 is the delay before giving up waiting for targets to
473  *   begin executing the handler, == 0 specifies indefinite delay.
474  */
475 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
476 				u64 delay_us, bool safe)
477 {
478 	unsigned long flags;
479 	int me = raw_smp_processor_id();
480 	int ret = 1;
481 
482 	BUG_ON(cpu == me);
483 	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
484 
485 	if (unlikely(!smp_ops))
486 		return 0;
487 
488 	nmi_ipi_lock_start(&flags);
489 	while (nmi_ipi_busy) {
490 		nmi_ipi_unlock_end(&flags);
491 		spin_until_cond(!nmi_ipi_busy);
492 		nmi_ipi_lock_start(&flags);
493 	}
494 	nmi_ipi_busy = true;
495 	nmi_ipi_function = fn;
496 
497 	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
498 
499 	if (cpu < 0) {
500 		/* ALL_OTHERS */
501 		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
502 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
503 	} else {
504 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
505 	}
506 
507 	nmi_ipi_unlock();
508 
509 	/* Interrupts remain hard disabled */
510 
511 	do_smp_send_nmi_ipi(cpu, safe);
512 
513 	nmi_ipi_lock();
514 	/* nmi_ipi_busy is set here, so unlock/lock is okay */
515 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
516 		nmi_ipi_unlock();
517 		udelay(1);
518 		nmi_ipi_lock();
519 		if (delay_us) {
520 			delay_us--;
521 			if (!delay_us)
522 				break;
523 		}
524 	}
525 
526 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
527 		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
528 		ret = 0;
529 		cpumask_clear(&nmi_ipi_pending_mask);
530 	}
531 
532 	nmi_ipi_function = NULL;
533 	nmi_ipi_busy = false;
534 
535 	nmi_ipi_unlock_end(&flags);
536 
537 	return ret;
538 }
539 
540 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
541 {
542 	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
543 }
544 
545 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
546 {
547 	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
548 }
549 #endif /* CONFIG_NMI_IPI */
550 
551 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
552 void tick_broadcast(const struct cpumask *mask)
553 {
554 	unsigned int cpu;
555 
556 	for_each_cpu(cpu, mask)
557 		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
558 }
559 #endif
560 
561 #ifdef CONFIG_DEBUGGER
562 void debugger_ipi_callback(struct pt_regs *regs)
563 {
564 	debugger_ipi(regs);
565 }
566 
567 void smp_send_debugger_break(void)
568 {
569 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
570 }
571 #endif
572 
573 #ifdef CONFIG_KEXEC_CORE
574 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
575 {
576 	int cpu;
577 
578 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
579 	if (kdump_in_progress() && crash_wake_offline) {
580 		for_each_present_cpu(cpu) {
581 			if (cpu_online(cpu))
582 				continue;
583 			/*
584 			 * crash_ipi_callback will wait for
585 			 * all cpus, including offline CPUs.
586 			 * We don't care about nmi_ipi_function.
587 			 * Offline cpus will jump straight into
588 			 * crash_ipi_callback, we can skip the
589 			 * entire NMI dance and waiting for
590 			 * cpus to clear pending mask, etc.
591 			 */
592 			do_smp_send_nmi_ipi(cpu, false);
593 		}
594 	}
595 }
596 #endif
597 
598 #ifdef CONFIG_NMI_IPI
599 static void crash_stop_this_cpu(struct pt_regs *regs)
600 #else
601 static void crash_stop_this_cpu(void *dummy)
602 #endif
603 {
604 	/*
605 	 * Just busy wait here and avoid marking CPU as offline to ensure
606 	 * register data is captured appropriately.
607 	 */
608 	while (1)
609 		cpu_relax();
610 }
611 
612 void crash_smp_send_stop(void)
613 {
614 	static bool stopped = false;
615 
616 	/*
617 	 * In case of fadump, register data for all CPUs is captured by f/w
618 	 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
619 	 * this rtas call to avoid tricky post processing of those CPUs'
620 	 * backtraces.
621 	 */
622 	if (should_fadump_crash())
623 		return;
624 
625 	if (stopped)
626 		return;
627 
628 	stopped = true;
629 
630 #ifdef CONFIG_NMI_IPI
631 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
632 #else
633 	smp_call_function(crash_stop_this_cpu, NULL, 0);
634 #endif /* CONFIG_NMI_IPI */
635 }
636 
637 #ifdef CONFIG_NMI_IPI
638 static void nmi_stop_this_cpu(struct pt_regs *regs)
639 {
640 	/*
641 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
642 	 */
643 	set_cpu_online(smp_processor_id(), false);
644 
645 	spin_begin();
646 	while (1)
647 		spin_cpu_relax();
648 }
649 
650 void smp_send_stop(void)
651 {
652 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
653 }
654 
655 #else /* CONFIG_NMI_IPI */
656 
657 static void stop_this_cpu(void *dummy)
658 {
659 	hard_irq_disable();
660 
661 	/*
662 	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
663 	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
664 	 * to know other CPUs are offline before it breaks locks to flush
665 	 * printk buffers, in case we panic()ed while holding the lock.
666 	 */
667 	set_cpu_online(smp_processor_id(), false);
668 
669 	spin_begin();
670 	while (1)
671 		spin_cpu_relax();
672 }
673 
674 void smp_send_stop(void)
675 {
676 	static bool stopped = false;
677 
678 	/*
679 	 * Prevent waiting on csd lock from a previous smp_send_stop.
680 	 * This is racy, but in general callers try to do the right
681 	 * thing and only fire off one smp_send_stop (e.g., see
682 	 * kernel/panic.c)
683 	 */
684 	if (stopped)
685 		return;
686 
687 	stopped = true;
688 
689 	smp_call_function(stop_this_cpu, NULL, 0);
690 }
691 #endif /* CONFIG_NMI_IPI */
692 
693 struct task_struct *current_set[NR_CPUS];
694 
695 static void smp_store_cpu_info(int id)
696 {
697 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
698 #ifdef CONFIG_PPC_FSL_BOOK3E
699 	per_cpu(next_tlbcam_idx, id)
700 		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
701 #endif
702 }
703 
704 /*
705  * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
706  * rather than just passing around the cpumask we pass around a function that
707  * returns that cpumask for the given CPU.
708  */
709 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
710 {
711 	cpumask_set_cpu(i, get_cpumask(j));
712 	cpumask_set_cpu(j, get_cpumask(i));
713 }
714 
715 #ifdef CONFIG_HOTPLUG_CPU
716 static void set_cpus_unrelated(int i, int j,
717 		struct cpumask *(*get_cpumask)(int))
718 {
719 	cpumask_clear_cpu(i, get_cpumask(j));
720 	cpumask_clear_cpu(j, get_cpumask(i));
721 }
722 #endif
723 
724 /*
725  * Extends set_cpus_related. Instead of setting one CPU at a time in
726  * dstmask, set all of srcmask in one shot. dstmask should be a superset of srcmask.
727  */
728 static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
729 				struct cpumask *(*dstmask)(int))
730 {
731 	struct cpumask *mask;
732 	int k;
733 
734 	mask = srcmask(j);
735 	for_each_cpu(k, srcmask(i))
736 		cpumask_or(dstmask(k), dstmask(k), mask);
737 
738 	if (i == j)
739 		return;
740 
741 	mask = srcmask(i);
742 	for_each_cpu(k, srcmask(j))
743 		cpumask_or(dstmask(k), dstmask(k), mask);
744 }
745 
746 /*
747  * parse_thread_groups: Parses the "ibm,thread-groups" device tree
748  *                      property for the CPU device node @dn and stores
749  *                      the parsed output in the thread_groups
750  *                      structure @tg if the ibm,thread-groups[0]
751  *                      matches @property.
752  *
753  * @dn: The device node of the CPU device.
754  * @tg: Pointer to a thread group structure into which the parsed
755  *      output of "ibm,thread-groups" is stored.
756  * @property: The property of the thread-group that the caller is
757  *            interested in.
758  *
759  * ibm,thread-groups[0..N-1] array defines which group of threads in
760  * the CPU-device node can be grouped together based on the property.
761  *
762  * ibm,thread-groups[0] tells us the property based on which the
763  * threads are being grouped together. If this value is 1, it implies
764  * that the threads in the same group share the L1 and translation cache.
765  *
766  * ibm,thread-groups[1] tells us how many such thread groups exist.
767  *
768  * ibm,thread-groups[2] tells us the number of threads in each such
769  * group.
770  *
771  * ibm,thread-groups[3..N-1] is the list of threads identified by
772  * "ibm,ppc-interrupt-server#s" arranged as per their membership in
773  * the grouping.
774  *
775  * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
776  * implies that there are 2 groups of 4 threads each, where each group
777  * of threads shares the L1 and translation cache.
778  *
779  * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
780  * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
781  * 11, 12}.
782  *
783  * Returns 0 on success, -EINVAL if the property does not exist,
784  * -ENODATA if property does not have a value, and -EOVERFLOW if the
785  * property data isn't large enough.
786  */
787 static int parse_thread_groups(struct device_node *dn,
788 			       struct thread_groups *tg,
789 			       unsigned int property)
790 {
791 	int i;
792 	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
793 	u32 *thread_list;
794 	size_t total_threads;
795 	int ret;
796 
797 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
798 					 thread_group_array, 3);
799 	if (ret)
800 		return ret;
801 
802 	tg->property = thread_group_array[0];
803 	tg->nr_groups = thread_group_array[1];
804 	tg->threads_per_group = thread_group_array[2];
805 	if (tg->property != property ||
806 	    tg->nr_groups < 1 ||
807 	    tg->threads_per_group < 1)
808 		return -ENODATA;
809 
810 	total_threads = tg->nr_groups * tg->threads_per_group;
811 
812 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
813 					 thread_group_array,
814 					 3 + total_threads);
815 	if (ret)
816 		return ret;
817 
818 	thread_list = &thread_group_array[3];
819 
820 	for (i = 0 ; i < total_threads; i++)
821 		tg->thread_list[i] = thread_list[i];
822 
823 	return 0;
824 }
825 
826 /*
827  * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
828  *                              that @cpu belongs to.
829  *
830  * @cpu : The logical CPU whose thread group is being searched.
831  * @tg : The thread-group structure of the CPU node which @cpu belongs
832  *       to.
833  *
834  * Returns the index into tg->thread_list that points to the start
835  * of the thread_group that @cpu belongs to.
836  *
837  * Returns -1 if cpu doesn't belong to any of the groups pointed to by
838  * tg->thread_list.
839  */
840 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
841 {
842 	int hw_cpu_id = get_hard_smp_processor_id(cpu);
843 	int i, j;
844 
845 	for (i = 0; i < tg->nr_groups; i++) {
846 		int group_start = i * tg->threads_per_group;
847 
848 		for (j = 0; j < tg->threads_per_group; j++) {
849 			int idx = group_start + j;
850 
851 			if (tg->thread_list[idx] == hw_cpu_id)
852 				return group_start;
853 		}
854 	}
855 
856 	return -1;
857 }
858 
859 static int init_cpu_l1_cache_map(int cpu)
860 
861 {
862 	struct device_node *dn = of_get_cpu_node(cpu, NULL);
863 	struct thread_groups tg = {.property = 0,
864 				   .nr_groups = 0,
865 				   .threads_per_group = 0};
866 	int first_thread = cpu_first_thread_sibling(cpu);
867 	int i, cpu_group_start = -1, err = 0;
868 
869 	if (!dn)
870 		return -ENODATA;
871 
872 	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
873 	if (err)
874 		goto out;
875 
876 	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
877 
878 	if (unlikely(cpu_group_start == -1)) {
879 		WARN_ON_ONCE(1);
880 		err = -ENODATA;
881 		goto out;
882 	}
883 
884 	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
885 				GFP_KERNEL, cpu_to_node(cpu));
886 
887 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
888 		int i_group_start = get_cpu_thread_group_start(i, &tg);
889 
890 		if (unlikely(i_group_start == -1)) {
891 			WARN_ON_ONCE(1);
892 			err = -ENODATA;
893 			goto out;
894 		}
895 
896 		if (i_group_start == cpu_group_start)
897 			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
898 	}
899 
900 out:
901 	of_node_put(dn);
902 	return err;
903 }
904 
905 static bool shared_caches;
906 
907 #ifdef CONFIG_SCHED_SMT
908 /* Scheduler flags for the SMT level; adds asymmetric packing on CPUs with asymmetric SMT */
909 static int powerpc_smt_flags(void)
910 {
911 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
912 
913 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
914 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
915 		flags |= SD_ASYM_PACKING;
916 	}
917 	return flags;
918 }
919 #endif
920 
921 /*
922  * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
923  * This topology makes it *much* cheaper to migrate tasks between adjacent cores
924  * since the migrated task remains cache hot. We want to take advantage of this
925  * at the scheduler level so an extra topology level is required.
926  */
927 static int powerpc_shared_cache_flags(void)
928 {
929 	return SD_SHARE_PKG_RESOURCES;
930 }
931 
932 /*
933  * We can't just pass cpu_l2_cache_mask() directly because
934  * it returns a non-const pointer and the compiler barfs on that.
935  */
936 static const struct cpumask *shared_cache_mask(int cpu)
937 {
938 	return per_cpu(cpu_l2_cache_map, cpu);
939 }
940 
941 #ifdef CONFIG_SCHED_SMT
942 static const struct cpumask *smallcore_smt_mask(int cpu)
943 {
944 	return cpu_smallcore_mask(cpu);
945 }
946 #endif
947 
948 static struct cpumask *cpu_coregroup_mask(int cpu)
949 {
950 	return per_cpu(cpu_coregroup_map, cpu);
951 }
952 
953 static bool has_coregroup_support(void)
954 {
955 	return coregroup_enabled;
956 }
957 
958 static const struct cpumask *cpu_mc_mask(int cpu)
959 {
960 	return cpu_coregroup_mask(cpu);
961 }
962 
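/*
 * Scheduler topology levels, smallest to largest: SMT (threads of a core,
 * or of a small core when big cores are present), CACHE (CPUs sharing an
 * L2), MC (CPUs in the same coregroup) and DIE (all CPUs in the node).
 * fixup_topology() below adjusts the masks before this table is handed to
 * the scheduler.
 */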
963 static struct sched_domain_topology_level powerpc_topology[] = {
964 #ifdef CONFIG_SCHED_SMT
965 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
966 #endif
967 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
968 	{ cpu_mc_mask, SD_INIT_NAME(MC) },
969 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
970 	{ NULL, },
971 };
972 
973 static int __init init_big_cores(void)
974 {
975 	int cpu;
976 
977 	for_each_possible_cpu(cpu) {
978 		int err = init_cpu_l1_cache_map(cpu);
979 
980 		if (err)
981 			return err;
982 
983 		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
984 					GFP_KERNEL,
985 					cpu_to_node(cpu));
986 	}
987 
988 	has_big_cores = true;
989 	return 0;
990 }
991 
992 void __init smp_prepare_cpus(unsigned int max_cpus)
993 {
994 	unsigned int cpu;
995 
996 	DBG("smp_prepare_cpus\n");
997 
998 	/*
999 	 * setup_cpu may need to be called on the boot cpu. We haven't
1000 	 * spun any cpus up, but let's be paranoid.
1001 	 */
1002 	BUG_ON(boot_cpuid != smp_processor_id());
1003 
1004 	/* Fixup boot cpu */
1005 	smp_store_cpu_info(boot_cpuid);
1006 	cpu_callin_map[boot_cpuid] = 1;
1007 
1008 	for_each_possible_cpu(cpu) {
1009 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1010 					GFP_KERNEL, cpu_to_node(cpu));
1011 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1012 					GFP_KERNEL, cpu_to_node(cpu));
1013 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1014 					GFP_KERNEL, cpu_to_node(cpu));
1015 		if (has_coregroup_support())
1016 			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1017 						GFP_KERNEL, cpu_to_node(cpu));
1018 
1019 #ifdef CONFIG_NEED_MULTIPLE_NODES
1020 		/*
1021 		 * numa_node_id() works after this.
1022 		 */
1023 		if (cpu_present(cpu)) {
1024 			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1025 			set_cpu_numa_mem(cpu,
1026 				local_memory_node(numa_cpu_lookup_table[cpu]));
1027 		}
1028 #endif
1029 	}
1030 
1031 	/* Init the cpumasks so the boot CPU is related to itself */
1032 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1033 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1034 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
1035 
1036 	if (has_coregroup_support())
1037 		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1038 
1039 	init_big_cores();
1040 	if (has_big_cores) {
1041 		cpumask_set_cpu(boot_cpuid,
1042 				cpu_smallcore_mask(boot_cpuid));
1043 	}
1044 
1045 	if (smp_ops && smp_ops->probe)
1046 		smp_ops->probe();
1047 }
1048 
1049 void smp_prepare_boot_cpu(void)
1050 {
1051 	BUG_ON(smp_processor_id() != boot_cpuid);
1052 #ifdef CONFIG_PPC64
1053 	paca_ptrs[boot_cpuid]->__current = current;
1054 #endif
1055 	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1056 	current_set[boot_cpuid] = current;
1057 }
1058 
1059 #ifdef CONFIG_HOTPLUG_CPU
1060 
1061 int generic_cpu_disable(void)
1062 {
1063 	unsigned int cpu = smp_processor_id();
1064 
1065 	if (cpu == boot_cpuid)
1066 		return -EBUSY;
1067 
1068 	set_cpu_online(cpu, false);
1069 #ifdef CONFIG_PPC64
1070 	vdso_data->processorCount--;
1071 #endif
1072 	/* Update affinity of all IRQs previously aimed at this CPU */
1073 	irq_migrate_all_off_this_cpu();
1074 
1075 	/*
1076 	 * Depending on the details of the interrupt controller, it's possible
1077 	 * that one of the interrupts we just migrated away from this CPU is
1078 	 * actually already pending on this CPU. If we leave it in that state
1079 	 * the interrupt will never be EOI'ed, and will never fire again. So
1080 	 * temporarily enable interrupts here, to allow any pending interrupt to
1081 	 * be received (and EOI'ed), before we take this CPU offline.
1082 	 */
1083 	local_irq_enable();
1084 	mdelay(1);
1085 	local_irq_disable();
1086 
1087 	return 0;
1088 }
1089 
1090 void generic_cpu_die(unsigned int cpu)
1091 {
1092 	int i;
1093 
1094 	for (i = 0; i < 100; i++) {
1095 		smp_rmb();
1096 		if (is_cpu_dead(cpu))
1097 			return;
1098 		msleep(100);
1099 	}
1100 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1101 }
1102 
1103 void generic_set_cpu_dead(unsigned int cpu)
1104 {
1105 	per_cpu(cpu_state, cpu) = CPU_DEAD;
1106 }
1107 
1108 /*
1109  * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1110  * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1111  * which makes the delay in generic_cpu_die() not happen.
1112  */
1113 void generic_set_cpu_up(unsigned int cpu)
1114 {
1115 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1116 }
1117 
1118 int generic_check_cpu_restart(unsigned int cpu)
1119 {
1120 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1121 }
1122 
1123 int is_cpu_dead(unsigned int cpu)
1124 {
1125 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
1126 }
1127 
1128 static bool secondaries_inhibited(void)
1129 {
1130 	return kvm_hv_mode_active();
1131 }
1132 
1133 #else /* HOTPLUG_CPU */
1134 
1135 #define secondaries_inhibited()		0
1136 
1137 #endif
1138 
1139 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1140 {
1141 #ifdef CONFIG_PPC64
1142 	paca_ptrs[cpu]->__current = idle;
1143 	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1144 				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
1145 #endif
1146 	idle->cpu = cpu;
1147 	secondary_current = current_set[cpu] = idle;
1148 }
1149 
1150 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1151 {
1152 	int rc, c;
1153 
1154 	/*
1155 	 * Don't allow secondary threads to come online if inhibited
1156 	 */
1157 	if (threads_per_core > 1 && secondaries_inhibited() &&
1158 	    cpu_thread_in_subcore(cpu))
1159 		return -EBUSY;
1160 
1161 	if (smp_ops == NULL ||
1162 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1163 		return -EINVAL;
1164 
1165 	cpu_idle_thread_init(cpu, tidle);
1166 
1167 	/*
1168 	 * The platform might need to allocate resources prior to bringing
1169 	 * up the CPU
1170 	 */
1171 	if (smp_ops->prepare_cpu) {
1172 		rc = smp_ops->prepare_cpu(cpu);
1173 		if (rc)
1174 			return rc;
1175 	}
1176 
1177 	/* Make sure callin-map entry is 0 (can be left over from a CPU
1178 	 * hotplug).
1179 	 */
1180 	cpu_callin_map[cpu] = 0;
1181 
1182 	/* The information for processor bringup must
1183 	 * be written out to main store before we release
1184 	 * the processor.
1185 	 */
1186 	smp_mb();
1187 
1188 	/* wake up cpus */
1189 	DBG("smp: kicking cpu %d\n", cpu);
1190 	rc = smp_ops->kick_cpu(cpu);
1191 	if (rc) {
1192 		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1193 		return rc;
1194 	}
1195 
1196 	/*
1197 	 * Wait to see if the cpu made a callin (is actually up).
1198 	 * Use this value that I found through experimentation.
1199 	 * -- Cort
1200 	 */
1201 	if (system_state < SYSTEM_RUNNING)
1202 		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1203 			udelay(100);
1204 #ifdef CONFIG_HOTPLUG_CPU
1205 	else
1206 		/*
1207 		 * CPUs can take much longer to come up in the
1208 		 * hotplug case.  Wait five seconds.
1209 		 */
1210 		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1211 			msleep(1);
1212 #endif
1213 
1214 	if (!cpu_callin_map[cpu]) {
1215 		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1216 		return -ENOENT;
1217 	}
1218 
1219 	DBG("Processor %u found.\n", cpu);
1220 
1221 	if (smp_ops->give_timebase)
1222 		smp_ops->give_timebase();
1223 
1224 	/* Wait until cpu puts itself in the online & active maps */
1225 	spin_until_cond(cpu_online(cpu));
1226 
1227 	return 0;
1228 }
1229 
1230 /* Return the value of the reg property corresponding to the given
1231  * logical cpu.
1232  */
1233 int cpu_to_core_id(int cpu)
1234 {
1235 	struct device_node *np;
1236 	const __be32 *reg;
1237 	int id = -1;
1238 
1239 	np = of_get_cpu_node(cpu, NULL);
1240 	if (!np)
1241 		goto out;
1242 
1243 	reg = of_get_property(np, "reg", NULL);
1244 	if (!reg)
1245 		goto out;
1246 
1247 	id = be32_to_cpup(reg);
1248 out:
1249 	of_node_put(np);
1250 	return id;
1251 }
1252 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1253 
1254 /* Helper routines for cpu to core mapping */
1255 int cpu_core_index_of_thread(int cpu)
1256 {
1257 	return cpu >> threads_shift;
1258 }
1259 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1260 
1261 int cpu_first_thread_of_core(int core)
1262 {
1263 	return core << threads_shift;
1264 }
1265 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1266 
1267 /* Must be called when no change can occur to cpu_present_mask,
1268  * i.e. during cpu online or offline.
1269  */
1270 static struct device_node *cpu_to_l2cache(int cpu)
1271 {
1272 	struct device_node *np;
1273 	struct device_node *cache;
1274 
1275 	if (!cpu_present(cpu))
1276 		return NULL;
1277 
1278 	np = of_get_cpu_node(cpu, NULL);
1279 	if (np == NULL)
1280 		return NULL;
1281 
1282 	cache = of_find_next_cache_node(np);
1283 
1284 	of_node_put(np);
1285 
1286 	return cache;
1287 }
1288 
1289 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1290 {
1291 	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1292 	struct device_node *l2_cache, *np;
1293 	int i;
1294 
1295 	if (has_big_cores)
1296 		submask_fn = cpu_smallcore_mask;
1297 
1298 	l2_cache = cpu_to_l2cache(cpu);
1299 	if (!l2_cache || !*mask) {
1300 		/* Assume only core siblings share cache with this CPU */
1301 		for_each_cpu(i, submask_fn(cpu))
1302 			set_cpus_related(cpu, i, cpu_l2_cache_mask);
1303 
1304 		return false;
1305 	}
1306 
1307 	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1308 
1309 	/* Update l2-cache mask with all the CPUs that are part of submask */
1310 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1311 
1312 	/* Skip all CPUs already part of current CPU l2-cache mask */
1313 	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1314 
1315 	for_each_cpu(i, *mask) {
1316 		/*
1317 		 * When updating the masks the current CPU has not yet been marked
1318 		 * online, but we still need to update the cache masks.
1319 		 */
1320 		np = cpu_to_l2cache(i);
1321 
1322 		/* Skip all CPUs already part of current CPU l2-cache */
1323 		if (np == l2_cache) {
1324 			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1325 			cpumask_andnot(*mask, *mask, submask_fn(i));
1326 		} else {
1327 			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1328 		}
1329 
1330 		of_node_put(np);
1331 	}
1332 	of_node_put(l2_cache);
1333 
1334 	return true;
1335 }
1336 
1337 #ifdef CONFIG_HOTPLUG_CPU
1338 static void remove_cpu_from_masks(int cpu)
1339 {
1340 	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1341 	int i;
1342 
1343 	unmap_cpu_from_node(cpu);
1344 
1345 	if (shared_caches)
1346 		mask_fn = cpu_l2_cache_mask;
1347 
1348 	for_each_cpu(i, mask_fn(cpu)) {
1349 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1350 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1351 		if (has_big_cores)
1352 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1353 	}
1354 
1355 	for_each_cpu(i, cpu_core_mask(cpu))
1356 		set_cpus_unrelated(cpu, i, cpu_core_mask);
1357 
1358 	if (has_coregroup_support()) {
1359 		for_each_cpu(i, cpu_coregroup_mask(cpu))
1360 			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1361 	}
1362 }
1363 #endif
1364 
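/*
 * Make @cpu a small-core sibling of itself and of every online CPU that
 * shares its L1 cache, as recorded in cpu_l1_cache_map; a no-op unless
 * the platform reported big cores.
 */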
1365 static inline void add_cpu_to_smallcore_masks(int cpu)
1366 {
1367 	int i;
1368 
1369 	if (!has_big_cores)
1370 		return;
1371 
1372 	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1373 
1374 	for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) {
1375 		if (cpu_online(i))
1376 			set_cpus_related(i, cpu, cpu_smallcore_mask);
1377 	}
1378 }
1379 
1380 static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1381 {
1382 	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1383 	int coregroup_id = cpu_to_coregroup_id(cpu);
1384 	int i;
1385 
1386 	if (shared_caches)
1387 		submask_fn = cpu_l2_cache_mask;
1388 
1389 	if (!*mask) {
1390 		/* Assume only siblings are part of this CPU's coregroup */
1391 		for_each_cpu(i, submask_fn(cpu))
1392 			set_cpus_related(cpu, i, cpu_coregroup_mask);
1393 
1394 		return;
1395 	}
1396 
1397 	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1398 
1399 	/* Update coregroup mask with all the CPUs that are part of submask */
1400 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1401 
1402 	/* Skip all CPUs already part of coregroup mask */
1403 	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1404 
1405 	for_each_cpu(i, *mask) {
1406 		/* Skip all CPUs not part of this coregroup */
1407 		if (coregroup_id == cpu_to_coregroup_id(i)) {
1408 			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1409 			cpumask_andnot(*mask, *mask, submask_fn(i));
1410 		} else {
1411 			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1412 		}
1413 	}
1414 }
1415 
1416 static void add_cpu_to_masks(int cpu)
1417 {
1418 	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1419 	int first_thread = cpu_first_thread_sibling(cpu);
1420 	int chip_id = cpu_to_chip_id(cpu);
1421 	cpumask_var_t mask;
1422 	bool ret;
1423 	int i;
1424 
1425 	/*
1426 	 * This CPU will not be in the online mask yet so we need to manually
1427 	 * add it to its own thread sibling mask.
1428 	 */
1429 	map_cpu_to_node(cpu, cpu_to_node(cpu));
1430 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1431 	cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1432 
1433 	for (i = first_thread; i < first_thread + threads_per_core; i++)
1434 		if (cpu_online(i))
1435 			set_cpus_related(i, cpu, cpu_sibling_mask);
1436 
1437 	add_cpu_to_smallcore_masks(cpu);
1438 
1439 	/* In CPU-hotplug path, hence use GFP_ATOMIC */
1440 	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1441 	update_mask_by_l2(cpu, &mask);
1442 
1443 	if (has_coregroup_support())
1444 		update_coregroup_mask(cpu, &mask);
1445 
1446 	if (shared_caches)
1447 		submask_fn = cpu_l2_cache_mask;
1448 
1449 	/* Update core_mask with all the CPUs that are part of submask */
1450 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
1451 
1452 	/* Skip all CPUs already part of current CPU core mask */
1453 	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1454 
1455 	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
1456 	if (chip_id == -1)
1457 		cpumask_and(mask, mask, cpu_cpu_mask(cpu));
1458 
1459 	for_each_cpu(i, mask) {
1460 		if (chip_id == cpu_to_chip_id(i)) {
1461 			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1462 			cpumask_andnot(mask, mask, submask_fn(i));
1463 		} else {
1464 			cpumask_andnot(mask, mask, cpu_core_mask(i));
1465 		}
1466 	}
1467 
1468 	free_cpumask_var(mask);
1469 }
1470 
1471 /* Activate a secondary processor. */
1472 void start_secondary(void *unused)
1473 {
1474 	unsigned int cpu = raw_smp_processor_id();
1475 
1476 	mmgrab(&init_mm);
1477 	current->active_mm = &init_mm;
1478 
1479 	smp_store_cpu_info(cpu);
1480 	set_dec(tb_ticks_per_jiffy);
1481 	rcu_cpu_starting(cpu);
1482 	cpu_callin_map[cpu] = 1;
1483 
1484 	if (smp_ops->setup_cpu)
1485 		smp_ops->setup_cpu(cpu);
1486 	if (smp_ops->take_timebase)
1487 		smp_ops->take_timebase();
1488 
1489 	secondary_cpu_time_init();
1490 
1491 #ifdef CONFIG_PPC64
1492 	if (system_state == SYSTEM_RUNNING)
1493 		vdso_data->processorCount++;
1494 
1495 	vdso_getcpu_init();
1496 #endif
1497 	set_numa_node(numa_cpu_lookup_table[cpu]);
1498 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1499 
1500 	/* Update topology CPU masks */
1501 	add_cpu_to_masks(cpu);
1502 
1503 	/*
1504 	 * Check for any shared caches. Note that this must be done on a
1505 	 * per-core basis because one core in the pair might be disabled.
1506 	 */
1507 	if (!shared_caches) {
1508 		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1509 		struct cpumask *mask = cpu_l2_cache_mask(cpu);
1510 
1511 		if (has_big_cores)
1512 			sibling_mask = cpu_smallcore_mask;
1513 
1514 		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1515 			shared_caches = true;
1516 	}
1517 
1518 	smp_wmb();
1519 	notify_cpu_starting(cpu);
1520 	set_cpu_online(cpu, true);
1521 
1522 	boot_init_stack_canary();
1523 
1524 	local_irq_enable();
1525 
1526 	/* We can enable ftrace for secondary cpus now */
1527 	this_cpu_enable_ftrace();
1528 
1529 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1530 
1531 	BUG();
1532 }
1533 
1534 #ifdef CONFIG_PROFILING
1535 int setup_profiling_timer(unsigned int multiplier)
1536 {
1537 	return 0;
1538 }
1539 #endif
1540 
1541 static void fixup_topology(void)
1542 {
1543 	int i;
1544 
1545 #ifdef CONFIG_SCHED_SMT
1546 	if (has_big_cores) {
1547 		pr_info("Big cores detected but using small core scheduling\n");
1548 		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
1549 	}
1550 #endif
1551 
1552 	if (!has_coregroup_support())
1553 		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
1554 
1555 	/*
1556 	 * Try to consolidate topology levels here instead of
1557 	 * allowing the scheduler to degenerate them.
1558 	 * - Don't consolidate if masks are different.
1559 	 * - Don't consolidate if sd_flags exist and are different.
1560 	 */
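	/*
	 * For example, when coregroup support is absent, the MC level was
	 * aliased to CACHE earlier in this function; the loop below then
	 * shifts the later levels up so the duplicate level disappears.
	 */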
1561 	for (i = 1; i <= die_idx; i++) {
1562 		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
1563 			continue;
1564 
1565 		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
1566 				powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
1567 			continue;
1568 
1569 		if (!powerpc_topology[i - 1].sd_flags)
1570 			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
1571 
1572 		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
1573 		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
1574 #ifdef CONFIG_SCHED_DEBUG
1575 		powerpc_topology[i].name = powerpc_topology[i + 1].name;
1576 #endif
1577 	}
1578 }
1579 
1580 void __init smp_cpus_done(unsigned int max_cpus)
1581 {
1582 	/*
1583 	 * We are running pinned to the boot CPU, see rest_init().
1584 	 */
1585 	if (smp_ops && smp_ops->setup_cpu)
1586 		smp_ops->setup_cpu(boot_cpuid);
1587 
1588 	if (smp_ops && smp_ops->bringup_done)
1589 		smp_ops->bringup_done();
1590 
1591 	dump_numa_cpu_topology();
1592 
1593 	fixup_topology();
1594 	set_sched_topology(powerpc_topology);
1595 }
1596 
1597 #ifdef CONFIG_HOTPLUG_CPU
1598 int __cpu_disable(void)
1599 {
1600 	int cpu = smp_processor_id();
1601 	int err;
1602 
1603 	if (!smp_ops->cpu_disable)
1604 		return -ENOSYS;
1605 
1606 	this_cpu_disable_ftrace();
1607 
1608 	err = smp_ops->cpu_disable();
1609 	if (err)
1610 		return err;
1611 
1612 	/* Update sibling maps */
1613 	remove_cpu_from_masks(cpu);
1614 
1615 	return 0;
1616 }
1617 
1618 void __cpu_die(unsigned int cpu)
1619 {
1620 	if (smp_ops->cpu_die)
1621 		smp_ops->cpu_die(cpu);
1622 }
1623 
1624 void arch_cpu_idle_dead(void)
1625 {
1626 	/*
1627 	 * Disable ftrace on the down path. It will be re-enabled by
1628 	 * start_secondary() via start_secondary_resume() below.
1629 	 */
1630 	this_cpu_disable_ftrace();
1631 
1632 	if (smp_ops->cpu_offline_self)
1633 		smp_ops->cpu_offline_self();
1634 
1635 	/* If we return, we re-enter start_secondary */
1636 	start_secondary_resume();
1637 }
1638 
1639 #endif
1640