1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * SMP support for ppc.
4  *
5  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6  * deal of code from the sparc and intel versions.
7  *
8  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9  *
10  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12  */
13 
14 #undef DEBUG
15 
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task_stack.h>
20 #include <linux/sched/topology.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/spinlock.h>
26 #include <linux/cache.h>
27 #include <linux/err.h>
28 #include <linux/device.h>
29 #include <linux/cpu.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/profile.h>
33 #include <linux/processor.h>
34 #include <linux/random.h>
35 #include <linux/stackprotector.h>
36 
37 #include <asm/ptrace.h>
38 #include <linux/atomic.h>
39 #include <asm/irq.h>
40 #include <asm/hw_irq.h>
41 #include <asm/kvm_ppc.h>
42 #include <asm/dbell.h>
43 #include <asm/page.h>
44 #include <asm/pgtable.h>
45 #include <asm/prom.h>
46 #include <asm/smp.h>
47 #include <asm/time.h>
48 #include <asm/machdep.h>
49 #include <asm/cputhreads.h>
50 #include <asm/cputable.h>
51 #include <asm/mpic.h>
52 #include <asm/vdso_datapage.h>
53 #ifdef CONFIG_PPC64
54 #include <asm/paca.h>
55 #endif
56 #include <asm/vdso.h>
57 #include <asm/debug.h>
58 #include <asm/kexec.h>
59 #include <asm/asm-prototypes.h>
60 #include <asm/cpu_has_feature.h>
61 #include <asm/ftrace.h>
62 
63 #ifdef DEBUG
64 #include <asm/udbg.h>
65 #define DBG(fmt...) udbg_printf(fmt)
66 #else
67 #define DBG(fmt...)
68 #endif
69 
70 #ifdef CONFIG_HOTPLUG_CPU
71 /* State of each CPU during hotplug phases */
72 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
73 #endif
74 
75 struct task_struct *secondary_current;
76 bool has_big_cores;
77 
78 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
79 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
80 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
81 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
82 
83 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
84 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
85 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
86 EXPORT_SYMBOL_GPL(has_big_cores);
87 
88 #define MAX_THREAD_LIST_SIZE	8
89 #define THREAD_GROUP_SHARE_L1   1
90 struct thread_groups {
91 	unsigned int property;
92 	unsigned int nr_groups;
93 	unsigned int threads_per_group;
94 	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
95 };
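/*
 * Note: the fields above mirror the layout of the "ibm,thread-groups"
 * device-tree property: property = ibm,thread-groups[0], nr_groups =
 * ibm,thread-groups[1], threads_per_group = ibm,thread-groups[2], and
 * thread_list holds the remaining entries. See parse_thread_groups().
 */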
96 
97 /*
98  * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
99  * the set of its siblings that share the L1 cache.
100  */
101 DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
102 
103 /* SMP operations for this machine */
104 struct smp_ops_t *smp_ops;
105 
106 /* Can't be static due to PowerMac hackery */
107 volatile unsigned int cpu_callin_map[NR_CPUS];
108 
109 int smt_enabled_at_boot = 1;
110 
111 /*
112  * Returns 1 if the specified cpu should be brought up during boot.
113  * Used to inhibit booting threads if they've been disabled or
114  * limited on the command line
115  */
116 int smp_generic_cpu_bootable(unsigned int nr)
117 {
118 	/* Special case - we inhibit secondary thread startup
119 	 * during boot if the user requests it.
120 	 */
121 	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
122 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
123 			return 0;
124 		if (smt_enabled_at_boot
125 		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
126 			return 0;
127 	}
128 
129 	return 1;
130 }
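/*
 * For example, booting with SMT limited to 2 threads per core leaves
 * smt_enabled_at_boot == 2, so only threads 0 and 1 of each core pass the
 * check above; with SMT disabled (smt_enabled_at_boot == 0) only thread 0
 * of each core is bootable during boot.
 */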
131 
132 
133 #ifdef CONFIG_PPC64
134 int smp_generic_kick_cpu(int nr)
135 {
136 	if (nr < 0 || nr >= nr_cpu_ids)
137 		return -EINVAL;
138 
139 	/*
140 	 * The processor is currently spinning, waiting for the
141 	 * cpu_start field to become non-zero. After we set cpu_start,
142 	 * the processor will continue on to secondary_start.
143 	 */
144 	if (!paca_ptrs[nr]->cpu_start) {
145 		paca_ptrs[nr]->cpu_start = 1;
146 		smp_mb();
147 		return 0;
148 	}
149 
150 #ifdef CONFIG_HOTPLUG_CPU
151 	/*
152 	 * Ok it's not there, so it might be soft-unplugged, let's
153 	 * try to bring it back
154 	 */
155 	generic_set_cpu_up(nr);
156 	smp_wmb();
157 	smp_send_reschedule(nr);
158 #endif /* CONFIG_HOTPLUG_CPU */
159 
160 	return 0;
161 }
162 #endif /* CONFIG_PPC64 */
163 
164 static irqreturn_t call_function_action(int irq, void *data)
165 {
166 	generic_smp_call_function_interrupt();
167 	return IRQ_HANDLED;
168 }
169 
170 static irqreturn_t reschedule_action(int irq, void *data)
171 {
172 	scheduler_ipi();
173 	return IRQ_HANDLED;
174 }
175 
176 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
177 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
178 {
179 	timer_broadcast_interrupt();
180 	return IRQ_HANDLED;
181 }
182 #endif
183 
184 #ifdef CONFIG_NMI_IPI
185 static irqreturn_t nmi_ipi_action(int irq, void *data)
186 {
187 	smp_handle_nmi_ipi(get_irq_regs());
188 	return IRQ_HANDLED;
189 }
190 #endif
191 
192 static irq_handler_t smp_ipi_action[] = {
193 	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
194 	[PPC_MSG_RESCHEDULE] = reschedule_action,
195 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
196 	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
197 #endif
198 #ifdef CONFIG_NMI_IPI
199 	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
200 #endif
201 };
202 
203 /*
204  * The NMI IPI is a fallback and not truly non-maskable. It is simpler
205  * than going through the call function infrastructure, and strongly
206  * serialized, so it is more appropriate for debugging.
207  */
208 const char *smp_ipi_name[] = {
209 	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
210 	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
211 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
212 	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
213 #endif
214 #ifdef CONFIG_NMI_IPI
215 	[PPC_MSG_NMI_IPI] = "nmi ipi",
216 #endif
217 };
218 
219 /* optional function to request ipi, for controllers with >= 4 ipis */
220 int smp_request_message_ipi(int virq, int msg)
221 {
222 	int err;
223 
224 	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
225 		return -EINVAL;
226 #ifndef CONFIG_NMI_IPI
227 	if (msg == PPC_MSG_NMI_IPI)
228 		return 1;
229 #endif
230 
231 	err = request_irq(virq, smp_ipi_action[msg],
232 			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
233 			  smp_ipi_name[msg], NULL);
234 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
235 		virq, smp_ipi_name[msg], err);
236 
237 	return err;
238 }
239 
240 #ifdef CONFIG_PPC_SMP_MUXED_IPI
241 struct cpu_messages {
242 	long messages;			/* current messages */
243 };
244 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
245 
246 void smp_muxed_ipi_set_message(int cpu, int msg)
247 {
248 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
249 	char *message = (char *)&info->messages;
250 
251 	/*
252 	 * Order previous accesses before accesses in the IPI handler.
253 	 */
254 	smp_mb();
255 	message[msg] = 1;
256 }
257 
258 void smp_muxed_ipi_message_pass(int cpu, int msg)
259 {
260 	smp_muxed_ipi_set_message(cpu, msg);
261 
262 	/*
263 	 * cause_ipi functions are required to include a full barrier
264 	 * before doing whatever causes the IPI.
265 	 */
266 	smp_ops->cause_ipi(cpu);
267 }
268 
269 #ifdef __BIG_ENDIAN__
270 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
271 #else
272 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
273 #endif
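/*
 * IPI_MESSAGE(A) is the value the 'messages' word holds when byte A has
 * been set to 1 by smp_muxed_ipi_set_message() above: message type A
 * always lands in byte A of the word regardless of endianness, so the
 * xchg() in smp_ipi_demux_relaxed() can test all pending types at once.
 */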
274 
275 irqreturn_t smp_ipi_demux(void)
276 {
277 	mb();	/* order any irq clear */
278 
279 	return smp_ipi_demux_relaxed();
280 }
281 
282 /* sync-free variant. Callers should ensure synchronization */
283 irqreturn_t smp_ipi_demux_relaxed(void)
284 {
285 	struct cpu_messages *info;
286 	unsigned long all;
287 
288 	info = this_cpu_ptr(&ipi_message);
289 	do {
290 		all = xchg(&info->messages, 0);
291 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
292 		/*
293 		 * Must check for PPC_MSG_RM_HOST_ACTION messages
294 		 * before PPC_MSG_CALL_FUNCTION messages because when
295 		 * a VM is destroyed, we call kick_all_cpus_sync()
296 		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
297 		 * messages have completed before we free any VCPUs.
298 		 */
299 		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
300 			kvmppc_xics_ipi_action();
301 #endif
302 		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
303 			generic_smp_call_function_interrupt();
304 		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
305 			scheduler_ipi();
306 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
307 		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
308 			timer_broadcast_interrupt();
309 #endif
310 #ifdef CONFIG_NMI_IPI
311 		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
312 			nmi_ipi_action(0, NULL);
313 #endif
314 	} while (info->messages);
315 
316 	return IRQ_HANDLED;
317 }
318 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
319 
320 static inline void do_message_pass(int cpu, int msg)
321 {
322 	if (smp_ops->message_pass)
323 		smp_ops->message_pass(cpu, msg);
324 #ifdef CONFIG_PPC_SMP_MUXED_IPI
325 	else
326 		smp_muxed_ipi_message_pass(cpu, msg);
327 #endif
328 }
329 
330 void smp_send_reschedule(int cpu)
331 {
332 	if (likely(smp_ops))
333 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
334 }
335 EXPORT_SYMBOL_GPL(smp_send_reschedule);
336 
337 void arch_send_call_function_single_ipi(int cpu)
338 {
339 	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
340 }
341 
342 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
343 {
344 	unsigned int cpu;
345 
346 	for_each_cpu(cpu, mask)
347 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
348 }
349 
350 #ifdef CONFIG_NMI_IPI
351 
352 /*
353  * "NMI IPI" system.
354  *
355  * NMI IPIs may not be recoverable, so should not be used as ongoing part of
356  * a running system. They can be used for crash, debug, halt/reboot, etc.
357  *
358  * The IPI call waits with interrupts disabled until all targets enter the
359  * NMI handler, then returns. Subsequent IPIs can be issued before targets
360  * have returned from their handlers, so there is no guarantee about
361  * concurrency or re-entrancy.
362  *
363  * A new NMI can be issued before all targets exit the handler.
364  *
365  * The IPI call may time out without all targets entering the NMI handler.
366  * In that case, there is some logic to recover (and ignore subsequent
367  * NMI interrupts that may eventually be raised), but the platform interrupt
368  * handler may not be able to distinguish this from other exception causes,
369  * which may cause a crash.
370  */
371 
372 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
373 static struct cpumask nmi_ipi_pending_mask;
374 static bool nmi_ipi_busy = false;
375 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
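/*
 * __nmi_ipi_lock is a bare atomic used as a test-and-set lock (see
 * nmi_ipi_lock_start()/nmi_ipi_unlock() below); it is taken with
 * interrupts hard-disabled so the holder cannot be interrupted while
 * manipulating nmi_ipi_pending_mask and nmi_ipi_busy.
 */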
376 
377 static void nmi_ipi_lock_start(unsigned long *flags)
378 {
379 	raw_local_irq_save(*flags);
380 	hard_irq_disable();
381 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
382 		raw_local_irq_restore(*flags);
383 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
384 		raw_local_irq_save(*flags);
385 		hard_irq_disable();
386 	}
387 }
388 
389 static void nmi_ipi_lock(void)
390 {
391 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
392 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
393 }
394 
395 static void nmi_ipi_unlock(void)
396 {
397 	smp_mb();
398 	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
399 	atomic_set(&__nmi_ipi_lock, 0);
400 }
401 
402 static void nmi_ipi_unlock_end(unsigned long *flags)
403 {
404 	nmi_ipi_unlock();
405 	raw_local_irq_restore(*flags);
406 }
407 
408 /*
409  * Platform NMI handler calls this to ack
410  */
411 int smp_handle_nmi_ipi(struct pt_regs *regs)
412 {
413 	void (*fn)(struct pt_regs *) = NULL;
414 	unsigned long flags;
415 	int me = raw_smp_processor_id();
416 	int ret = 0;
417 
418 	/*
419 	 * Unexpected NMIs are possible here because the interrupt may not
420 	 * be able to distinguish NMI IPIs from other types of NMIs, or
421 	 * because the caller may have timed out.
422 	 */
423 	nmi_ipi_lock_start(&flags);
424 	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
425 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
426 		fn = READ_ONCE(nmi_ipi_function);
427 		WARN_ON_ONCE(!fn);
428 		ret = 1;
429 	}
430 	nmi_ipi_unlock_end(&flags);
431 
432 	if (fn)
433 		fn(regs);
434 
435 	return ret;
436 }
437 
438 static void do_smp_send_nmi_ipi(int cpu, bool safe)
439 {
440 	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
441 		return;
442 
443 	if (cpu >= 0) {
444 		do_message_pass(cpu, PPC_MSG_NMI_IPI);
445 	} else {
446 		int c;
447 
448 		for_each_online_cpu(c) {
449 			if (c == raw_smp_processor_id())
450 				continue;
451 			do_message_pass(c, PPC_MSG_NMI_IPI);
452 		}
453 	}
454 }
455 
456 /*
457  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
458  * - fn is the target callback function.
459  * - delay_us > 0 is the delay before giving up waiting for targets to
460  *   begin executing the handler, == 0 specifies indefinite delay.
461  */
462 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
463 				u64 delay_us, bool safe)
464 {
465 	unsigned long flags;
466 	int me = raw_smp_processor_id();
467 	int ret = 1;
468 
469 	BUG_ON(cpu == me);
470 	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
471 
472 	if (unlikely(!smp_ops))
473 		return 0;
474 
475 	nmi_ipi_lock_start(&flags);
476 	while (nmi_ipi_busy) {
477 		nmi_ipi_unlock_end(&flags);
478 		spin_until_cond(!nmi_ipi_busy);
479 		nmi_ipi_lock_start(&flags);
480 	}
481 	nmi_ipi_busy = true;
482 	nmi_ipi_function = fn;
483 
484 	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
485 
486 	if (cpu < 0) {
487 		/* ALL_OTHERS */
488 		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
489 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
490 	} else {
491 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
492 	}
493 
494 	nmi_ipi_unlock();
495 
496 	/* Interrupts remain hard disabled */
497 
498 	do_smp_send_nmi_ipi(cpu, safe);
499 
500 	nmi_ipi_lock();
501 	/* nmi_ipi_busy is set here, so unlock/lock is okay */
502 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
503 		nmi_ipi_unlock();
504 		udelay(1);
505 		nmi_ipi_lock();
506 		if (delay_us) {
507 			delay_us--;
508 			if (!delay_us)
509 				break;
510 		}
511 	}
512 
513 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
514 		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
515 		ret = 0;
516 		cpumask_clear(&nmi_ipi_pending_mask);
517 	}
518 
519 	nmi_ipi_function = NULL;
520 	nmi_ipi_busy = false;
521 
522 	nmi_ipi_unlock_end(&flags);
523 
524 	return ret;
525 }
526 
527 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
528 {
529 	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
530 }
531 
532 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
533 {
534 	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
535 }
536 #endif /* CONFIG_NMI_IPI */
537 
538 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
539 void tick_broadcast(const struct cpumask *mask)
540 {
541 	unsigned int cpu;
542 
543 	for_each_cpu(cpu, mask)
544 		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
545 }
546 #endif
547 
548 #ifdef CONFIG_DEBUGGER
549 void debugger_ipi_callback(struct pt_regs *regs)
550 {
551 	debugger_ipi(regs);
552 }
553 
554 void smp_send_debugger_break(void)
555 {
556 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
557 }
558 #endif
559 
560 #ifdef CONFIG_KEXEC_CORE
561 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
562 {
563 	int cpu;
564 
565 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
566 	if (kdump_in_progress() && crash_wake_offline) {
567 		for_each_present_cpu(cpu) {
568 			if (cpu_online(cpu))
569 				continue;
570 			/*
571 			 * crash_ipi_callback will wait for
572 			 * all cpus, including offline CPUs.
573 			 * We don't care about nmi_ipi_function.
574 			 * Offline cpus will jump straight into
575 			 * crash_ipi_callback, so we can skip the
576 			 * entire NMI dance and the wait for
577 			 * cpus to clear the pending mask, etc.
578 			 */
579 			do_smp_send_nmi_ipi(cpu, false);
580 		}
581 	}
582 }
583 #endif
584 
585 #ifdef CONFIG_NMI_IPI
586 static void crash_stop_this_cpu(struct pt_regs *regs)
587 #else
588 static void crash_stop_this_cpu(void *dummy)
589 #endif
590 {
591 	/*
592 	 * Just busy wait here and avoid marking CPU as offline to ensure
593 	 * register data is captured appropriately.
594 	 */
595 	while (1)
596 		cpu_relax();
597 }
598 
599 void crash_smp_send_stop(void)
600 {
601 	static bool stopped = false;
602 
603 	if (stopped)
604 		return;
605 
606 	stopped = true;
607 
608 #ifdef CONFIG_NMI_IPI
609 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
610 #else
611 	smp_call_function(crash_stop_this_cpu, NULL, 0);
612 #endif /* CONFIG_NMI_IPI */
613 }
614 
615 #ifdef CONFIG_NMI_IPI
616 static void nmi_stop_this_cpu(struct pt_regs *regs)
617 {
618 	/*
619 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
620 	 */
621 	set_cpu_online(smp_processor_id(), false);
622 
623 	spin_begin();
624 	while (1)
625 		spin_cpu_relax();
626 }
627 
628 void smp_send_stop(void)
629 {
630 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
631 }
632 
633 #else /* CONFIG_NMI_IPI */
634 
635 static void stop_this_cpu(void *dummy)
636 {
637 	hard_irq_disable();
638 
639 	/*
640 	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
641 	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
642 	 * to know other CPUs are offline before it breaks locks to flush
643 	 * printk buffers, in case we panic()ed while holding the lock.
644 	 */
645 	set_cpu_online(smp_processor_id(), false);
646 
647 	spin_begin();
648 	while (1)
649 		spin_cpu_relax();
650 }
651 
652 void smp_send_stop(void)
653 {
654 	static bool stopped = false;
655 
656 	/*
657 	 * Prevent waiting on csd lock from a previous smp_send_stop.
658 	 * This is racy, but in general callers try to do the right
659 	 * thing and only fire off one smp_send_stop (e.g., see
660 	 * kernel/panic.c)
661 	 */
662 	if (stopped)
663 		return;
664 
665 	stopped = true;
666 
667 	smp_call_function(stop_this_cpu, NULL, 0);
668 }
669 #endif /* CONFIG_NMI_IPI */
670 
671 struct task_struct *current_set[NR_CPUS];
672 
673 static void smp_store_cpu_info(int id)
674 {
675 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
676 #ifdef CONFIG_PPC_FSL_BOOK3E
677 	per_cpu(next_tlbcam_idx, id)
678 		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
679 #endif
680 }
681 
682 /*
683  * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
684  * rather than just passing around the cpumask we pass around a function that
685  * returns that cpumask for the given CPU.
686  */
687 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
688 {
689 	cpumask_set_cpu(i, get_cpumask(j));
690 	cpumask_set_cpu(j, get_cpumask(i));
691 }
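/*
 * Example: set_cpus_related(i, j, cpu_sibling_mask) marks i and j as
 * thread siblings of each other by setting each CPU in the other's
 * sibling mask; set_cpus_unrelated() below undoes this on unplug.
 */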
692 
693 #ifdef CONFIG_HOTPLUG_CPU
694 static void set_cpus_unrelated(int i, int j,
695 		struct cpumask *(*get_cpumask)(int))
696 {
697 	cpumask_clear_cpu(i, get_cpumask(j));
698 	cpumask_clear_cpu(j, get_cpumask(i));
699 }
700 #endif
701 
702 /*
703  * parse_thread_groups: Parses the "ibm,thread-groups" device tree
704  *                      property for the CPU device node @dn and stores
705  *                      the parsed output in the thread_groups
706  *                      structure @tg if the ibm,thread-groups[0]
707  *                      matches @property.
708  *
709  * @dn: The device node of the CPU device.
710  * @tg: Pointer to a thread group structure into which the parsed
711  *      output of "ibm,thread-groups" is stored.
712  * @property: The property of the thread-group that the caller is
713  *            interested in.
714  *
715  * ibm,thread-groups[0..N-1] array defines which group of threads in
716  * the CPU-device node can be grouped together based on the property.
717  *
718  * ibm,thread-groups[0] tells us the property based on which the
719  * threads are being grouped together. If this value is 1, it implies
720  * that the threads in the same group share the L1 and translation caches.
721  *
722  * ibm,thread-groups[1] tells us how many such thread groups exist.
723  *
724  * ibm,thread-groups[2] tells us the number of threads in each such
725  * group.
726  *
727  * ibm,thread-groups[3..N-1] is the list of threads identified by
728  * "ibm,ppc-interrupt-server#s" arranged as per their membership in
729  * the grouping.
730  *
731  * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
732  * implies that there are 2 groups of 4 threads each, where each group
733  * of threads shares the L1 and translation caches.
734  *
735  * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
736  * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
737  * 11, 12}.
738  *
739  * Returns 0 on success, -EINVAL if the property does not exist,
740  * -ENODATA if property does not have a value, and -EOVERFLOW if the
741  * property data isn't large enough.
742  */
743 static int parse_thread_groups(struct device_node *dn,
744 			       struct thread_groups *tg,
745 			       unsigned int property)
746 {
747 	int i;
748 	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
749 	u32 *thread_list;
750 	size_t total_threads;
751 	int ret;
752 
753 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
754 					 thread_group_array, 3);
755 	if (ret)
756 		return ret;
757 
758 	tg->property = thread_group_array[0];
759 	tg->nr_groups = thread_group_array[1];
760 	tg->threads_per_group = thread_group_array[2];
761 	if (tg->property != property ||
762 	    tg->nr_groups < 1 ||
763 	    tg->threads_per_group < 1)
764 		return -ENODATA;
765 
766 	total_threads = tg->nr_groups * tg->threads_per_group;
767 
768 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
769 					 thread_group_array,
770 					 3 + total_threads);
771 	if (ret)
772 		return ret;
773 
774 	thread_list = &thread_group_array[3];
775 
776 	for (i = 0 ; i < total_threads; i++)
777 		tg->thread_list[i] = thread_list[i];
778 
779 	return 0;
780 }
781 
782 /*
783  * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
784  *                              that @cpu belongs to.
785  *
786  * @cpu : The logical CPU whose thread group is being searched.
787  * @tg : The thread-group structure of the CPU node which @cpu belongs
788  *       to.
789  *
790  * Returns the index to tg->thread_list that points to the start
791  * of the thread_group that @cpu belongs to.
792  *
793  * Returns -1 if cpu doesn't belong to any of the groups pointed to by
794  * tg->thread_list.
795  */
796 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
797 {
798 	int hw_cpu_id = get_hard_smp_processor_id(cpu);
799 	int i, j;
800 
801 	for (i = 0; i < tg->nr_groups; i++) {
802 		int group_start = i * tg->threads_per_group;
803 
804 		for (j = 0; j < tg->threads_per_group; j++) {
805 			int idx = group_start + j;
806 
807 			if (tg->thread_list[idx] == hw_cpu_id)
808 				return group_start;
809 		}
810 	}
811 
812 	return -1;
813 }
814 
815 static int init_cpu_l1_cache_map(int cpu)
816 
817 {
818 	struct device_node *dn = of_get_cpu_node(cpu, NULL);
819 	struct thread_groups tg = {.property = 0,
820 				   .nr_groups = 0,
821 				   .threads_per_group = 0};
822 	int first_thread = cpu_first_thread_sibling(cpu);
823 	int i, cpu_group_start = -1, err = 0;
824 
825 	if (!dn)
826 		return -ENODATA;
827 
828 	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
829 	if (err)
830 		goto out;
831 
832 	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
833 				GFP_KERNEL,
834 				cpu_to_node(cpu));
835 
836 	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
837 
838 	if (unlikely(cpu_group_start == -1)) {
839 		WARN_ON_ONCE(1);
840 		err = -ENODATA;
841 		goto out;
842 	}
843 
844 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
845 		int i_group_start = get_cpu_thread_group_start(i, &tg);
846 
847 		if (unlikely(i_group_start == -1)) {
848 			WARN_ON_ONCE(1);
849 			err = -ENODATA;
850 			goto out;
851 		}
852 
853 		if (i_group_start == cpu_group_start)
854 			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
855 	}
856 
857 out:
858 	of_node_put(dn);
859 	return err;
860 }
861 
862 static int init_big_cores(void)
863 {
864 	int cpu;
865 
866 	for_each_possible_cpu(cpu) {
867 		int err = init_cpu_l1_cache_map(cpu);
868 
869 		if (err)
870 			return err;
871 
872 		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
873 					GFP_KERNEL,
874 					cpu_to_node(cpu));
875 	}
876 
877 	has_big_cores = true;
878 	return 0;
879 }
880 
881 void __init smp_prepare_cpus(unsigned int max_cpus)
882 {
883 	unsigned int cpu;
884 
885 	DBG("smp_prepare_cpus\n");
886 
887 	/*
888 	 * setup_cpu may need to be called on the boot cpu. We haven't
889 	 * spun any cpus up, but let's be paranoid.
890 	 */
891 	BUG_ON(boot_cpuid != smp_processor_id());
892 
893 	/* Fixup boot cpu */
894 	smp_store_cpu_info(boot_cpuid);
895 	cpu_callin_map[boot_cpuid] = 1;
896 
897 	for_each_possible_cpu(cpu) {
898 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
899 					GFP_KERNEL, cpu_to_node(cpu));
900 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
901 					GFP_KERNEL, cpu_to_node(cpu));
902 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
903 					GFP_KERNEL, cpu_to_node(cpu));
904 		/*
905 		 * numa_node_id() works after this.
906 		 */
907 		if (cpu_present(cpu)) {
908 			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
909 			set_cpu_numa_mem(cpu,
910 				local_memory_node(numa_cpu_lookup_table[cpu]));
911 		}
912 	}
913 
914 	/* Init the cpumasks so the boot CPU is related to itself */
915 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
916 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
917 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
918 
919 	init_big_cores();
920 	if (has_big_cores) {
921 		cpumask_set_cpu(boot_cpuid,
922 				cpu_smallcore_mask(boot_cpuid));
923 	}
924 
925 	if (smp_ops && smp_ops->probe)
926 		smp_ops->probe();
927 }
928 
929 void smp_prepare_boot_cpu(void)
930 {
931 	BUG_ON(smp_processor_id() != boot_cpuid);
932 #ifdef CONFIG_PPC64
933 	paca_ptrs[boot_cpuid]->__current = current;
934 #endif
935 	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
936 	current_set[boot_cpuid] = current;
937 }
938 
939 #ifdef CONFIG_HOTPLUG_CPU
940 
941 int generic_cpu_disable(void)
942 {
943 	unsigned int cpu = smp_processor_id();
944 
945 	if (cpu == boot_cpuid)
946 		return -EBUSY;
947 
948 	set_cpu_online(cpu, false);
949 #ifdef CONFIG_PPC64
950 	vdso_data->processorCount--;
951 #endif
952 	/* Update affinity of all IRQs previously aimed at this CPU */
953 	irq_migrate_all_off_this_cpu();
954 
955 	/*
956 	 * Depending on the details of the interrupt controller, it's possible
957 	 * that one of the interrupts we just migrated away from this CPU is
958 	 * actually already pending on this CPU. If we leave it in that state
959 	 * the interrupt will never be EOI'ed, and will never fire again. So
960 	 * temporarily enable interrupts here, to allow any pending interrupt to
961 	 * be received (and EOI'ed), before we take this CPU offline.
962 	 */
963 	local_irq_enable();
964 	mdelay(1);
965 	local_irq_disable();
966 
967 	return 0;
968 }
969 
970 void generic_cpu_die(unsigned int cpu)
971 {
972 	int i;
973 
974 	for (i = 0; i < 100; i++) {
975 		smp_rmb();
976 		if (is_cpu_dead(cpu))
977 			return;
978 		msleep(100);
979 	}
980 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
981 }
982 
983 void generic_set_cpu_dead(unsigned int cpu)
984 {
985 	per_cpu(cpu_state, cpu) = CPU_DEAD;
986 }
987 
988 /*
989  * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
990  * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
991  * which makes the delay in generic_cpu_die() not happen.
992  */
993 void generic_set_cpu_up(unsigned int cpu)
994 {
995 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
996 }
997 
998 int generic_check_cpu_restart(unsigned int cpu)
999 {
1000 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1001 }
1002 
1003 int is_cpu_dead(unsigned int cpu)
1004 {
1005 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
1006 }
1007 
1008 static bool secondaries_inhibited(void)
1009 {
1010 	return kvm_hv_mode_active();
1011 }
1012 
1013 #else /* HOTPLUG_CPU */
1014 
1015 #define secondaries_inhibited()		0
1016 
1017 #endif
1018 
1019 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1020 {
1021 #ifdef CONFIG_PPC64
1022 	paca_ptrs[cpu]->__current = idle;
1023 	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1024 				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
1025 #endif
1026 	idle->cpu = cpu;
1027 	secondary_current = current_set[cpu] = idle;
1028 }
1029 
1030 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1031 {
1032 	int rc, c;
1033 
1034 	/*
1035 	 * Don't allow secondary threads to come online if inhibited
1036 	 */
1037 	if (threads_per_core > 1 && secondaries_inhibited() &&
1038 	    cpu_thread_in_subcore(cpu))
1039 		return -EBUSY;
1040 
1041 	if (smp_ops == NULL ||
1042 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1043 		return -EINVAL;
1044 
1045 	cpu_idle_thread_init(cpu, tidle);
1046 
1047 	/*
1048 	 * The platform might need to allocate resources prior to bringing
1049 	 * up the CPU
1050 	 */
1051 	if (smp_ops->prepare_cpu) {
1052 		rc = smp_ops->prepare_cpu(cpu);
1053 		if (rc)
1054 			return rc;
1055 	}
1056 
1057 	/* Make sure callin-map entry is 0 (it can be left over from
1058 	 * a previous CPU hotplug).
1059 	 */
1060 	cpu_callin_map[cpu] = 0;
1061 
1062 	/* The information for processor bringup must
1063 	 * be written out to main store before we release
1064 	 * the processor.
1065 	 */
1066 	smp_mb();
1067 
1068 	/* wake up cpus */
1069 	DBG("smp: kicking cpu %d\n", cpu);
1070 	rc = smp_ops->kick_cpu(cpu);
1071 	if (rc) {
1072 		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1073 		return rc;
1074 	}
1075 
1076 	/*
1077 	 * wait to see if the cpu made a callin (is actually up).
1078 	 * use this value that I found through experimentation.
1079 	 * -- Cort
1080 	 */
1081 	if (system_state < SYSTEM_RUNNING)
1082 		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1083 			udelay(100);
1084 #ifdef CONFIG_HOTPLUG_CPU
1085 	else
1086 		/*
1087 		 * CPUs can take much longer to come up in the
1088 		 * hotplug case.  Wait five seconds.
1089 		 */
1090 		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1091 			msleep(1);
1092 #endif
1093 
1094 	if (!cpu_callin_map[cpu]) {
1095 		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1096 		return -ENOENT;
1097 	}
1098 
1099 	DBG("Processor %u found.\n", cpu);
1100 
1101 	if (smp_ops->give_timebase)
1102 		smp_ops->give_timebase();
1103 
1104 	/* Wait until cpu puts itself in the online & active maps */
1105 	spin_until_cond(cpu_online(cpu));
1106 
1107 	return 0;
1108 }
1109 
1110 /* Return the value of the reg property corresponding to the given
1111  * logical cpu.
1112  */
1113 int cpu_to_core_id(int cpu)
1114 {
1115 	struct device_node *np;
1116 	const __be32 *reg;
1117 	int id = -1;
1118 
1119 	np = of_get_cpu_node(cpu, NULL);
1120 	if (!np)
1121 		goto out;
1122 
1123 	reg = of_get_property(np, "reg", NULL);
1124 	if (!reg)
1125 		goto out;
1126 
1127 	id = be32_to_cpup(reg);
1128 out:
1129 	of_node_put(np);
1130 	return id;
1131 }
1132 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1133 
1134 /* Helper routines for cpu to core mapping */
1135 int cpu_core_index_of_thread(int cpu)
1136 {
1137 	return cpu >> threads_shift;
1138 }
1139 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1140 
1141 int cpu_first_thread_of_core(int core)
1142 {
1143 	return core << threads_shift;
1144 }
1145 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1146 
1147 /* Must be called when no change can occur to cpu_present_mask,
1148  * i.e. during cpu online or offline.
1149  */
1150 static struct device_node *cpu_to_l2cache(int cpu)
1151 {
1152 	struct device_node *np;
1153 	struct device_node *cache;
1154 
1155 	if (!cpu_present(cpu))
1156 		return NULL;
1157 
1158 	np = of_get_cpu_node(cpu, NULL);
1159 	if (np == NULL)
1160 		return NULL;
1161 
1162 	cache = of_find_next_cache_node(np);
1163 
1164 	of_node_put(np);
1165 
1166 	return cache;
1167 }
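/*
 * cpu_to_l2cache() returns the device node of the CPU's next-level cache
 * (with a reference held, so callers must of_node_put() it);
 * update_mask_by_l2() below relies on two CPUs resolving to the same node
 * to decide that they share an L2.
 */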
1168 
1169 static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
1170 {
1171 	struct device_node *l2_cache, *np;
1172 	int i;
1173 
1174 	l2_cache = cpu_to_l2cache(cpu);
1175 	if (!l2_cache)
1176 		return false;
1177 
1178 	for_each_cpu(i, cpu_online_mask) {
1179 		/*
1180 		 * When updating the masks the current CPU has not yet been
1181 		 * marked online, but we still need to update the cache masks.
1182 		 */
1183 		np = cpu_to_l2cache(i);
1184 		if (!np)
1185 			continue;
1186 
1187 		if (np == l2_cache)
1188 			set_cpus_related(cpu, i, mask_fn);
1189 
1190 		of_node_put(np);
1191 	}
1192 	of_node_put(l2_cache);
1193 
1194 	return true;
1195 }
1196 
1197 #ifdef CONFIG_HOTPLUG_CPU
1198 static void remove_cpu_from_masks(int cpu)
1199 {
1200 	int i;
1201 
1202 	/* NB: cpu_core_mask is a superset of the others */
1203 	for_each_cpu(i, cpu_core_mask(cpu)) {
1204 		set_cpus_unrelated(cpu, i, cpu_core_mask);
1205 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1206 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1207 		if (has_big_cores)
1208 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1209 	}
1210 }
1211 #endif
1212 
1213 static inline void add_cpu_to_smallcore_masks(int cpu)
1214 {
1215 	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
1216 	int i, first_thread = cpu_first_thread_sibling(cpu);
1217 
1218 	if (!has_big_cores)
1219 		return;
1220 
1221 	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1222 
1223 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
1224 		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
1225 			set_cpus_related(i, cpu, cpu_smallcore_mask);
1226 	}
1227 }
1228 
1229 static void add_cpu_to_masks(int cpu)
1230 {
1231 	int first_thread = cpu_first_thread_sibling(cpu);
1232 	int chipid = cpu_to_chip_id(cpu);
1233 	int i;
1234 
1235 	/*
1236 	 * This CPU will not be in the online mask yet, so we need to manually
1237 	 * add it to its own thread sibling mask.
1238 	 */
1239 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1240 
1241 	for (i = first_thread; i < first_thread + threads_per_core; i++)
1242 		if (cpu_online(i))
1243 			set_cpus_related(i, cpu, cpu_sibling_mask);
1244 
1245 	add_cpu_to_smallcore_masks(cpu);
1246 	/*
1247 	 * Copy the thread sibling mask into the cache sibling mask
1248 	 * and mark any CPUs that share an L2 with this CPU.
1249 	 */
1250 	for_each_cpu(i, cpu_sibling_mask(cpu))
1251 		set_cpus_related(cpu, i, cpu_l2_cache_mask);
1252 	update_mask_by_l2(cpu, cpu_l2_cache_mask);
1253 
1254 	/*
1255 	 * Copy the cache sibling mask into the core sibling mask and mark
1256 	 * any CPUs on the same chip as this CPU.
1257 	 */
1258 	for_each_cpu(i, cpu_l2_cache_mask(cpu))
1259 		set_cpus_related(cpu, i, cpu_core_mask);
1260 
1261 	if (chipid == -1)
1262 		return;
1263 
1264 	for_each_cpu(i, cpu_online_mask)
1265 		if (cpu_to_chip_id(i) == chipid)
1266 			set_cpus_related(cpu, i, cpu_core_mask);
1267 }
1268 
1269 static bool shared_caches;
1270 
1271 /* Activate a secondary processor. */
1272 void start_secondary(void *unused)
1273 {
1274 	unsigned int cpu = smp_processor_id();
1275 	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1276 
1277 	mmgrab(&init_mm);
1278 	current->active_mm = &init_mm;
1279 
1280 	smp_store_cpu_info(cpu);
1281 	set_dec(tb_ticks_per_jiffy);
1282 	preempt_disable();
1283 	cpu_callin_map[cpu] = 1;
1284 
1285 	if (smp_ops->setup_cpu)
1286 		smp_ops->setup_cpu(cpu);
1287 	if (smp_ops->take_timebase)
1288 		smp_ops->take_timebase();
1289 
1290 	secondary_cpu_time_init();
1291 
1292 #ifdef CONFIG_PPC64
1293 	if (system_state == SYSTEM_RUNNING)
1294 		vdso_data->processorCount++;
1295 
1296 	vdso_getcpu_init();
1297 #endif
1298 	set_numa_node(numa_cpu_lookup_table[cpu]);
1299 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1300 
1301 	/* Update topology CPU masks */
1302 	add_cpu_to_masks(cpu);
1303 
1304 	if (has_big_cores)
1305 		sibling_mask = cpu_smallcore_mask;
1306 	/*
1307 	 * Check for any shared caches. Note that this must be done on a
1308 	 * per-core basis because one core in the pair might be disabled.
1309 	 */
1310 	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
1311 		shared_caches = true;
1312 
1313 	smp_wmb();
1314 	notify_cpu_starting(cpu);
1315 	set_cpu_online(cpu, true);
1316 
1317 	boot_init_stack_canary();
1318 
1319 	local_irq_enable();
1320 
1321 	/* We can enable ftrace for secondary cpus now */
1322 	this_cpu_enable_ftrace();
1323 
1324 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1325 
1326 	BUG();
1327 }
1328 
1329 #ifdef CONFIG_PROFILING
1330 int setup_profiling_timer(unsigned int multiplier)
1331 {
1332 	return 0;
1333 }
1334 #endif
1335 
1336 #ifdef CONFIG_SCHED_SMT
1337 /* cpumask of CPUs with asymmetric SMT dependency */
1338 static int powerpc_smt_flags(void)
1339 {
1340 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1341 
1342 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1343 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1344 		flags |= SD_ASYM_PACKING;
1345 	}
1346 	return flags;
1347 }
1348 #endif
1349 
1350 static struct sched_domain_topology_level powerpc_topology[] = {
1351 #ifdef CONFIG_SCHED_SMT
1352 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1353 #endif
1354 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1355 	{ NULL, },
1356 };
1357 
1358 /*
1359  * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1360  * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1361  * since the migrated task remains cache hot. We want to take advantage of this
1362  * at the scheduler level so an extra topology level is required.
1363  */
1364 static int powerpc_shared_cache_flags(void)
1365 {
1366 	return SD_SHARE_PKG_RESOURCES;
1367 }
1368 
1369 /*
1370  * We can't just pass cpu_l2_cache_mask() directly because it
1371  * returns a non-const pointer and the compiler barfs on that.
1372  */
1373 static const struct cpumask *shared_cache_mask(int cpu)
1374 {
1375 	return cpu_l2_cache_mask(cpu);
1376 }
1377 
1378 #ifdef CONFIG_SCHED_SMT
1379 static const struct cpumask *smallcore_smt_mask(int cpu)
1380 {
1381 	return cpu_smallcore_mask(cpu);
1382 }
1383 #endif
1384 
1385 static struct sched_domain_topology_level power9_topology[] = {
1386 #ifdef CONFIG_SCHED_SMT
1387 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1388 #endif
1389 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1390 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1391 	{ NULL, },
1392 };
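/*
 * power9_topology differs from powerpc_topology only by the extra CACHE
 * level built from cpu_l2_cache_mask(); smp_cpus_done() selects it when
 * any CPU reports shared_caches, otherwise the standard topology is used.
 */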
1393 
1394 void __init smp_cpus_done(unsigned int max_cpus)
1395 {
1396 	/*
1397 	 * We are running pinned to the boot CPU, see rest_init().
1398 	 */
1399 	if (smp_ops && smp_ops->setup_cpu)
1400 		smp_ops->setup_cpu(boot_cpuid);
1401 
1402 	if (smp_ops && smp_ops->bringup_done)
1403 		smp_ops->bringup_done();
1404 
1405 	/*
1406 	 * On a shared LPAR, associativity needs to be requested.
1407 	 * Hence, get numa topology before dumping cpu topology
1408 	 */
1409 	shared_proc_topology_init();
1410 	dump_numa_cpu_topology();
1411 
1412 #ifdef CONFIG_SCHED_SMT
1413 	if (has_big_cores) {
1414 		pr_info("Using small cores at SMT level\n");
1415 		power9_topology[0].mask = smallcore_smt_mask;
1416 		powerpc_topology[0].mask = smallcore_smt_mask;
1417 	}
1418 #endif
1419 	/*
1420 	 * If any CPU detects that it's sharing a cache with another CPU then
1421 	 * use the deeper topology that is aware of this sharing.
1422 	 */
1423 	if (shared_caches) {
1424 		pr_info("Using shared cache scheduler topology\n");
1425 		set_sched_topology(power9_topology);
1426 	} else {
1427 		pr_info("Using standard scheduler topology\n");
1428 		set_sched_topology(powerpc_topology);
1429 	}
1430 }
1431 
1432 #ifdef CONFIG_HOTPLUG_CPU
1433 int __cpu_disable(void)
1434 {
1435 	int cpu = smp_processor_id();
1436 	int err;
1437 
1438 	if (!smp_ops->cpu_disable)
1439 		return -ENOSYS;
1440 
1441 	this_cpu_disable_ftrace();
1442 
1443 	err = smp_ops->cpu_disable();
1444 	if (err)
1445 		return err;
1446 
1447 	/* Update sibling maps */
1448 	remove_cpu_from_masks(cpu);
1449 
1450 	return 0;
1451 }
1452 
1453 void __cpu_die(unsigned int cpu)
1454 {
1455 	if (smp_ops->cpu_die)
1456 		smp_ops->cpu_die(cpu);
1457 }
1458 
1459 void cpu_die(void)
1460 {
1461 	/*
1462 	 * Disable on the down path. This will be re-enabled by
1463 	 * start_secondary() via start_secondary_resume() below
1464 	 */
1465 	this_cpu_disable_ftrace();
1466 
1467 	if (ppc_md.cpu_die)
1468 		ppc_md.cpu_die();
1469 
1470 	/* If we return, we re-enter start_secondary */
1471 	start_secondary_resume();
1472 }
1473 
1474 #endif
1475