/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

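/* CPUs known to be up and coherent; checked before IPIs are sent to other cores */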
cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

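/*
 * Record @cpu in the sibling setup mask and mark it as a sibling of every
 * already-recorded CPU that shares its package and core (and vice versa).
 */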
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
				    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

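/*
 * Same pattern at package granularity: CPUs in the same physical package
 * are recorded in each other's core map.
 */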
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
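 *
 * temp_foreign_map collects one representative VPE per online core; each
 * CPU's foreign map is then that set minus the CPU's own siblings, i.e.
 * one VPE on every core other than its own.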
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

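	/*
	 * If a target CPU sits in a core that is not yet coherent (e.g. it
	 * has been powered down), use the CPC to power the core up again so
	 * the IPI can actually be taken; keep retrying until the CPU shows
	 * up in cpu_coherent_mask.
	 */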
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			core = cpu_data[cpu].core;

			if (core == current_cpu_data.core)
				continue;

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other(core, 0);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
				    struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which only use IPI domains some of the time,
	 * depending upon a configuration we don't know until runtime. An
	 * example is Malta, where we may compile in support for GIC & the
	 * MT ASE, but run on a system which has multiple VPEs in a single
	 * core and doesn't include a GIC. Until all IPI implementations
	 * have been converted to use IPI domains the best we can do here
	 * is to return & hope some other code sets up the IPIs.
	 */
	if (!ipidomain)
		return 0;

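	/* Reserve one IPI for cross-CPU function calls and one for rescheduling. */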
	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

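	/*
	 * A per-cpu IPI domain hands back a contiguous virq range (one virq
	 * per CPU in the mask), so each CPU's IPIs need their own irqaction
	 * setup; otherwise a single virq is shared by all CPUs.
	 */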
	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			remove_irq(call_virq + cpu, &irq_call);
			remove_irq(sched_virq + cpu, &irq_resched);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}


static int __init mips_smp_ipi_init(void)
{
	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

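		/*
		 * The mm is only active here: clear the other CPUs' TLB
		 * contexts so switch_mm() allocates a fresh ASID there,
		 * instead of sending IPIs now.
		 */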
		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush the icache
			 * if the VMA is executable; otherwise we must
			 * invalidate the ASID without making it appear to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = !exec;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

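/* Kernel mappings are shared by every CPU, so all of them must flush. */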
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID without making it appear to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 1;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);

}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

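/*
 * Only kick the CSD when the count goes from 0 to 1; a non-zero count means
 * a broadcast IPI is already in flight to that CPU, and the per-cpu CSD must
 * not be reused until the callee has run and reset the count to zero.
 */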
void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */