// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

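/*
 * Record CPU "cpu" in the sibling mask of every CPU that shares a core
 * with it (and vice versa). With only one sibling per core the CPU is
 * simply its own sibling.
 */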
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

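/*
 * Record CPU "cpu" in the core mask of every CPU that lives in the same
 * physical package, and add those CPUs to its own core mask in turn.
 */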
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

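/*
 * Install the platform's SMP operations. Only one set of ops is active
 * at a time; a warning is printed if an earlier registration is
 * overridden.
 */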
void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

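/*
 * Send an IPI of the given type to every CPU in "mask". If the Cluster
 * Power Controller is present, cores that have not yet joined
 * cpu_coherent_mask are explicitly powered up so the interrupt is not
 * lost while the target core is still non-coherent.
 */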
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

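/*
 * Wire up one IPI virq: mark it as a per-CPU interrupt and request it
 * with the given handler. Failure here is fatal since SMP cannot work
 * without its IPIs.
 */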
static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

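/*
 * Reserve the call-function and reschedule IPIs for the CPUs in "mask"
 * from the platform's IPI IRQ domain and hook up their handlers. On a
 * single-CPU system with no IPI domain this simply returns.
 */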
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs so fail
	 * loudly if that is the case. Otherwise simply return, skipping IPI
	 * setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}

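/*
 * Tear down the IPIs previously allocated for the CPUs in "mask",
 * releasing the per-CPU handlers and the virqs reserved from the IPI
 * IRQ domain.
 */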
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}


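/*
 * Allocate IPIs for all possible CPUs at early boot and cache the irq
 * descriptors used by mips_smp_send_ipi_mask(). Nothing to do on
 * uniprocessor configurations.
 */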
static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

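/*
 * Ask every other CPU to take itself offline and spin with interrupts
 * disabled.
 */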
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

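/*
 * Bring a secondary CPU online: boot it via the platform ops, wait up
 * to a second for it to reach start_secondary(), synchronise the cycle
 * counters and then wait for it to mark itself online.
 */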
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

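/*
 * Flush every TLB entry on every CPU. With MMIDs a single globalized
 * invalidate (ginvt) reaches all coherent CPUs; otherwise an IPI runs
 * local_flush_tlb_all() everywhere.
 */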
void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

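/*
 * Flush the TLB entries covering a user address range. With MMIDs the
 * range is invalidated with globalized ginvt operations under the mm's
 * MMID; otherwise other CPUs are either IPI'd or have their context for
 * this mm invalidated so a fresh one is allocated at switch_mm time.
 */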
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * ASID without it appearing to has_valid_asid() as if
			 * mm has been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

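/*
 * Flush the TLB entry for a single user page. With MMIDs a globalized
 * ginvt under the mm's MMID covers all CPUs; otherwise remote CPUs are
 * IPI'd or have their context for this mm invalidated, and the local
 * entry is flushed directly.
 */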
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate ASID without it appearing to
			 * has_valid_asid() as if mm has been completely unused
			 * by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

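/*
 * Deliver a tick to the CPUs in "mask" on behalf of a broadcast
 * clockevent device, using the per-CPU call_single_data set up in
 * tick_broadcast_init() below.
 */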
void tick_broadcast(const struct cpumask *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static int __init tick_broadcast_init(void)
{
	call_single_data_t *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */