/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
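/* Each CPU's word lives on its own cache line, so one processor
   posting an IPI does not bounce the cache line holding another
   processor's pending bits.  */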
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

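/* Each message type is a bit index into ipi_data[].bits; see
   send_ipi_message() and handle_ipi().  */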
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __cpuinit
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop (with irqs enabled) before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we have written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __cpuinit
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

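	/* Shared-buffer message format: the first quadword of
	   ipc_buffer carries the length, the string itself starts at
	   the second quadword.  */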
	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

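			/* Turn CR and CR/LF from the console into
			   spaces so the message prints on one line.  */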
			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __cpuinit
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the
	   meantime.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
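			/* 0x1cc checks five of the SRM per-CPU state
			   bits at once; only processors whose flags
			   show them fully available are counted.  */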
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;
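
	/* BogoMIPS = loops_per_jiffy * HZ / 500000; report the sum to
	   two decimal places.  */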
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}


void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

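	/* Post the message bits first, then raise the interprocessor
	   interrupt; the barriers order the stores so a target never
	   takes the IPI before its pending bit is visible.  */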
	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
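	/* The xchg atomically claims the whole batch of pending
	   messages; senders may post new bits meanwhile, so loop until
	   the word reads back zero.  */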
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

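		/* Peel off the lowest set bit and turn it into a
		   message number.  */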
		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		case IPI_CPU_STOP:
			halt();
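			/* halt() does not return, so no break is
			   needed before the default case.  */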

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom;
	cpumask_copy(&to_whom, cpu_possible_mask);
	cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpuid)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
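
/* While this CPU is midway through allocating a new ASN, the current
   context cannot safely be flushed in place; the IPI handlers below
   then treat the mm as "other", forcing a fresh context instead.  */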
static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
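		/* If we are the only user of this mm, just invalidate
		   the other CPUs' cached contexts; they will allocate
		   a fresh ASN the next time they activate this mm, so
		   no cross-CPU flush is needed.  */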
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
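		/* Same single-user shortcut as in flush_tlb_mm.  */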
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
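		/* Same single-user shortcut as in flush_tlb_mm.  */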
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}