/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

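/*
 * IPI message types dispatched by handle_IPI() below.  IPI_WAKEUP has an
 * empty handler: the interrupt itself is the wakeup for a secondary CPU
 * waiting to boot, so there is nothing left to do on arrival.
 */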
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

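/*
 * Release one secondary CPU from its startup wait.  Publishing the CPU
 * number in secondary_release and sending IPI_WAKEUP lets the secondary's
 * early startup code (presumably spinning in the architecture's head.S)
 * fall through into secondary_start_kernel().
 */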
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

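/*
 * Bring one secondary CPU online: publish its idle thread and kernel
 * page tables, kick it via boot_secondary(), then wait (for at most one
 * second) for secondary_start_kernel() to signal cpu_running.
 */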
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

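/*
 * C entry point for a secondary CPU, reached from the early startup
 * code once secondary_release names this CPU.
 */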
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

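/*
 * IPI dispatch, called from the interrupt controller driver when one of
 * the messages above arrives.
 */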
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

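/*
 * Park this CPU: mark it offline, then try to doze through the power
 * management unit (when one is present, per the UPR PMP bit), falling
 * back to a busy loop.
 */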
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

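/*
 * The platform's IPI-capable interrupt controller driver (typically the
 * OMPIC driver on OpenRISC SMP systems) registers its cross-call
 * function here during boot.
 */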
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

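/*
 * Flush an mm's TLB entries on every CPU in cmask.  When the local CPU
 * is the only CPU in the mask, flush directly and skip the cross-call.
 */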
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

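/*
 * Flush a virtual address range on every CPU in cmask.  Ranges of one
 * page or less use the cheaper single-page flush; as above, a mask
 * holding only the local CPU avoids the cross-call entirely.
 */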
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

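/*
 * Public entry points: flush_tlb_all() broadcasts to every CPU, while
 * the mm/page/range variants target the CPUs recorded in the mm's
 * cpumask (or all online CPUs when no vma is given).
 */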
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;
	smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

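/*
 * On typical OpenRISC implementations each core's instruction cache is
 * private with no hardware coherence broadcast, so the invalidate is
 * pushed to every CPU and performed locally.
 */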
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);