// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PARISC Architecture-dependent parts of process handling
 * based on the work for i386
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
 * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
 * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
 * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
 * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 */
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/sections.h>

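/*
 * Fallback broadcast reset: if the PDC reset calls in machine_restart()
 * below return, CMD_RESET is written to this global command register to
 * reset every module.
 */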
#define COMMAND_GLOBAL	F_EXTEND(0xfffe0030)
#define CMD_RESET	5	/* reset any module */

/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) that may cause a broadcast reset to lock up
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
	/*
	** If the user has modified the Firmware Selftest Bitmap,
	** run the tests specified in the bitmap after the
	** system is rebooted w/PDC_DO_RESET.
	**
	** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
	**
	** Using "directed resets" at each processor with the MEM_TOC
	** vector cleared will also avoid running destructive
	** memory self tests. (Not implemented yet)
	*/
	if (ftc_bitmap) {
		pdc_do_firm_test_reset(ftc_bitmap);
	}
#endif
	/* set up a new led state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* "Normal" system reset */
	pdc_do_reset();

	/* Nope...box should reset with just CMD_RESET now */
	gsc_writel(CMD_RESET, COMMAND_GLOBAL);

	/* Wait for RESET to lay us to rest. */
	while (1) ;
}

void (*chassis_power_off)(void);

/*
 * This routine is called from sys_reboot to actually turn off the
 * machine
 */
void machine_power_off(void)
{
	/* If there is a registered power off handler, call it. */
	if (chassis_power_off)
		chassis_power_off();

	/* Put the soft power button back under hardware control.
	 * If the user had already pressed the power button, the
	 * following call will immediately power off. */
	pdc_soft_power_button(0);

	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* ipmi_poweroff may have been installed. */
	if (pm_power_off)
		pm_power_off();

	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk("Power off or press RETURN to reboot.\n");

	/* prevent soft lockup/stalled CPU messages for the endless loop. */
	rcu_sysrq_start();
	lockup_detector_soft_poweroff();
	while (1) {
		/* reboot if user presses RETURN key */
		if (pdc_iodc_getc() == 13) {
			printk("Rebooting...\n");
			machine_restart(NULL);
		}
	}
}

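/*
 * Hook for an installed power-off handler (e.g. ipmi_poweroff, see
 * machine_power_off() above).
 */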
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void machine_halt(void)
{
	machine_power_off();
}

void flush_thread(void)
{
	/* Only needs to handle fpu stuff or perf monitors.
	** REVISIT: several arches implement a "lazy fpu state".
	*/
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * Idle thread support
 *
 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
 * QEMU idle the host too.
 */

int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);

void __cpuidle arch_cpu_idle_dead(void)
{
	/* nop on real hardware, qemu will offline CPU. */
	asm volatile("or %%r31,%%r31,%%r31\n":::);
}

void __cpuidle arch_cpu_idle(void)
{
	raw_local_irq_enable();

	/* nop on real hardware, qemu will idle sleep. */
	asm volatile("or %%r10,%%r10,%%r10\n":::);
}

static int __init parisc_idle_init(void)
{
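	/*
	 * Real hardware treats the qemu "sleep" hints above as plain nops,
	 * so fall back to polling in the idle loop there.
	 */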
	if (!running_on_qemu)
		cpu_idle_poll_ctrl(1);

	return 0;
}
arch_initcall(parisc_idle_init);

/*
 * Copy architecture-specific thread state
 */
int
copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *cregs = &(p->thread.regs);
	void *stack = task_stack_page(p);

	/* We have to use void * instead of a function pointer, because
	 * function pointers aren't pointers to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text */
	extern void * const ret_from_kernel_thread;
	extern void * const child_return;

	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread */
		memset(cregs, 0, sizeof(struct pt_regs));
		if (!usp) /* idle thread */
			return 0;
		/* Must exit via ret_from_kernel_thread in order
		 * to call schedule_tail()
		 */
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
		/*
		 * Copy function and argument to be called from
		 * ret_from_kernel_thread.
		 */
#ifdef CONFIG_64BIT
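		/*
		 * On 64-bit, usp points at the function descriptor of the
		 * kernel function to call (a plain pointer is not the code
		 * address here, see the comment on the externs above), so
		 * copy the needed descriptor words into %r27 and %r26 for
		 * ret_from_kernel_thread.
		 */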
		cregs->gr[27] = ((unsigned long *)usp)[3];
		cregs->gr[26] = ((unsigned long *)usp)[2];
#else
		cregs->gr[26] = usp;
#endif
		cregs->gr[25] = kthread_arg;
	} else {
		/* user thread */
		/* usp must be word aligned. This also prevents users from
		 * passing in the value 1 (which is the signal for a special
		 * return for a kernel thread) */
		if (usp) {
			usp = ALIGN(usp, 4);
			if (likely(usp))
				cregs->gr[30] = usp;
		}
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &child_return;

		/* Setup thread TLS area */
		if (clone_flags & CLONE_SETTLS)
			cregs->cr27 = tls;
	}

	return 0;
}

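/*
 * get_wchan(): unwind the kernel stack of a blocked task and return the
 * first instruction pointer outside the scheduler, i.e. the place where
 * the task went to sleep.
 */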
unsigned long
get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || task_is_running(p))
		return 0;

	/*
	 * These bracket the sleeping functions..
	 */

	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)
			return 0;
		if (task_is_running(p))
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < MAX_UNWIND_ENTRIES);
	return 0;
}

#ifdef CONFIG_64BIT
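/*
 * On 64-bit, a "function pointer" is really a pointer to an Elf64_Fdesc;
 * read the code address out of the descriptor, falling back to the
 * original pointer if the access faults.
 */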
void *dereference_function_descriptor(void *ptr)
{
	Elf64_Fdesc *desc = ptr;
	void *p;

	if (!get_kernel_nofault(p, (void *)&desc->addr))
		ptr = p;
	return ptr;
}

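/*
 * Only dereference pointers that actually lie in the kernel's .opd
 * section (where the function descriptors live); anything else is
 * already a plain address.
 */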
void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd ||
			ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif

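/*
 * Heap (brk) randomization: arch_randomize_brk() moves the initial brk
 * up by a random number of pages, bounded by BRK_RND_MASK.
 */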
static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;
	return ret;
}