// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PARISC Architecture-dependent parts of process handling
 * based on the work for i386
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
 * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
 * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
 * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
 * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 */

#include <stdarg.h>

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/sections.h>

#define COMMAND_GLOBAL	F_EXTEND(0xfffe0030)
#define CMD_RESET	5	/* reset any module */
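/*
 * COMMAND_GLOBAL is the broadcast command register address (written with
 * gsc_writel() below); writing CMD_RESET there is the plain broadcast
 * reset that machine_restart() falls back to if pdc_do_reset() returns.
 */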

/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) which may cause a broadcast reset to lock up
** the system. An HVERSION-dependent PDC call was developed
** to perform a "safe", platform-specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
	/*
	** If user has modified the Firmware Selftest Bitmap,
	** run the tests specified in the bitmap after the
	** system is rebooted w/PDC_DO_RESET.
	**
	** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
	**
	** Using "directed resets" at each processor with the MEM_TOC
	** vector cleared will also avoid running destructive
	** memory self tests. (Not implemented yet)
	*/
	if (ftc_bitmap) {
		pdc_do_firm_test_reset(ftc_bitmap);
	}
#endif
	/* set up a new led state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* "Normal" system reset */
	pdc_do_reset();

	/* Nope...box should reset with just CMD_RESET now */
	gsc_writel(CMD_RESET, COMMAND_GLOBAL);

	/* Wait for RESET to lay us to rest. */
	while (1) ;

}

void (*chassis_power_off)(void);

/*
 * This routine is called from sys_reboot to actually turn off the
 * machine
 */
void machine_power_off(void)
{
	/* If there is a registered power off handler, call it. */
	if (chassis_power_off)
		chassis_power_off();

	/* Put the soft power button back under hardware control.
	 * If the user had already pressed the power button, the
	 * following call will immediately power off. */
	pdc_soft_power_button(0);

	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* ipmi_poweroff may have been installed. */
	if (pm_power_off)
		pm_power_off();

	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk("Power off or press RETURN to reboot.\n");

	/* prevent soft lockup/stalled CPU messages for endless loop. */
	rcu_sysrq_start();
	lockup_detector_soft_poweroff();
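	/* Poll the firmware console for a keypress; 13 is the ASCII
	 * carriage return produced by the RETURN key. */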
	while (1) {
		/* reboot if user presses RETURN key */
		if (pdc_iodc_getc() == 13) {
			printk("Rebooting...\n");
			machine_restart(NULL);
		}
	}
}

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void machine_halt(void)
{
	machine_power_off();
}

void flush_thread(void)
{
	/* Only needs to handle fpu stuff or perf monitors.
	** REVISIT: several arches implement a "lazy fpu state".
	*/
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * Idle thread support
 *
 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
 * QEMU idle the host too.
 */

int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);

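/*
 * Both idle hooks below execute an "or rX,rX,rX" instruction, i.e. a
 * register copied onto itself.  That is a plain nop on real hardware,
 * but QEMU recognizes these particular encodings as hints to idle the
 * host or to offline the virtual CPU, as noted in the comments below.
 */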
void __cpuidle arch_cpu_idle_dead(void)
{
	/* nop on real hardware, qemu will offline CPU. */
	asm volatile("or %%r31,%%r31,%%r31\n":::);
}

void __cpuidle arch_cpu_idle(void)
{
	raw_local_irq_enable();

	/* nop on real hardware, qemu will idle sleep. */
	asm volatile("or %%r10,%%r10,%%r10\n":::);
}

static int __init parisc_idle_init(void)
{
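	/* On physical hardware keep the polling idle loop; only under
	 * QEMU does the sleep hint in arch_cpu_idle() let the host idle. */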
	if (!running_on_qemu)
		cpu_idle_poll_ctrl(1);

	return 0;
}
arch_initcall(parisc_idle_init);

/*
 * Copy architecture-specific thread state
 */
int
copy_thread(unsigned long clone_flags, unsigned long usp,
	    unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *cregs = &(p->thread.regs);
	void *stack = task_stack_page(p);

	/* We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text */
	extern void * const ret_from_kernel_thread;
	extern void * const child_return;

	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread */
		memset(cregs, 0, sizeof(struct pt_regs));
		if (!usp) /* idle thread */
			return 0;
		/* Must exit via ret_from_kernel_thread in order
		 * to call schedule_tail()
		 */
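		/* The parisc stack grows upward, so the initial kernel SP
		 * is placed near the start of the stack area, just past
		 * the aligned thread area plus one frame. */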
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
		/*
		 * Copy function and argument to be called from
		 * ret_from_kernel_thread.
		 */
#ifdef CONFIG_64BIT
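		/* On 64-bit, usp points at a function descriptor
		 * (Elf64_Fdesc): word 2 is the entry address, word 3 the
		 * GP.  Pass them in %r26 (arg0) and %r27 (the global data
		 * pointer) for ret_from_kernel_thread. */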
		cregs->gr[27] = ((unsigned long *)usp)[3];
		cregs->gr[26] = ((unsigned long *)usp)[2];
#else
		cregs->gr[26] = usp;
#endif
		cregs->gr[25] = kthread_arg;
	} else {
		/* user thread */
		/* usp must be word aligned. This also prevents users from
		 * passing in the value 1 (which is the signal for a special
		 * return for a kernel thread) */
		if (usp) {
			usp = ALIGN(usp, 4);
			if (likely(usp))
				cregs->gr[30] = usp;
		}
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &child_return;

		/* Setup thread TLS area */
		if (clone_flags & CLONE_SETTLS)
			cregs->cr27 = tls;
	}

	return 0;
}

unsigned long
get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * Unwind the blocked task's kernel stack; the first return
	 * address outside the scheduler is where it is sleeping.
	 */

	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < MAX_UNWIND_ENTRIES);
	return 0;
}

#ifdef CONFIG_64BIT
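/*
 * On 64-bit parisc a C function pointer refers to a function descriptor
 * rather than to the code itself; these helpers recover the real entry
 * address.  get_kernel_nofault() is used so that a bogus pointer is
 * returned unchanged instead of faulting.
 */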
void *dereference_function_descriptor(void *ptr)
{
	Elf64_Fdesc *desc = ptr;
	void *p;

	if (!get_kernel_nofault(p, (void *)&desc->addr))
		ptr = p;
	return ptr;
}

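/*
 * Only dereference pointers that fall within the kernel's own .opd
 * (function descriptor) section; anything outside that range is
 * returned as-is.
 */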
void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd ||
			ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

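/*
 * Randomize the heap start by up to BRK_RND_MASK pages.  If the addition
 * wraps around, fall back to the unrandomized brk value.
 */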
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;
	return ret;
}