// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
 *
 * This file contains the architecture-dependent parts of process handling.
 *
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
#include <asm/traps.h>
#include <asm/switch_to.h>

/*
 * Wait for the next interrupt and enable local interrupts
 */
void arch_cpu_idle(void)
{
	int tmp;

	/*
	 * Quickly jump straight into the interrupt entry point without actually
	 * triggering an interrupt. When TXSTATI gets read the processor will
	 * block until an interrupt is triggered.
	 */
	asm volatile (/* Switch into ISTAT mode */
		      "RTH\n\t"
		      /* Enable local interrupts */
		      "MOV	TXMASKI, %1\n\t"
		      /*
		       * We can't directly "SWAP PC, PCX", so we swap via a
		       * temporary. Essentially we do:
		       *  PCX_new = 1f (the place to continue execution)
		       *  PC = PCX_old
		       */
		      "ADD	%0, CPC0, #(1f-.)\n\t"
		      "SWAP	PCX, %0\n\t"
		      "MOV	PC, %0\n"
		      /* Continue execution here with interrupts enabled */
		      "1:"
		      : "=a" (tmp)
		      : "r" (get_trigger_mask()));
}
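
/*
 * Caller contract, roughly (a sketch of the generic idle path of this
 * kernel era, kernel/sched/idle.c; shown for illustration only):
 *
 *	local_irq_disable();
 *	if (!need_resched())
 *		arch_cpu_idle();	// expected to re-enable interrupts
 *
 * Here the re-enable happens via the TXMASKI write in the asm above.
 */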

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*soc_restart)(char *cmd);
void (*soc_halt)(void);
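
/*
 * Board/SoC code installs these hooks during boot. A minimal sketch,
 * using a hypothetical my_soc driver (all names illustrative only):
 *
 *	static void my_soc_restart(char *cmd)
 *	{
 *		writel(MY_SOC_RESET_MAGIC, my_soc_reset_reg);
 *	}
 *
 *	static int __init my_soc_setup(void)
 *	{
 *		soc_restart = my_soc_restart;
 *		pm_power_off = my_soc_power_off;
 *		return 0;
 *	}
 */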

void machine_restart(char *cmd)
{
	if (soc_restart)
		soc_restart(cmd);
	hard_processor_halt(HALT_OK);
}

void machine_halt(void)
{
	if (soc_halt)
		soc_halt();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}

#define FLAG_Z 0x8
#define FLAG_N 0x4
#define FLAG_O 0x2
#define FLAG_C 0x1
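
/*
 * Worked example of the flag bits: Flags == 0x9 sets Z and C only, so
 * show_regs() below renders it as "ZnoC".
 */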

void show_regs(struct pt_regs *regs)
{
	int i;
	const char *AX0_names[] = {"A0StP", "A0FrP"};
	const char *AX1_names[] = {"A1GbP", "A1LbP"};

	const char *DX0_names[] = {
		"D0Re0",
		"D0Ar6",
		"D0Ar4",
		"D0Ar2",
		"D0FrT",
		"D0.5 ",
		"D0.6 ",
		"D0.7 "
	};

	const char *DX1_names[] = {
		"D1Re0",
		"D1Ar5",
		"D1Ar3",
		"D1Ar1",
		"D1RtP",
		"D1.5 ",
		"D1.6 ",
		"D1.7 "
	};

	show_regs_print_info(KERN_INFO);

	pr_info(" pt_regs @ %p\n", regs);
	pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
	pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
		regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
		regs->ctx.Flags & FLAG_N ? 'N' : 'n',
		regs->ctx.Flags & FLAG_O ? 'O' : 'o',
		regs->ctx.Flags & FLAG_C ? 'C' : 'c');
	pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
	pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);

	/* AX regs */
	for (i = 0; i < 2; i++) {
		pr_info(" %s = 0x%08x    ",
			AX0_names[i],
			regs->ctx.AX[i].U0);
		printk(" %s = 0x%08x\n",
			AX1_names[i],
			regs->ctx.AX[i].U1);
	}

	if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
		pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

	/* Special place with AXx.2 */
	pr_info(" A0.2  = 0x%08x    ",
		regs->ctx.Ext.AX2.U0);
	printk(" A1.2  = 0x%08x\n",
		regs->ctx.Ext.AX2.U1);

	/* 'extended' AX regs (nominally, just AXx.3) */
	for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
		pr_info(" A0.%d  = 0x%08x    ", i + 3, regs->ctx.AX3[i].U0);
		printk(" A1.%d  = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
	}

	for (i = 0; i < 8; i++) {
		pr_info(" %s = 0x%08x    ", DX0_names[i], regs->ctx.DX[i].U0);
		printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
	}

	show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *tsk)
{
	struct pt_regs *childregs = task_pt_regs(tsk);
	void *kernel_context = ((void *) childregs +
				sizeof(struct pt_regs));
	unsigned long global_base;

	BUG_ON(((unsigned long)childregs) & 0x7);
	BUG_ON(((unsigned long)kernel_context) & 0x7);

	memset(&tsk->thread.kernel_context, 0,
			sizeof(tsk->thread.kernel_context));

	tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
						     ret_from_fork,
						     0, 0);

	if (unlikely(tsk->flags & PF_KTHREAD)) {
		/*
		 * Make sure we don't leak any kernel data to child's regs
		 * if kernel thread becomes a userspace thread in the future
		 */
		memset(childregs, 0, sizeof(struct pt_regs));

		global_base = __core_reg_get(A1GbP);
		childregs->ctx.AX[0].U1 = (unsigned long) global_base;
		childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
		/* Set D1Ar1=kthread_arg and D1RtP=usp (fn) */
		childregs->ctx.DX[4].U1 = usp;
		childregs->ctx.DX[3].U1 = kthread_arg;
		tsk->thread.int_depth = 2;
		return 0;
	}

	/*
	 * Get a pointer to where the new child's register block should have
	 * been pushed.
	 * The Meta's stack grows upwards, and the context is the first
	 * thing to be pushed by TBX (phew)
	 */
	*childregs = *current_pt_regs();
	/* Set the correct stack for the clone mode */
	if (usp)
		childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
	tsk->thread.int_depth = 1;

	/* set return value for child process */
	childregs->ctx.DX[0].U0 = 0;

	/* The TLS pointer is passed as an argument to sys_clone. */
	if (clone_flags & CLONE_SETTLS)
		tsk->thread.tls_ptr =
				(__force void __user *)childregs->ctx.DX[1].U1;

#ifdef CONFIG_METAG_FPU
	if (tsk->thread.fpu_context) {
		struct meta_fpu_context *ctx;

		ctx = kmemdup(tsk->thread.fpu_context,
			      sizeof(struct meta_fpu_context), GFP_ATOMIC);
		tsk->thread.fpu_context = ctx;
	}
#endif

#ifdef CONFIG_METAG_DSP
	if (tsk->thread.dsp_context) {
		struct meta_ext_context *ctx;
		int i;

		ctx = kmemdup(tsk->thread.dsp_context,
			      sizeof(struct meta_ext_context), GFP_ATOMIC);
		for (i = 0; i < 2; i++)
			ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
					      GFP_ATOMIC);
		tsk->thread.dsp_context = ctx;
	}
#endif

	return 0;
}
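
/*
 * For kernel threads the (usp, kthread_arg) pair above carries (fn, arg)
 * from the generic kernel_thread() helper, and ret_from_fork ends up
 * calling fn(arg). A sketch of the flow (illustrative only):
 *
 *	kernel_thread(fn, arg, CLONE_VM | CLONE_UNTRACED);
 *	// -> copy_thread(flags, (unsigned long)fn, (unsigned long)arg, tsk)
 *	// -> child resumes in ret_from_fork, which calls fn(arg)
 */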

#ifdef CONFIG_METAG_FPU
static void alloc_fpu_context(struct thread_struct *thread)
{
	thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
				      GFP_ATOMIC);
}

static void clear_fpu(struct thread_struct *thread)
{
	thread->user_flags &= ~TBICTX_FPAC_BIT;
	kfree(thread->fpu_context);
	thread->fpu_context = NULL;
}
#else
static void clear_fpu(struct thread_struct *thread)
{
}
#endif

#ifdef CONFIG_METAG_DSP
static void clear_dsp(struct thread_struct *thread)
{
	if (thread->dsp_context) {
		kfree(thread->dsp_context->ram[0]);
		kfree(thread->dsp_context->ram[1]);

		kfree(thread->dsp_context);

		thread->dsp_context = NULL;
	}

	__core_reg_set(D0.8, 0);
}
#else
static void clear_dsp(struct thread_struct *thread)
{
}
#endif

struct task_struct *__sched __switch_to(struct task_struct *prev,
					struct task_struct *next)
{
	TBIRES to, from;

	to.Switch.pCtx = next->thread.kernel_context;
	to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
	if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
		struct pt_regs *regs = task_pt_regs(prev);
		TBIRES state;

		state.Sig.SaveMask = prev->thread.user_flags;
		state.Sig.pCtx = &regs->ctx;

		if (!prev->thread.fpu_context)
			alloc_fpu_context(&prev->thread);
		if (prev->thread.fpu_context)
			__TBICtxFPUSave(state, prev->thread.fpu_context);
	}
	/*
	 * Force a restore of the FPU context next time this process is
	 * scheduled.
	 */
	if (prev->thread.fpu_context)
		prev->thread.fpu_context->needs_restore = true;
#endif

	from = __TBISwitch(to, &prev->thread.kernel_context);

	/* Restore TLS pointer for this process. */
	set_gateway_tls(current->thread.tls_ptr);

	return (struct task_struct *) from.Switch.pPara;
}
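
/*
 * The arch switch_to() macro consumes the return value; roughly (a
 * sketch of the usual contract, see asm/switch_to.h):
 *
 *	#define switch_to(prev, next, last) \
 *		do { (last) = __switch_to((prev), (next)); } while (0)
 *
 * i.e. "last" is the task we switched away from, recovered here from
 * from.Switch.pPara once __TBISwitch() returns on this kernel stack.
 */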

void flush_thread(void)
{
	clear_fpu(&current->thread);
	clear_dsp(&current->thread);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	clear_fpu(&tsk->thread);
	clear_dsp(&tsk->thread);
}

/* TODO: unwind the kernel stack here to find out where we went to sleep. */
unsigned long get_wchan(struct task_struct *p)
{
	return 0;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	/* Returning 0 indicates that the FPU state was not stored (as it was
	 * not in use) */
	return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN	PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
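
/*
 * Worked example, assuming ELF_MIN_ALIGN == 0x1000 (illustrative): for
 * p_vaddr == 0x10002123 and p_filesz == 0x80,
 *
 *	ELF_PAGESTART(0x10002123)   == 0x10002000
 *	ELF_PAGEOFFSET(0x10002123)  == 0x123
 *	ELF_PAGEALIGN(0x123 + 0x80) == 0x1000
 *
 * i.e. the segment is mapped as one whole page from the page boundary.
 */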

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
			      struct elf_phdr *eppnt, int prot, int type,
			      unsigned long total_size)
{
	unsigned long map_addr, size;
	unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long raw_size = eppnt->p_filesz + page_off;
	unsigned long off = eppnt->p_offset - page_off;
	unsigned int tcm_tag;
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(raw_size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	tcm_tag = tcm_lookup_tag(addr);

	if (tcm_tag != TCM_INVALID_TAG)
		type &= ~MAP_FIXED;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image (since size < total_size).
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end (the unmap is needed for ELF images with holes).
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
		struct tcm_allocation *tcm;
		unsigned long tcm_addr;

		tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
		if (!tcm)
			return -ENOMEM;

		tcm_addr = tcm_alloc(tcm_tag, raw_size);
		if (tcm_addr != addr) {
			kfree(tcm);
			return -ENOMEM;
		}

		tcm->tag = tcm_tag;
		tcm->addr = tcm_addr;
		tcm->size = raw_size;

		list_add(&tcm->list, &current->mm->context.tcm);

		eppnt->p_vaddr = map_addr;
		if (copy_from_user((void *) addr, (void __user *) map_addr,
				   raw_size))
			return -EFAULT;
	}

	return map_addr;
}
#endif