/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

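/*
 * Start a kernel thread: the function and argument are stashed in the
 * current thread's request structure, and the child, created by
 * do_fork(), picks them up in new_thread_handler() below.  A
 * hypothetical caller (my_fn and my_arg are not names from this file)
 * would look like:
 *
 *	pid = kernel_thread(my_fn, my_arg, 0);
 */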
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

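/*
 * Perform the low-level context switch.  switch_threads() returns when
 * some other thread has switched back to us; if whoever ran in the
 * meantime left a saved_task behind, we loop and switch to that task on
 * its behalf until we come back with no saved_task pending.  (The exact
 * saved_task protocol lives with its setters; this summary is inferred
 * from the loop below.)
 */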
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

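/*
 * Set up a newly copied task.  There are two cases: a userspace fork,
 * where the child resumes via fork_handler() with a copy of the
 * parent's registers (and a zero syscall return value), and a kernel
 * thread, where the child starts in new_thread_handler() with safe
 * initial registers and the proc/arg request copied from the parent.
 */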
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		get_safe_registers(p->thread.regs.regs.gp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

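/*
 * The idle loop: when there is nothing to run, stop the periodic tick,
 * shut off the host timer, and sleep in the host for the interval the
 * timer would have covered.  disable_timer() is assumed here to return
 * that interval in nanoseconds, matching how idle_sleep() consumes it.
 */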
void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_stop_sched_tick(1);
		nsecs = disable_timer();
		idle_sleep(nsecs);
		tick_nohz_restart_sched_tick();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

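/*
 * Tell whether a stack pointer lies outside the current kernel stack:
 * rounding sp down to the kernel stack size yields the base of whatever
 * stack it is on, and the current kernel stack is the one whose base
 * holds our thread_info.
 */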
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

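/*
 * Run the UML-specific exitcalls placed in the __uml_exitcall linker
 * section, in reverse order of registration, mirroring how regular
 * exitcalls unwind.
 */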
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

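/*
 * using_sysemu selects how much syscall emulation ptrace does for us;
 * sysemu_supported records how much the host supports and caps what may
 * be set (hence the range check in set_using_sysemu()).  The meaning of
 * the individual levels (0-2, see /proc/sysemu below) is established
 * where host sysemu support is probed.
 */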
void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int proc_read_sysemu(char *buf, char **start, off_t offset,
			    int size, int *eof, void *data)
{
	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
		/* No overflow */
		*eof = 1;

	return strlen(buf);
}

static int proc_write_sysemu(struct file *file, const char __user *buf,
			     unsigned long count, void *data)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = create_proc_entry("sysemu", 0600, NULL);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	ent->read_proc = proc_read_sysemu;
	ent->write_proc = proc_write_sysemu;

	return 0;
}

late_initcall(make_proc_sysemu);

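/*
 * Report the single-stepping state of a task (default: current): 0 if
 * PT_DTRACE is not set, 1 if a single-stepped syscall is in progress
 * (singlestep_syscall), otherwise 2.
 */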
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

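/*
 * Find the waiting channel of a sleeping task by scanning its kernel
 * stack word by word from the saved stack pointer upward: skip
 * everything up to and including the scheduler functions, then report
 * the first kernel text address found, which is a heuristic for the
 * caller that put the task to sleep.
 */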
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

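/*
 * Copy the FPU state of a task for a core dump.  The registers live in
 * the host process backing this CPU's userspace, so save_fp_registers()
 * fetches them from userspace_pid[cpu] rather than reading them
 * locally.
 */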
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}