/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "init.h"
#include "kern_util.h"
#include "mem.h"
#include "os.h"
#include "proc_mm.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "sysdep/stub.h"

int is_skas_winch(int pid, int fd, void *data)
{
	return pid == getpgrp();
}

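/*
 * Dump the general-purpose registers of a ptraced child, for debugging
 * when a stub wait goes wrong.  Returns 0 on success or -errno if
 * PTRACE_GETREGS fails.
 */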
static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

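/*
 * Wait for the stub to stop with SIGTRAP, continuing it across the
 * benign signals in STUB_SIG_MASK.  Any other outcome is fatal: the
 * stub's registers are dumped and fatal_sigsegv() is called.
 */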
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);

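/*
 * Fetch the fault information for the last SIGSEGV in the child.  With
 * PTRACE_FAULTINFO support it is read directly from the host; otherwise
 * the child is continued into its SEGV stub, which writes the faultinfo
 * to the start of the stub stack page, from where it is copied out.
 */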
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		unsigned long fpregs[FP_SIZE];

		err = get_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "get_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by stub_segv_handler at the start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

		err = put_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "put_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
	}
}

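/* Handle a SIGSEGV in the child: fetch the faultinfo and hand it to segv() */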
static void handle_segv(int pid, struct uml_pt_regs *regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, ask it for that
 * value (passed in as local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;

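/*
 * Child-side trampoline run right after clone(): request tracing, reset
 * SIGTERM/SIGWINCH, start the interval timer, map the syscall stub code
 * and stub stack page when /proc/mm is not available, install the stub
 * SIGSEGV handler when PTRACE_FAULTINFO is not available, and finally
 * stop with SIGSTOP so the tracer can take over.
 */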
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
		sa.sa_sigaction = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

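/*
 * Clone the userspace process with userspace_tramp() as its entry point,
 * wait for it to stop with SIGSTOP, and set PTRACE_O_TRACESYSGOOD so that
 * syscall stops can be told apart from plain SIGTRAP.  Returns the new pid
 * or a negative error code; on failure the child is killed.
 */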
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

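/*
 * Main tracing loop: push the UML process's registers into the child,
 * continue it with the ptrace operation chosen by SELECT_PTRACE_OPERATION,
 * wait for it to stop, read its registers back, and dispatch on the stop
 * signal (page faults, syscalls, timer and I/O signals).  This loop does
 * not return; unrecoverable errors end in fatal_sigsegv().
 */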
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us. */
	int local_using_sysemu;

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register.  It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process.  However, PTRACE_SETREGS will
		 * fail.  In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		if (put_fp_registers(pid, regs->fp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];

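/*
 * Pre-compute the register set handed to new stub threads: safe register
 * values with the instruction pointer at the clone stub and the stack
 * pointer at the top of the stub data page.
 */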
static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs, thread_fp_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);

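/*
 * Create a new address space in SKAS0 mode: point the existing stub at
 * the clone stub, let it clone itself, and pick up the new child's pid
 * from the parent stub's data page.  The child stub then maps its own
 * stub stack page and reports back through it.  Returns the child pid
 * or a negative error code.
 */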
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * Prepare the offset and fd of the child's stack as arguments for the
	 * parent's and child's mmap2 calls.
	 */
	*data = ((struct stub_data) { .offset	= MMAP_OFFSET(new_offset),
				      .fd	= new_fd,
				      .timer    = ((struct itimerval)
					           { .it_value = tv,
						     .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
		       "failed, pid = %d, err = %d\n", pid, err);
		return err;
	}

	/* Set a well-known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait until the parent has finished its work: read the child's pid
	 * from the parent's stack and check it for a bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait until the child has finished too: read the child's result
	 * from the child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

/*
 * This is used only if stub pages are needed while proc_mm is
 * available.  Opening /proc/mm creates a new mm_context, which lacks
 * the stub pages.  Thus, we map them in using the /proc/mm fd.
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op        = MM_MMAP,
				      .u         =
				      { .mmap    =
					{ .addr    = code,
					  .len     = UM_KERN_PAGE_SIZE,
					  .prot    = PROT_EXEC,
					  .flags   = MAP_FIXED | MAP_PRIVATE,
					  .fd      = code_fd,
					  .offset  = code_offset
	} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op        = MM_MMAP,
				  .u         =
				  { .mmap    =
				    { .addr    = data,
				      .len     = UM_KERN_PAGE_SIZE,
				      .prot    = PROT_READ | PROT_WRITE,
				      .flags   = MAP_FIXED | MAP_SHARED,
				      .fd      = map_fd,
				      .offset  = map_offset
		} } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}

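/*
 * Initialize a jmp_buf so that jumping to it starts handler() on the
 * given stack.
 */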
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

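/* Save our context in "me" and jump to the thread saved in "you" */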
void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

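/*
 * Set up initial_jmpbuf and dispatch on the reason for jumping back to it:
 * start a new thread, run a queued callback, or return for halt/reboot.
 */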
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}

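/*
 * Run proc(arg) on the initial (idle) thread's stack by jumping through
 * initial_jmpbuf with INIT_JMP_CALLBACK and back again.
 */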
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

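/* Jump back to start_idle_thread(), telling it to halt or reboot */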
void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

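/*
 * Switch the host address space: with /proc/mm, tell the host via
 * PTRACE_SWITCH_MM; otherwise just switch to the process that owns the
 * target mm.
 */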
void __switch_mm(struct mm_id *mm_idp)
{
	int err;

	/* FIXME: need cpu pid in __switch_mm */
	if (proc_mm) {
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if (err) {
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}
	}
	else userspace_pid[0] = mm_idp->u.pid;
}