/*
 * Contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all
 * interrupts and faults that can result in a task-switch.
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

/* NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/blackfin.h>
#include <asm/errno.h>
#include <asm/fixed_code.h>
#include <asm/thread_info.h>  /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>

#include <asm/context.S>


#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
#else
.text
#endif

/* Slightly simplified and streamlined entry point for CPLB misses.
 * This one does not lower the level to IRQ5, and thus can be used to
 * patch up CPLB misses on the kernel stack.
 */
#if ANOMALY_05000261
/* Route all data-CPLB exception causes through the anomaly workaround. */
#define _ex_dviol _ex_workaround_261
#define _ex_dmiss _ex_workaround_261
#define _ex_dmult _ex_workaround_261

ENTRY(_ex_workaround_261)
	/*
	 * Work around an anomaly: if we see a new DCPLB fault, return
	 * without doing anything.  Then, if we get the same fault again,
	 * handle it.
	 */
	P4 = R7;	/* Store EXCAUSE */

	GET_PDA(p5, r7);
	r7 = [p5 + PDA_LFRETX];		/* RETX recorded for the last deferred fault */
	r6 = retx;
	[p5 + PDA_LFRETX] = r6;
	cc = r6 == r7;			/* second fault at the same RETX? */
	if !cc jump _bfin_return_from_exception;
	/* fall through */
	R7 = P4;			/* recover the saved EXCAUSE */
	R6 = VEC_CPLB_M;	/* Data CPLB Miss */
	cc = R6 == R7;
	if cc jump _ex_dcplb_miss (BP);
#ifdef CONFIG_MPU
	R6 = VEC_CPLB_VL;	/* Data CPLB Violation */
	cc = R6 == R7;
	if cc jump _ex_dcplb_viol (BP);
#endif
	/* Handle Data CPLB Protection Violation
	 * and Data CPLB Multiple Hits - Linux Trap Zero
	 */
	jump _ex_trap_c;
ENDPROC(_ex_workaround_261)

#else
#ifdef CONFIG_MPU
#define _ex_dviol _ex_dcplb_viol
#else
#define _ex_dviol _ex_trap_c
#endif
#define _ex_dmiss _ex_dcplb_miss
#define _ex_dmult _ex_trap_c
#endif

/* Fast path for instruction/data CPLB misses and data CPLB violations:
 * call _cplb_hdr to patch the CPLB tables, then return directly from the
 * exception without lowering to IRQ5.
 */
ENTRY(_ex_dcplb_viol)
ENTRY(_ex_dcplb_miss)
ENTRY(_ex_icplb_miss)
	(R7:6,P5:4) = [sp++];
	/* We leave the previously pushed ASTAT on the stack.  */
	SAVE_CONTEXT_CPLB

	/* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
	 * will change the stack pointer.  */
	R0 = SEQSTAT;
	R1 = SP;

	DEBUG_HWTRACE_SAVE(p5, r7)

	sp += -12;
	call _cplb_hdr;
	sp += 12;
	CC = R0 == 0;			/* nonzero return => could not fix the CPLB */
	IF !CC JUMP _handle_bad_cplb;

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* While we were processing this, did we double fault? */
	r7 = SEQSTAT;           /* reason code is in bit 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	r6 = 0x25;		/* presumably VEC_UNCOV (cf. _bfin_return_from_exception) */
	CC = R7 == R6;
	if CC JUMP _double_fault;
#endif

	DEBUG_HWTRACE_RESTORE(p5, r7)
	RESTORE_CONTEXT_CPLB
	ASTAT = [SP++];
	SP = EX_SCRATCH_REG;
	rtx;
ENDPROC(_ex_icplb_miss)

/* Syscall exception: defer the real work to IRQ15 (_system_call runs there). */
ENTRY(_ex_syscall)
	raise 15;		/* invoked by TRAP #0, for sys call */
	jump.s _bfin_return_from_exception;
ENDPROC(_ex_syscall)

/* Hardware single-step exception: decide whether to report the step to
 * trap_c, skip it, or turn single stepping off entirely.
 */
ENTRY(_ex_single_step)
	/* If we just returned from an interrupt, the single step event is
	   for the RTI instruction.  */
	r7 = retx;
	r6 = reti;
	cc = r7 == r6;
	if cc jump _bfin_return_from_exception;

#ifdef CONFIG_KGDB
	/* Don't do single step in hardware exception handler */
        p5.l = lo(IPEND);
        p5.h = hi(IPEND);
	r6 = [p5];
	cc = bittst(r6, 4);
	if cc jump _bfin_return_from_exception;
	cc = bittst(r6, 5);
	if cc jump _bfin_return_from_exception;

	/* skip single step if current interrupt priority is higher than
	 * that of the first instruction, from which gdb starts single step */
	r6 >>= 6;
	r7 = 10;
.Lfind_priority_start:
	cc = bittst(r6, 0);
	if cc jump .Lfind_priority_done;
	r6 >>= 1;
	r7 += -1;
	cc = r7 == 0;
	if cc jump .Lfind_priority_done;
	jump.s .Lfind_priority_start;
.Lfind_priority_done:
	p4.l = _kgdb_single_step;
	p4.h = _kgdb_single_step;
	r6 = [p4];
	cc = r6 == 0;
	if cc jump .Ldo_single_step;
	r6 += -1;
	cc = r6 < r7;
	if cc jump 1f;
.Ldo_single_step:
#else
	/* If we were in user mode, do the single step normally.  */
	p5.l = lo(IPEND);
	p5.h = hi(IPEND);
	r6 = [p5];
	r7 = 0xffe0 (z);
	r7 = r7 & r6;		/* any IPEND bit 5..15 set => not plain user mode */
	cc = r7 == 0;
	if !cc jump 1f;
#endif
#ifdef CONFIG_EXACT_HWERR
	/* Read the ILAT, and check to see if the process we are
	 * single stepping caused a previous hardware error.
	 * If so, do not single step (which lowers to IRQ5, and makes
	 * us miss the error).
	 */
	p5.l = lo(ILAT);
	p5.h = hi(ILAT);
	r7 = [p5];
	cc = bittst(r7, EVT_IVHW_P);
	if cc jump 1f;
#endif
	/* Single stepping only a single instruction, so clear the trace
	 * bit here.  */
	r7 = syscfg;
	bitclr (r7, SYSCFG_SSSTEP_P);
	syscfg = R7;
	jump _ex_trap_c;

1:
	/*
	 * We were in an interrupt handler.  By convention, all of them save
	 * SYSCFG with their first instruction, so by checking whether our
	 * RETX points at the entry point, we can determine whether to allow
	 * a single step, or whether to clear SYSCFG.
	 *
	 * First, find out the interrupt level and the event vector for it.
	 */
	p5.l = lo(EVT0);
	p5.h = hi(EVT0);
	p5 += -4;
2:
	r7 = rot r7 by -1;	/* rotate r7 through CC until a set bit is found */
	p5 += 4;		/* ... advancing p5 one EVTx slot per bit */
	if !cc jump 2b;

	/* What we actually do is test for the _second_ instruction in the
	 * IRQ handler.  That way, if there are insns following the restore
	 * of SYSCFG after leaving the handler, we will not turn off SYSCFG
	 * for them.  */

	r7 = [p5];		/* handler entry point from the event vector table */
	r7 += 2;		/* address of the handler's second instruction */
	r6 = RETX;
	cc = R7 == R6;
	if !cc jump _bfin_return_from_exception;

	r7 = syscfg;
	bitclr (r7, SYSCFG_SSSTEP_P);	/* Turn off single step */
	syscfg = R7;

	/* Fall through to _bfin_return_from_exception.  */
ENDPROC(_ex_single_step)

/* Common exception exit: pop the minimal saved state, restore the original
 * stack pointer from the scratch register, and RTX back.
 */
ENTRY(_bfin_return_from_exception)
#if ANOMALY_05000257
	/* Anomaly workaround: rewrite the loop counters before returning. */
	R7=LC0;
	LC0=R7;
	R7=LC1;
	LC1=R7;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* While we were processing the current exception,
	 * did we cause another, and double fault?
	 */
	r7 = SEQSTAT;           /* reason code is in bit 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
	r6 = VEC_UNCOV;
	CC = R7 == R6;
	if CC JUMP _double_fault;
#endif

	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	sp = EX_SCRATCH_REG;
	rtx;
ENDPROC(_bfin_return_from_exception)

ENTRY(_handle_bad_cplb)
	DEBUG_HWTRACE_RESTORE(p5, r7)
	/* To get here, we just tried and failed to change a CPLB
	 * so, handle things in trap_c (C code), by lowering to
	 * IRQ5, just like we normally do. Since this is not a
	 * "normal" return path, we have to do a lot of stuff to
	 * the stack to get ready so, we can fall through - we
	 * need to make a CPLB exception look like a normal exception
	 */
	RESTORE_CONTEXT_CPLB
	/* ASTAT is still on the stack, where it is needed.  */
	[--sp] = (R7:6,P5:4);

/* Default target for the user-definable exception vectors; a single nop
 * that falls straight through into _ex_trap_c below.
 */
ENTRY(_ex_replaceable)
	nop;

/* Generic exception handler: stash the exception-context registers in the
 * PDA, hijack the IRQ5 vector with _exception_to_level5, and lower to IRQ5
 * so trap_c can run in C with a normal kernel context.
 */
ENTRY(_ex_trap_c)
	/* The only thing that has been saved in this context is
	 * (R7:6,P5:4), ASTAT & SP - don't use anything else
	 */

	GET_PDA(p5, r6);

	/* Make sure we are not in a double fault */
	p4.l = lo(IPEND);
	p4.h = hi(IPEND);
	r7 = [p4];
	CC = BITTST (r7, 5);
	if CC jump _double_fault;
	[p5 + PDA_EXIPEND] = r7;	/* stash IPEND for _exception_to_level5 */

	/* Call C code (trap_c) to handle the exception, which most
	 * likely involves sending a signal to the current process.
	 * To avoid double faults, lower our priority to IRQ5 first.
	 */
	r7.h = _exception_to_level5;
	r7.l = _exception_to_level5;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = r7;		/* temporarily repoint the IRQ5 vector */
	csync;

	/*
	 * Save these registers, as they are only valid in exception context
	 *  (where we are now - as soon as we defer to IRQ5, they can change)
	 * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
	 * but they are not very interesting, so don't save them
	 */

	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DCPLB] = r7;

	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r6 = [p4];
	[p5 + PDA_ICPLB] = r6;

	r6 = retx;
	[p5 + PDA_RETX] = r6;

	r6 = SEQSTAT;
	[p5 + PDA_SEQSTAT] = r6;

	/* Save the state of single stepping */
	r6 = SYSCFG;
	[p5 + PDA_SYSCFG] = r6;
	/* Clear it while we handle the exception in IRQ5 mode */
	BITCLR(r6, SYSCFG_SSSTEP_P);
	SYSCFG = r6;

	/* Save the current IMASK, since we change in order to jump to level 5 */
	cli r6;
	[p5 + PDA_EXIMASK] = r6;

	p4.l = lo(SAFE_USER_INSTRUCTION);
	p4.h = hi(SAFE_USER_INSTRUCTION);
	retx = p4;		/* park RETX on a known-safe instruction */

	/* Disable all interrupts, but make sure level 5 is enabled so
	 * we can switch to that level.
	 */
	r6 = 0x3f;
	sti r6;

	/* In case interrupts are disabled IPEND[4] (global interrupt disable bit)
	 * clear it (re-enabling interrupts again) by the special sequence of pushing
	 * RETI onto the stack.  This way we can lower ourselves to IVG5 even if the
	 * exception was taken after the interrupt handler was called but before it
	 * got a chance to enable global interrupts itself.
	 */
	[--sp] = reti;
	sp += 4;

	raise 5;
	jump.s _bfin_return_from_exception;
ENDPROC(_ex_trap_c)

/* We just realized we got an exception, while we were processing a different
 * exception. This is an unrecoverable event, so crash.
 * Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
 */
ENTRY(_double_fault)
	/* Turn caches & protection off, to ensure we don't get any more
	 * double exceptions
	 */

	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4];              /* Control Register*/
	BITCLR(R5,ENICPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	/* Fix up the stack */
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	SP = EX_SCRATCH_REG;

	/* We should be out of the exception stack, and back down into
	 * kernel or user space stack
	 */
	SAVE_ALL_SYS

	/* The dumping functions expect the return address in the RETI
	 * slot.  */
	r6 = retx;
	[sp + PT_PC] = r6;

	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _double_fault_c, p5;
	SP += 12;
.L_double_fault_panic:
        JUMP .L_double_fault_panic	/* spin forever if _double_fault_c returns */

ENDPROC(_double_fault)

/* Runs at IRQ5 after _ex_trap_c re-vectored EVT5 here: build a full pt_regs
 * frame from the values stashed in the PDA, call trap_c, then return.
 */
ENTRY(_exception_to_level5)
	SAVE_ALL_SYS

	GET_PDA(p5, r7);        /* Fetch current PDA */
	r6 = [p5 + PDA_RETX];
	[sp + PT_PC] = r6;

	r6 = [p5 + PDA_SYSCFG];
	[sp + PT_SYSCFG] = r6;

	r6 = [p5 + PDA_SEQSTAT]; /* Read back seqstat */
	[sp + PT_SEQSTAT] = r6;

	/* Restore the hardware error vector.  */
	r7.h = _evt_ivhw;
	r7.l = _evt_ivhw;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = r7;
	csync;

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/* Now that we have the hardware error vector programmed properly
	 * we can re-enable interrupts (IPEND[4]), so if the _trap_c causes
	 * another hardware error, we can catch it (self-nesting).
	 */
	[--sp] = reti;
	sp += 4;
#endif

	r7 = [p5 + PDA_EXIPEND]	/* Read the IPEND from the Exception state */
	[sp + PT_IPEND] = r7;   /* Store IPEND onto the stack */

	r0 = sp; 	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p4;
	SP += 12;

	/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
	 * before we return.
	 */
	CC = BITTST(r7, EVT_IRPTEN_P)
	if !CC jump 1f;
	/* this will load a random value into the reti register - but that is OK,
	 * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
	 */
	sp += -4;
	reti = [sp++];
1:
	/* restore the interrupt mask (IMASK) */
	r6 = [p5 + PDA_EXIMASK];
	sti r6;

	call _ret_from_exception;
	RESTORE_ALL_SYS
	rti;
ENDPROC(_exception_to_level5)

ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
	/* Since the kernel stack can be anywhere, it's not guaranteed to be
	 * covered by a CPLB.  Switch to an exception stack; use RETN as a
	 * scratch register (for want of a better option).
	 */
	EX_SCRATCH_REG = sp;
	GET_PDA_SAFE(sp);
	sp = [sp + PDA_EXSTACK];
	/* Try to deal with syscalls quickly.  */
	[--sp] = ASTAT;
	[--sp] = (R7:6,P5:4);

	ANOMALY_283_315_WORKAROUND(p5, r7)

#ifdef CONFIG_EXACT_HWERR
	/* Make sure all pending read/writes complete. This will ensure any
	 * accesses which could cause hardware errors complete, and signal
	 * the hardware before we do something silly, like crash the
	 * kernel. We don't need to work around anomaly 05000312, since
	 * we are already atomic
	 */
	ssync;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT
	/*
	 * Save these registers, as they are only valid in exception context
	 * (where we are now - as soon as we defer to IRQ5, they can change)
	 * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
	 * but they are not very interesting, so don't save them
	 */

	GET_PDA(p5, r7);
	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DF_DCPLB] = r7;

	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_DF_ICPLB] = r7;

	r7 = retx;
	[p5 + PDA_DF_RETX] = r7;

	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
	[p5 + PDA_DF_SEQSTAT] = r7;
#else
	r7 = SEQSTAT;           /* reason code is in bit 5:0 */
#endif
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;			/* r7 = EXCAUSE */
	p5.h = _ex_table;
	p5.l = _ex_table;
	p4 = r7;
	p5 = p5 + (p4 << 2);		/* index _ex_table by EXCAUSE */
	p4 = [p5];
	jump (p4);			/* dispatch to the per-cause handler */

.Lbadsys:
	r7 = -ENOSYS; 		/* signextending enough */
	[sp + PT_R0] = r7;	/* return value from system call */
	jump .Lsyscall_really_exit;
ENDPROC(_trap)

/* IRQ15 system-call handler: dispatch via _sys_call_table, then handle the
 * return-to-userspace work loop (reschedule / signals / notify-resume).
 */
ENTRY(_system_call)
	/* Store IPEND */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* Store RETS for now */
	r0 = rets;
	[sp + PT_RESERVED] = r0;
	/* Set the stack for the current process */
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;  		/* thread_info */
	p2 = r7;
	p2 = [p2];		/* first word of thread_info — presumably the task pointer */

	[p2+(TASK_THREAD+THREAD_KSP)] = sp;
#ifdef CONFIG_IPIPE
	r0 = sp;
	SP += -12;
	pseudo_long_call ___ipipe_syscall_root, p0;
	SP += 12;
	cc = r0 == 1;
	if cc jump .Lsyscall_really_exit;
	cc = r0 == -1;
	if cc jump .Lresume_userspace;
	r3 = [sp + PT_R3];
	r4 = [sp + PT_R4];
	p0 = [sp + PT_ORIG_P0];
#endif /* CONFIG_IPIPE */

	/* are we tracing syscalls?*/
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;
	p2 = r7;
	r7 = [p2+TI_FLAGS];
	CC = BITTST(r7,TIF_SYSCALL_TRACE);
	if CC JUMP _sys_trace;
	CC = BITTST(r7,TIF_SINGLESTEP);
	if CC JUMP _sys_trace;

	/* Make sure the system call # is valid */
	p4 = __NR_syscall;
	/* System call number is passed in P0 */
	cc = p4 <= p0;
	if cc jump .Lbadsys;

	/* Execute the appropriate system call */

	p4 = p0;
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	p5 = [p5];

	/* args 4-6 go on the stack for the C callee */
	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
	[sp + PT_R0] = r0;

.Lresume_userspace:
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
.Lresume_userspace_1:
	/* Disable interrupts (push/pop RETI sets IPEND[4]).  */
	[--sp] = reti;
	reti = [sp++];

	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;

.Lsyscall_resched:
#ifdef CONFIG_IPIPE
	cc = BITTST(r7, TIF_IRQ_SYNC);
	if !cc jump .Lsyscall_no_irqsync;
	/*
	 * Clear IPEND[4] manually to undo what resume_userspace_1 just did;
	 * we need this so that high priority domain interrupts may still
	 * preempt the current domain while the pipeline log is being played
	 * back.
	 */
	[--sp] = reti;
	SP += 4; /* don't merge with next insn to keep the pattern obvious */
	SP += -12;
	pseudo_long_call ___ipipe_sync_root, p4;
	SP += 12;
	jump .Lresume_userspace_1;
.Lsyscall_no_irqsync:
#endif
	cc = BITTST(r7, TIF_NEED_RESCHED);
	if !cc jump .Lsyscall_sigpending;

	/* Reenable interrupts.  */
	[--sp] = reti;
	sp += 4;

	SP += -12;
	pseudo_long_call _schedule, p4;
	SP += 12;

	jump .Lresume_userspace_1;

.Lsyscall_sigpending:
	cc = BITTST(r7, TIF_SIGPENDING);
	if cc jump .Lsyscall_do_signals;
	cc = BITTST(r7, TIF_NOTIFY_RESUME);
	if !cc jump .Lsyscall_really_exit;
.Lsyscall_do_signals:
	/* Reenable interrupts.  */
	[--sp] = reti;
	sp += 4;

	r0 = sp;
	SP += -12;
	pseudo_long_call _do_notify_resume, p5;
	SP += 12;

.Lsyscall_really_exit:
	r5 = [sp + PT_RESERVED];	/* RETS saved on entry */
	rets = r5;
	rts;
ENDPROC(_system_call)

/* Do not mark as ENTRY() to avoid error in assembler ...
 * this symbol need not be global anyways, so ...
 */
/* Traced-syscall path: notify the tracer on entry and exit around the
 * actual system-call dispatch.
 */
_sys_trace:
	r0 = sp;		/* pt_regs pointer for the tracer */
	pseudo_long_call _syscall_trace_enter, p5;

	/* Make sure the system call # is valid */
	p4 = [SP + PT_P0];
	p3 = __NR_syscall;
	cc = p3 <= p4;
	r0 = -ENOSYS;
	if cc jump .Lsys_trace_badsys;

	/* Execute the appropriate system call */
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	r3 = [sp + PT_R3];
	r4 = [sp + PT_R4];
	r5 = [sp + PT_R5];
	p5 = [p5];

	/* args 4-6 go on the stack for the C callee */
	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
.Lsys_trace_badsys:
	[sp + PT_R0] = r0;

	r0 = sp;
	pseudo_long_call _syscall_trace_leave, p5;
	jump .Lresume_userspace;
ENDPROC(_sys_trace)

/* Context switch: save prev's USP/KSP/PC, load next's, and jump into it. */
ENTRY(_resume)
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in r0, next (the new task) is in r1.
	 */
	p0 = r0;
	p1 = r1;
	[--sp] = rets;
	[--sp] = fp;
	[--sp] = (r7:4, p5:3);	/* callee-saved registers live across the switch */

	/* save usp */
	p2 = usp;
	[p0+(TASK_THREAD+THREAD_USP)] = p2;

	/* save current kernel stack pointer */
	[p0+(TASK_THREAD+THREAD_KSP)] = sp;

	/* save program counter */
	r1.l = _new_old_task;
	r1.h = _new_old_task;
	[p0+(TASK_THREAD+THREAD_PC)] = r1;

	/* restore the kernel stack pointer */
	sp = [p1+(TASK_THREAD+THREAD_KSP)];

	/* restore user stack pointer */
	p0 = [p1+(TASK_THREAD+THREAD_USP)];
	usp = p0;

	/* restore pc */
	p0 = [p1+(TASK_THREAD+THREAD_PC)];
	jump (p0);

	/*
	 * Following code actually lands up in a new (old) task.
	 */

_new_old_task:
	(r7:4, p5:3) = [sp++];
	fp = [sp++];
	rets = [sp++];

	/*
	 * When we come out of resume, r0 carries "old" task, because we are
	 * in "new" task.
	 */
	rts;
ENDPROC(_resume)

/* Common return-from-exception work: when heading back to user mode with
 * pending TIF work, re-vector EVT15 to _schedule_and_signal and raise 15.
 */
ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
	p2.l = _ipipe_percpu_domain;
	p2.h = _ipipe_percpu_domain;
	r0.l = _ipipe_root;
	r0.h = _ipipe_root;
	r2 = [p2];
	cc = r0 == r2;
	if !cc jump 4f;  /* not on behalf of the root domain, get out */
#endif /* CONFIG_IPIPE */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);

	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

1:
	r2 = LO(~0x37) (Z);	/* mask off IPEND bits 0-2, 4, 5 */
	r0 = r2 & r0;
	cc = r0 == 0;
	if !cc jump 4f;	/* if not return to user mode, get out */

	/* Make sure any pending system call or deferred exception
	 * return in ILAT for this process to get executed, otherwise
	 * in case context switch happens, system call of
	 * first process (i.e in ILAT) will be carried
	 * forward to the switched process
	 */

	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	r1 = (EVT_IVG14 | EVT_IVG15) (z);
	r0 = r0 & r1;
	cc = r0 == 0;
	if !cc jump 5f;

	/* Set the stack for the current process */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;
	cc = r7 == 0;
	if cc jump 4f;		/* nothing pending — plain return */

	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal;
	p1.h = _schedule_and_signal;
	[p0] = p1;
	csync;
	raise 15;		/* raise evt15 to do signal or reschedule */
4:
	r0 = syscfg;
	bitclr(r0, SYSCFG_SSSTEP_P);		/* Turn off single step */
	syscfg = r0;
5:
	rts;
ENDPROC(_ret_from_exception)

#if defined(CONFIG_PREEMPT)

/* Raise the running level up to IRQ14 (used by the kernel-preemption path
 * so a later RTI can land back at IRQ15).
 */
ENTRY(_up_to_irq14)
#if ANOMALY_05000281 || ANOMALY_05000461
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;

	p0.l = lo(EVT14);
	p0.h = hi(EVT14);
	p1.l = _evt_up_evt14;
	p1.h = _evt_up_evt14;
	[p0] = p1;		/* temporarily point EVT14 at our helper */
	csync;

	raise 14;
1:
	jump 1b;		/* spin until the raised IRQ14 is taken */
ENDPROC(_up_to_irq14)

/* IRQ14 side of _up_to_irq14: fix IMASK/IPEND state and restore the
 * normal EVT14 vector before returning at level 14.
 */
ENTRY(_evt_up_evt14)
#ifdef CONFIG_DEBUG_HWERR
	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
	sti r0;
#else
	cli r0;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	[--sp] = rets;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	rets = [sp++];
#endif
	[--sp] = RETI;		/* push/pop RETI toggles IPEND[4] */
	SP += 4;

	/* restore normal evt14 */
	p0.l = lo(EVT14);
	p0.h = hi(EVT14);
	p1.l = _evt_evt14;
	p1.h = _evt_evt14;
	[p0] = p1;
	csync;

	rts;
ENDPROC(_evt_up_evt14)

#endif

#ifdef CONFIG_IPIPE

/* Interrupt-pipeline variant: sync the root domain's pending interrupt log
 * before resuming kernel code interrupted by this IRQ.
 */
_resume_kernel_from_int:
	r1 = LO(~0x8000) (Z);
	r1 = r0 & r1;
	r0 = 1;
	r0 = r1 - r0;
	r2 = r1 & r0;		/* r1 & (r1 - 1): zero iff exactly one bit set */
	cc = r2 == 0;
	/* Sync the root stage only from the outer interrupt level. */
	if !cc jump .Lnosync;
	r0.l = ___ipipe_sync_root;
	r0.h = ___ipipe_sync_root;
	[--sp] = reti;
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	SP += -12;
	call ___ipipe_call_irqtail
	SP += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];
	reti = [sp++];
.Lnosync:
	rts
#elif defined(CONFIG_PREEMPT)

/* Kernel-preemption variant: if preemption is allowed and a reschedule is
 * pending, drop to level 15 to run preempt_schedule_irq.
 */
_resume_kernel_from_int:
	/* check preempt_count */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;
	p5 = r7;
	r7 = [p5 + TI_PREEMPT];
	cc = r7 == 0x0;		/* nonzero preempt_count => preemption disabled */
	if !cc jump .Lreturn_to_kernel;
.Lneed_schedule:
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;
	cc = BITTST(r7, TIF_NEED_RESCHED);
	if !cc jump .Lreturn_to_kernel;
	/*
	 * let schedule done at level 15, otherwise scheduled process will run
	 * at high level and block low level interrupt
	 */
	r6 = reti;  /* save reti */
	r5.l = .Lkernel_schedule;
	r5.h = .Lkernel_schedule;
	reti = r5;
	rti;
.Lkernel_schedule:
	[--sp] = rets;
	sp += -12;
	pseudo_long_call _preempt_schedule_irq, p4;
	sp += 12;
	rets = [sp++];

	[--sp] = rets;
	sp += -12;
	/* up to irq14 so that reti after restore_all can return to irq15(kernel) */
	pseudo_long_call _up_to_irq14, p4;
	sp += 12;
	rets = [sp++];

	reti = r6; /* restore reti so that origin process can return to interrupted point */

	jump .Lneed_schedule;
#else

/* No preemption, no ipipe: resuming kernel code is a plain rts. */
#define _resume_kernel_from_int	.Lreturn_to_kernel
#endif

/* Tail of interrupt handling: if returning to user space from the outermost
 * interrupt, lower to IRQ15 via _schedule_and_signal_from_int.
 */
ENTRY(_return_from_int)
	/* If someone else already raised IRQ 15, do nothing.  */
	csync;
	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	cc = bittst (r0, EVT_IVG15_P);
	if cc jump .Lreturn_to_kernel;

	/* if not return to user mode, get out */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	r1 = 0x17(Z);
	r2 = ~r1;
	r2.h = 0;
	r0 = r2 & r0;		/* keep only IPEND bits 3, 5-15 */
	r1 = 1;
	r1 = r0 - r1;
	r2 = r0 & r1;		/* r0 & (r0 - 1): zero iff at most one bit set */
	cc = r2 == 0;
	if !cc jump _resume_kernel_from_int;	/* nested interrupt */

	/* Lower the interrupt level to 15.  */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal_from_int;
	p1.h = _schedule_and_signal_from_int;
	[p0] = p1;
	csync;
#if ANOMALY_05000281 || ANOMALY_05000461
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif
	r0 = 0x801f (z);
	STI r0;
	raise 15;	/* raise evt15 to do signal or reschedule */
	rti;
.Lreturn_to_kernel:
	rts;
ENDPROC(_return_from_int)

/* Drop from the current (higher) level to IRQ14 by raising 14 and RTI'ing;
 * _evt_evt14 picks up on the other side.
 */
ENTRY(_lower_to_irq14)
#if ANOMALY_05000281 || ANOMALY_05000461
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;
	rti;
ENDPROC(_lower_to_irq14)

/* IRQ14 handler reached via _lower_to_irq14: adjust IMASK, clear IPEND[4]
 * via the push/pop-RETI sequence, and return at level 14.
 */
ENTRY(_evt_evt14)
#ifdef CONFIG_DEBUG_HWERR
	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
	sti r0;
#else
	cli r0;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	[--sp] = rets;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	rets = [sp++];
#endif
	[--sp] = RETI;		/* push/pop RETI toggles IPEND[4] */
	SP += 4;
	rts;
ENDPROC(_evt_evt14)

/* EVT15 target installed by _return_from_int: restore the normal syscall
 * vector, finish any interrupted fixed-code atomic sequence, then run the
 * shared resume_userspace work loop.
 */
ENTRY(_schedule_and_signal_from_int)
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;

	/* Set orig_p0 to -1 to indicate this isn't the end of a syscall.  */
	r0 = -1 (x);
	[sp + PT_ORIG_P0] = r0;

	p1 = rets;
	[sp + PT_RESERVED] = p1;	/* return address for .Lsyscall_really_exit */

#ifdef CONFIG_TRACE_IRQFLAGS
	/* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
	 * is turned on, so disable all irqs. */
	cli r0;
	sp += -12;
	call _trace_hardirqs_on;
	sp += 12;
#endif
#ifdef CONFIG_SMP
	GET_PDA(p0, r0); 	/* Fetch current PDA (can't migrate to other CPU here) */
	r0 = [p0 + PDA_IRQFLAGS];
#else
	p0.l = _bfin_irq_flags;
	p0.h = _bfin_irq_flags;
	r0 = [p0];
#endif
	sti r0;

	/* finish the userspace "atomic" functions for it */
	r1.l = lo(FIXED_CODE_END);
	r1.h = hi(FIXED_CODE_END);
	r2 = [sp + PT_PC];
	cc = r1 <= r2;		/* PC past the fixed-code region? nothing to fix */
	if cc jump .Lresume_userspace (bp);

	r0 = sp;
	sp += -12;

	pseudo_long_call _finish_atomic_sections, p5;
	sp += 12;
	jump.s .Lresume_userspace;
ENDPROC(_schedule_and_signal_from_int)

/* EVT15 target installed by _ret_from_exception: save a syscall-style
 * context, run the resume_userspace work loop, then restore and RTI.
 */
ENTRY(_schedule_and_signal)
	SAVE_CONTEXT_SYSCALL
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;
	p0.l = 1f;
	p0.h = 1f;
	[sp + PT_RESERVED] = P0;	/* make .Lsyscall_really_exit's rts land at 1f */
	call .Lresume_userspace;
1:
	RESTORE_CONTEXT
	rti;
ENDPROC(_schedule_and_signal)

/* We handle this 100% in exception space - to reduce overhead
 * Only potential problem is if the software buffer gets swapped out of the
 * CPLB table - then double fault. - so we don't let this happen in other places
 */
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
/* Trace-buffer-full exception: drain the hardware trace buffer (TBUF) into
 * the software ring buffer, advancing _trace_buff_offset with wraparound.
 */
ENTRY(_ex_trace_buff_full)
	[--sp] = P3;
	[--sp] = P2;
	[--sp] = LC0;
	[--sp] = LT0;
	[--sp] = LB0;
	P5.L = _trace_buff_offset;
	P5.H = _trace_buff_offset;
	P3 = [P5];              /* trace_buff_offset */
	P5.L = lo(TBUFSTAT);
	P5.H = hi(TBUFSTAT);
	R7 = [P5];
	R7 <<= 1;               /* double, since we need to read twice */
	LC0 = R7;
	R7 <<= 2;               /* need to shift over again,
				 * to get the number of bytes */
	P5.L = lo(TBUF);
	P5.H = hi(TBUF);
	R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1;	/* buffer-size mask */

	P2 = R7;
	P3 = P3 + P2;
	R7 = P3;
	R7 = R7 & R6;		/* wrap offset within the software buffer */
	P3 = R7;
	P2.L = _trace_buff_offset;
	P2.H = _trace_buff_offset;
	[P2] = P3;

	P2.L = _software_trace_buff;
	P2.H = _software_trace_buff;

	LSETUP (.Lstart, .Lend) LC0;
.Lstart:
	R7 = [P5];      /* read TBUF */
	P4 = P3 + P2;
	[P4] = R7;
	P3 += -4;
	R7 = P3;
	R7 = R7 & R6;	/* wrap */
.Lend:
	P3 = R7;

	LB0 = [sp++];
	LT0 = [sp++];
	LC0 = [sp++];
	P2 = [sp++];
	P3 = [sp++];
	jump _bfin_return_from_exception;
ENDPROC(_ex_trace_buff_full)

#if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4
.data
#else
.section .l1.data.B
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN */
ENTRY(_trace_buff_offset)
        .long 0;
ALIGN
ENTRY(_software_trace_buff)
	.rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256);
	.long 0
	.endr
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */

#ifdef CONFIG_EARLY_PRINTK
__INIT
/* Boot-time exception handler: disable CPLBs and hand off to early_trap_c
 * with the pt_regs frame and faulting RETX.
 */
ENTRY(_early_trap)
	SAVE_ALL_SYS
	trace_buffer_stop(p0,r0);

	ANOMALY_283_315_WORKAROUND(p4, r5)

	/* Turn caches off, to ensure we don't get double exceptions */

	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4];              /* Control Register*/
	BITCLR(R5,ENICPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	CSYNC;          /* Disabling of CPLBs should be preceded by a CSYNC */
	[P4] = R5;
	SSYNC;

	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
	r1 = RETX;	/* faulting instruction address ==> r1 */

	SP += -12;
	call _early_trap_c;
	SP += 12;
ENDPROC(_early_trap)
__FINIT
#endif /* CONFIG_EARLY_PRINTK */

/*
 * Put these in the kernel data section - that should always be covered by
 * a CPLB. This is needed to ensure we don't get double fault conditions
 */

#ifdef CONFIG_SYSCALL_TAB_L1
.section .l1.data
#else
.data
#endif

ENTRY(_ex_table)
	/* entry for each EXCAUSE[5:0]: exactly 64 handler pointers, 0x00-0x3F.
	 * (An accidental duplicate of the 0x2E slot previously made this
	 * table 65 entries long; removed -- all tail slots are _ex_trap_c,
	 * so dispatch for every valid EXCAUSE value is unchanged.)
	 * This table must be in sync with the table in ./kernel/traps.c
	 * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined
	 */
	.long _ex_syscall       /* 0x00 - User Defined - Linux Syscall */
	.long _ex_trap_c        /* 0x01 - User Defined - Software breakpoint */
#ifdef	CONFIG_KGDB
	.long _ex_trap_c	/* 0x02 - User Defined - KGDB initial connection
							 and break signal trap */
#else
	.long _ex_replaceable   /* 0x02 - User Defined */
#endif
	.long _ex_trap_c        /* 0x03 - User Defined - userspace stack overflow */
	.long _ex_trap_c        /* 0x04 - User Defined - dump trace buffer */
	.long _ex_replaceable   /* 0x05 - User Defined */
	.long _ex_replaceable   /* 0x06 - User Defined */
	.long _ex_replaceable   /* 0x07 - User Defined */
	.long _ex_replaceable   /* 0x08 - User Defined */
	.long _ex_replaceable   /* 0x09 - User Defined */
	.long _ex_replaceable   /* 0x0A - User Defined */
	.long _ex_replaceable   /* 0x0B - User Defined */
	.long _ex_replaceable   /* 0x0C - User Defined */
	.long _ex_replaceable   /* 0x0D - User Defined */
	.long _ex_replaceable   /* 0x0E - User Defined */
	.long _ex_replaceable   /* 0x0F - User Defined */
	.long _ex_single_step   /* 0x10 - HW Single step */
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	.long _ex_trace_buff_full /* 0x11 - Trace Buffer Full */
#else
	.long _ex_trap_c        /* 0x11 - Trace Buffer Full */
#endif
	.long _ex_trap_c        /* 0x12 - Reserved */
	.long _ex_trap_c        /* 0x13 - Reserved */
	.long _ex_trap_c        /* 0x14 - Reserved */
	.long _ex_trap_c        /* 0x15 - Reserved */
	.long _ex_trap_c        /* 0x16 - Reserved */
	.long _ex_trap_c        /* 0x17 - Reserved */
	.long _ex_trap_c        /* 0x18 - Reserved */
	.long _ex_trap_c        /* 0x19 - Reserved */
	.long _ex_trap_c        /* 0x1A - Reserved */
	.long _ex_trap_c        /* 0x1B - Reserved */
	.long _ex_trap_c        /* 0x1C - Reserved */
	.long _ex_trap_c        /* 0x1D - Reserved */
	.long _ex_trap_c        /* 0x1E - Reserved */
	.long _ex_trap_c        /* 0x1F - Reserved */
	.long _ex_trap_c        /* 0x20 - Reserved */
	.long _ex_trap_c        /* 0x21 - Undefined Instruction */
	.long _ex_trap_c        /* 0x22 - Illegal Instruction Combination */
	.long _ex_dviol         /* 0x23 - Data CPLB Protection Violation */
	.long _ex_trap_c        /* 0x24 - Data access misaligned */
	.long _ex_trap_c        /* 0x25 - Unrecoverable Event */
	.long _ex_dmiss         /* 0x26 - Data CPLB Miss */
	.long _ex_dmult         /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */
	.long _ex_trap_c        /* 0x28 - Emulation Watchpoint */
	.long _ex_trap_c        /* 0x29 - Instruction fetch access error (535 only) */
	.long _ex_trap_c        /* 0x2A - Instruction fetch misaligned */
	.long _ex_trap_c        /* 0x2B - Instruction CPLB protection Violation */
	.long _ex_icplb_miss    /* 0x2C - Instruction CPLB miss */
	.long _ex_trap_c        /* 0x2D - Instruction CPLB Multiple Hits */
	.long _ex_trap_c        /* 0x2E - Illegal use of Supervisor Resource */
	.long _ex_trap_c        /* 0x2F - Reserved */
	.long _ex_trap_c        /* 0x30 - Reserved */
	.long _ex_trap_c        /* 0x31 - Reserved */
	.long _ex_trap_c        /* 0x32 - Reserved */
	.long _ex_trap_c        /* 0x33 - Reserved */
	.long _ex_trap_c        /* 0x34 - Reserved */
	.long _ex_trap_c        /* 0x35 - Reserved */
	.long _ex_trap_c        /* 0x36 - Reserved */
	.long _ex_trap_c        /* 0x37 - Reserved */
	.long _ex_trap_c        /* 0x38 - Reserved */
	.long _ex_trap_c        /* 0x39 - Reserved */
	.long _ex_trap_c        /* 0x3A - Reserved */
	.long _ex_trap_c        /* 0x3B - Reserved */
	.long _ex_trap_c        /* 0x3C - Reserved */
	.long _ex_trap_c        /* 0x3D - Reserved */
	.long _ex_trap_c        /* 0x3E - Reserved */
	.long _ex_trap_c        /* 0x3F - Reserved */
END(_ex_table)
1312
ENTRY(_sys_call_table)
	/* Linux system-call dispatch table, indexed by syscall number.
	 * Each slot is the address of the handler; unimplemented, obsolete,
	 * or foreign-arch numbers point at _sys_ni_syscall ("not implemented").
	 * Keep in sync with the syscall numbering in <asm/unistd.h>.
	 */
	.long _sys_restart_syscall	/* 0 */
	.long _sys_exit
	.long _sys_ni_syscall	/* fork */
	.long _sys_read
	.long _sys_write
	.long _sys_open		/* 5 */
	.long _sys_close
	.long _sys_ni_syscall	/* old waitpid */
	.long _sys_creat
	.long _sys_link
	.long _sys_unlink	/* 10 */
	.long _sys_execve
	.long _sys_chdir
	.long _sys_time
	.long _sys_mknod
	.long _sys_chmod		/* 15 */
	.long _sys_chown	/* chown16 */
	.long _sys_ni_syscall	/* old break syscall holder */
	.long _sys_ni_syscall	/* old stat */
	.long _sys_lseek
	.long _sys_getpid	/* 20 */
	.long _sys_mount
	.long _sys_ni_syscall	/* old umount */
	.long _sys_setuid
	.long _sys_getuid
	.long _sys_stime		/* 25 */
	.long _sys_ptrace
	.long _sys_alarm
	.long _sys_ni_syscall	/* old fstat */
	.long _sys_pause
	.long _sys_ni_syscall	/* old utime */ /* 30 */
	.long _sys_ni_syscall	/* old stty syscall holder */
	.long _sys_ni_syscall	/* old gtty syscall holder */
	.long _sys_access
	.long _sys_nice
	.long _sys_ni_syscall	/* 35 */ /* old ftime syscall holder */
	.long _sys_sync
	.long _sys_kill
	.long _sys_rename
	.long _sys_mkdir
	.long _sys_rmdir		/* 40 */
	.long _sys_dup
	.long _sys_pipe
	.long _sys_times
	.long _sys_ni_syscall	/* old prof syscall holder */
	.long _sys_brk		/* 45 */
	.long _sys_setgid
	.long _sys_getgid
	.long _sys_ni_syscall	/* old sys_signal */
	.long _sys_geteuid	/* geteuid16 */
	.long _sys_getegid	/* getegid16 */	/* 50 */
	.long _sys_acct
	.long _sys_umount	/* recycled never used phys() */
	.long _sys_ni_syscall	/* old lock syscall holder */
	.long _sys_ioctl
	.long _sys_fcntl		/* 55 */
	.long _sys_ni_syscall	/* old mpx syscall holder */
	.long _sys_setpgid
	.long _sys_ni_syscall	/* old ulimit syscall holder */
	.long _sys_ni_syscall	/* old old uname */
	.long _sys_umask		/* 60 */
	.long _sys_chroot
	.long _sys_ustat
	.long _sys_dup2
	.long _sys_getppid
	.long _sys_getpgrp	/* 65 */
	.long _sys_setsid
	.long _sys_ni_syscall	/* old sys_sigaction */
	.long _sys_sgetmask
	.long _sys_ssetmask
	.long _sys_setreuid	/* setreuid16 */	/* 70 */
	.long _sys_setregid	/* setregid16 */
	.long _sys_ni_syscall	/* old sys_sigsuspend */
	.long _sys_ni_syscall	/* old sys_sigpending */
	.long _sys_sethostname
	.long _sys_setrlimit	/* 75 */
	.long _sys_ni_syscall	/* old getrlimit */
	.long _sys_getrusage
	.long _sys_gettimeofday
	.long _sys_settimeofday
	.long _sys_getgroups	/* getgroups16 */	/* 80 */
	.long _sys_setgroups	/* setgroups16 */
	.long _sys_ni_syscall	/* old_select */
	.long _sys_symlink
	.long _sys_ni_syscall	/* old lstat */
	.long _sys_readlink	/* 85 */
	.long _sys_uselib
	.long _sys_ni_syscall	/* sys_swapon */
	.long _sys_reboot
	.long _sys_ni_syscall	/* old_readdir */
	.long _sys_ni_syscall	/* sys_mmap */	/* 90 */
	.long _sys_munmap
	.long _sys_truncate
	.long _sys_ftruncate
	.long _sys_fchmod
	.long _sys_fchown	/* fchown16 */	/* 95 */
	.long _sys_getpriority
	.long _sys_setpriority
	.long _sys_ni_syscall	/* old profil syscall holder */
	.long _sys_statfs
	.long _sys_fstatfs	/* 100 */
	.long _sys_ni_syscall	/* 101 */
	.long _sys_ni_syscall	/* old sys_socketcall */
	.long _sys_syslog
	.long _sys_setitimer
	.long _sys_getitimer	/* 105 */
	.long _sys_newstat
	.long _sys_newlstat
	.long _sys_newfstat
	.long _sys_ni_syscall	/* old uname */
	.long _sys_ni_syscall	/* iopl for i386 */ /* 110 */
	.long _sys_vhangup
	.long _sys_ni_syscall	/* obsolete idle() syscall */
	.long _sys_ni_syscall	/* vm86old for i386 */
	.long _sys_wait4
	.long _sys_ni_syscall	/* 115 */ /* sys_swapoff */
	.long _sys_sysinfo
	.long _sys_ni_syscall	/* old sys_ipc */
	.long _sys_fsync
	.long _sys_ni_syscall	/* old sys_sigreturn */
	.long _bfin_clone		/* 120 */
	.long _sys_setdomainname
	.long _sys_newuname
	.long _sys_ni_syscall	/* old sys_modify_ldt */
	.long _sys_adjtimex
	.long _sys_mprotect	/* 125 */
	.long _sys_ni_syscall	/* old sys_sigprocmask */
	.long _sys_ni_syscall	/* old "creat_module" */
	.long _sys_init_module
	.long _sys_delete_module
	.long _sys_ni_syscall	/* 130: old "get_kernel_syms" */
	.long _sys_quotactl
	.long _sys_getpgid
	.long _sys_fchdir
	.long _sys_bdflush
	.long _sys_ni_syscall	/* 135 */ /* sys_sysfs */
	.long _sys_personality
	.long _sys_ni_syscall	/* for afs_syscall */
	.long _sys_setfsuid	/* setfsuid16 */
	.long _sys_setfsgid	/* setfsgid16 */
	.long _sys_llseek	/* 140 */
	.long _sys_getdents
	.long _sys_ni_syscall	/* sys_select */
	.long _sys_flock
	.long _sys_msync
	.long _sys_readv		/* 145 */
	.long _sys_writev
	.long _sys_getsid
	.long _sys_fdatasync
	.long _sys_sysctl
	.long _sys_mlock	/* 150 */
	.long _sys_munlock
	.long _sys_mlockall
	.long _sys_munlockall
	.long _sys_sched_setparam
	.long _sys_sched_getparam /* 155 */
	.long _sys_sched_setscheduler
	.long _sys_sched_getscheduler
	.long _sys_sched_yield
	.long _sys_sched_get_priority_max
	.long _sys_sched_get_priority_min  /* 160 */
	.long _sys_sched_rr_get_interval
	.long _sys_nanosleep
	.long _sys_mremap
	.long _sys_setresuid	/* setresuid16 */
	.long _sys_getresuid	/* getresuid16 */	/* 165 */
	.long _sys_ni_syscall	/* for vm86 */
	.long _sys_ni_syscall	/* old "query_module" */
	.long _sys_ni_syscall	/* sys_poll */
	.long _sys_ni_syscall   /* old nfsservctl */
	.long _sys_setresgid	/* setresgid16 */	/* 170 */
	.long _sys_getresgid	/* getresgid16 */
	.long _sys_prctl
	.long _sys_rt_sigreturn
	.long _sys_rt_sigaction
	.long _sys_rt_sigprocmask /* 175 */
	.long _sys_rt_sigpending
	.long _sys_rt_sigtimedwait
	.long _sys_rt_sigqueueinfo
	.long _sys_rt_sigsuspend
	.long _sys_pread64	/* 180 */
	.long _sys_pwrite64
	.long _sys_lchown	/* lchown16 */
	.long _sys_getcwd
	.long _sys_capget
	.long _sys_capset	/* 185 */
	.long _sys_sigaltstack
	.long _sys_sendfile
	.long _sys_ni_syscall	/* streams1 */
	.long _sys_ni_syscall	/* streams2 */
	.long _sys_vfork		/* 190 */
	.long _sys_getrlimit
	.long _sys_mmap_pgoff
	.long _sys_truncate64
	.long _sys_ftruncate64
	.long _sys_stat64	/* 195 */
	.long _sys_lstat64
	.long _sys_fstat64
	.long _sys_chown
	.long _sys_getuid
	.long _sys_getgid	/* 200 */
	.long _sys_geteuid
	.long _sys_getegid
	.long _sys_setreuid
	.long _sys_setregid
	.long _sys_getgroups	/* 205 */
	.long _sys_setgroups
	.long _sys_fchown
	.long _sys_setresuid
	.long _sys_getresuid
	.long _sys_setresgid	/* 210 */
	.long _sys_getresgid
	.long _sys_lchown
	.long _sys_setuid
	.long _sys_setgid
	.long _sys_setfsuid	/* 215 */
	.long _sys_setfsgid
	.long _sys_pivot_root
	.long _sys_mincore
	.long _sys_madvise
	.long _sys_getdents64	/* 220 */
	.long _sys_fcntl64
	.long _sys_ni_syscall	/* reserved for TUX */
	.long _sys_ni_syscall
	.long _sys_gettid
	.long _sys_readahead	/* 225 */
	.long _sys_setxattr
	.long _sys_lsetxattr
	.long _sys_fsetxattr
	.long _sys_getxattr
	.long _sys_lgetxattr	/* 230 */
	.long _sys_fgetxattr
	.long _sys_listxattr
	.long _sys_llistxattr
	.long _sys_flistxattr
	.long _sys_removexattr	/* 235 */
	.long _sys_lremovexattr
	.long _sys_fremovexattr
	.long _sys_tkill
	.long _sys_sendfile64
	.long _sys_futex		/* 240 */
	.long _sys_sched_setaffinity
	.long _sys_sched_getaffinity
	.long _sys_ni_syscall	/* sys_set_thread_area */
	.long _sys_ni_syscall	/* sys_get_thread_area */
	.long _sys_io_setup	/* 245 */
	.long _sys_io_destroy
	.long _sys_io_getevents
	.long _sys_io_submit
	.long _sys_io_cancel
	.long _sys_ni_syscall	/* 250 */ /* sys_alloc_hugepages */
	.long _sys_ni_syscall	/* sys_freec_hugepages */
	.long _sys_exit_group
	.long _sys_lookup_dcookie
	.long _sys_bfin_spinlock
	.long _sys_epoll_create	/* 255 */
	.long _sys_epoll_ctl
	.long _sys_epoll_wait
	.long _sys_ni_syscall /* remap_file_pages */
	.long _sys_set_tid_address
	.long _sys_timer_create	/* 260 */
	.long _sys_timer_settime
	.long _sys_timer_gettime
	.long _sys_timer_getoverrun
	.long _sys_timer_delete
	.long _sys_clock_settime /* 265 */
	.long _sys_clock_gettime
	.long _sys_clock_getres
	.long _sys_clock_nanosleep
	.long _sys_statfs64
	.long _sys_fstatfs64	/* 270 */
	.long _sys_tgkill
	.long _sys_utimes
	.long _sys_fadvise64_64
	.long _sys_ni_syscall /* vserver */
	.long _sys_mbind	/* 275 */
	.long _sys_ni_syscall /* get_mempolicy */
	.long _sys_ni_syscall /* set_mempolicy */
	.long _sys_mq_open
	.long _sys_mq_unlink
	.long _sys_mq_timedsend	/* 280 */
	.long _sys_mq_timedreceive
	.long _sys_mq_notify
	.long _sys_mq_getsetattr
	.long _sys_ni_syscall /* kexec_load */
	.long _sys_waitid	/* 285 */
	.long _sys_add_key
	.long _sys_request_key
	.long _sys_keyctl
	.long _sys_ioprio_set
	.long _sys_ioprio_get	/* 290 */
	.long _sys_inotify_init
	.long _sys_inotify_add_watch
	.long _sys_inotify_rm_watch
	.long _sys_ni_syscall /* migrate_pages */
	.long _sys_openat	/* 295 */
	.long _sys_mkdirat
	.long _sys_mknodat
	.long _sys_fchownat
	.long _sys_futimesat
	.long _sys_fstatat64	/* 300 */
	.long _sys_unlinkat
	.long _sys_renameat
	.long _sys_linkat
	.long _sys_symlinkat
	.long _sys_readlinkat	/* 305 */
	.long _sys_fchmodat
	.long _sys_faccessat
	.long _sys_pselect6
	.long _sys_ppoll
	.long _sys_unshare	/* 310 */
	.long _sys_sram_alloc
	.long _sys_sram_free
	.long _sys_dma_memcpy
	.long _sys_accept
	.long _sys_bind		/* 315 */
	.long _sys_connect
	.long _sys_getpeername
	.long _sys_getsockname
	.long _sys_getsockopt
	.long _sys_listen	/* 320 */
	.long _sys_recv
	.long _sys_recvfrom
	.long _sys_recvmsg
	.long _sys_send
	.long _sys_sendmsg	/* 325 */
	.long _sys_sendto
	.long _sys_setsockopt
	.long _sys_shutdown
	.long _sys_socket
	.long _sys_socketpair	/* 330 */
	.long _sys_semctl
	.long _sys_semget
	.long _sys_semop
	.long _sys_msgctl
	.long _sys_msgget	/* 335 */
	.long _sys_msgrcv
	.long _sys_msgsnd
	.long _sys_shmat
	.long _sys_shmctl
	.long _sys_shmdt	/* 340 */
	.long _sys_shmget
	.long _sys_splice
	.long _sys_sync_file_range
	.long _sys_tee
	.long _sys_vmsplice	/* 345 */
	.long _sys_epoll_pwait
	.long _sys_utimensat
	.long _sys_signalfd
	.long _sys_timerfd_create
	.long _sys_eventfd	/* 350 */
	.long _sys_pread64
	.long _sys_pwrite64
	.long _sys_fadvise64
	.long _sys_set_robust_list
	.long _sys_get_robust_list	/* 355 */
	.long _sys_fallocate
	.long _sys_semtimedop
	.long _sys_timerfd_settime
	.long _sys_timerfd_gettime
	.long _sys_signalfd4		/* 360 */
	.long _sys_eventfd2
	.long _sys_epoll_create1
	.long _sys_dup3
	.long _sys_pipe2
	.long _sys_inotify_init1	/* 365 */
	.long _sys_preadv
	.long _sys_pwritev
	.long _sys_rt_tgsigqueueinfo
	.long _sys_perf_event_open
	.long _sys_recvmmsg		/* 370 */
	.long _sys_fanotify_init
	.long _sys_fanotify_mark
	.long _sys_prlimit64
	.long _sys_cacheflush
	.long _sys_name_to_handle_at	/* 375 */
	.long _sys_open_by_handle_at
	.long _sys_clock_adjtime
	.long _sys_syncfs
	.long _sys_setns
	.long _sys_sendmmsg		/* 380 */
	.long _sys_process_vm_readv
	.long _sys_process_vm_writev

	/* pad the remaining slots up to NR_syscalls with "not implemented" */
	.rept NR_syscalls-(.-_sys_call_table)/4
	.long _sys_ni_syscall
	.endr
END(_sys_call_table)
1702