/*******************************************************************************
Copyright (c) 2006-2015 Cadence Design Systems Inc.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------

        XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS

  Xtensa low level exception and interrupt vectors and handlers for an RTOS.

  Interrupt handlers and user exception handlers support interaction with
  the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and
  after the user's specific interrupt handlers. These macros are defined in
  xtensa_<rtos>.h to call suitable functions in a specific RTOS.

  Users can install application-specific interrupt handlers for low and
  medium level interrupts by calling xt_set_interrupt_handler(). These
  handlers can be written in C and must obey the C calling convention. The
  handler table is indexed by the interrupt number. Each handler may be
  provided with an argument.
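
  For illustration, here is a minimal, hedged sketch of installing such a
  handler from C. The handler name, interrupt number and argument below are
  hypothetical, and the header path is an assumption (it varies by port);
  only xt_set_interrupt_handler() and xt_ints_on() are taken from the port's
  C API:

      #include <xtensa/xtensa_api.h>  // assumed location of xt_handler, xt_set_interrupt_handler(), xt_ints_on()

      // Hypothetical handler: follows the C calling convention and receives
      // the argument that was registered with it.
      static void my_device_isr(void *arg)
      {
          int unit = (int)arg;        // per-handler argument, cast as needed
          (void)unit;                 // service/acknowledge the device here
      }

      void install_my_isr(void)
      {
          // Interrupt number 17 is only an example; use the interrupt number
          // mapped to your peripheral in your configuration.
          xt_set_interrupt_handler(17, my_device_isr, (void *)0);
          xt_ints_on(1u << 17);       // enable the interrupt
      }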

  Note that the system timer interrupt is handled specially, and is
  dispatched to the RTOS-specific handler. This timer cannot be hooked
  by application code.

  Optional hooks are also provided to install a handler per level at
  run-time, made available by compiling this source file with
  '-DXT_INTEXC_HOOKS' (useful for automated testing).

!!  This file is a template that usually needs to be modified to handle       !!
!!  application specific interrupts. Search USER_EDIT for helpful comments    !!
!!  on where to insert handlers and how to write them.                        !!

  Users can also install application-specific exception handlers in the
  same way by calling xt_set_exception_handler(). One handler slot is
  provided for each exception type. Note that some exceptions are handled
  by the porting layer itself and cannot be taken over by application
  code in this manner. These are the alloca, syscall, and coprocessor
  exceptions.

  The exception handlers can be written in C and must follow the C calling
  convention. Each handler is passed a pointer to an exception frame as
  its single argument. The exception frame is created on the stack and
  holds the saved context of the thread that took the exception. If the
  handler returns, the context will be restored and the instruction that
  caused the exception will be retried. If the handler makes any changes
  to the saved state in the exception frame, the changes will be applied
  when restoring the context.
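
  As a hedged illustration, an application exception handler might look like
  the sketch below. The handler name and the exception number are
  hypothetical, the header paths are assumptions (they vary by port), and the
  XtExcFrame member shown (pc) is assumed from this port's xtensa_context.h:

      #include <xtensa/xtensa_api.h>      // assumed location of xt_set_exception_handler()
      #include <xtensa/xtensa_context.h>  // assumed location of XtExcFrame

      // Receives a pointer to the exception frame described above; editing
      // the saved state here changes what is restored on return.
      static void my_exc_handler(XtExcFrame *frame)
      {
          // Illustrative only: skip a 3-byte faulting instruction instead of
          // retrying it (a real handler must decode the instruction size).
          frame->pc += 3;
      }

      void install_my_exc_handler(void)
      {
          // EXCCAUSE value 3 (load/store error) is only an example.
          xt_set_exception_handler(3, my_exc_handler);
      }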

  Because Xtensa is a configurable architecture, this port supports all
  user-generated configurations (except restrictions stated in the release notes).
  This is accomplished by conditional compilation using macros and functions
  defined in the Xtensa HAL (hardware abstraction layer) for your configuration.
  Only the relevant parts of this file will be included in your RTOS build.
  For example, this file provides interrupt vector templates for all types and
  all priority levels, but only the ones in your configuration are built.

  NOTES on the use of 'call0' for long jumps instead of 'j':
   1. This file should be assembled with the -mlongcalls option to xt-xcc.
   2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to
      the sequence 'l32r a0, dest' 'callx0 a0', which works regardless of the
      distance from the call to the destination. The linker then relaxes
      it back to 'call0 dest' if it determines that dest is within range.
      This allows more flexibility in locating code without the performance
      overhead of the 'l32r' literal data load in cases where the destination
      is in range of 'call0'. There is an additional benefit in that 'call0'
      has a longer range than 'j' due to the target being word-aligned, so
      the 'l32r' sequence is less likely to be needed.
   3. The use of 'call0' with -mlongcalls requires that register a0 not be
      live at the time of the call, which is always the case for a function
      call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.
   4. This use of 'call0' is independent of the C function call ABI.

*******************************************************************************/

#include "xtensa_rtos.h"
#include "esp_private/panic_reason.h"
#include "sdkconfig.h"
#include "soc/soc.h"

/*
  Define for workaround: pin tasks with no CPU affinity to a CPU when the FPU is used.
  Update this offset whenever the TCB structure changes.
*/
#define TASKTCB_XCOREID_OFFSET ((0x38+configMAX_TASK_NAME_LEN+3)&~3)
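/* For example, with configMAX_TASK_NAME_LEN == 16 (the usual default, an
   assumption here) this evaluates to (0x38 + 16 + 3) & ~3 = 0x48. */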

/*
--------------------------------------------------------------------------------
    In order for backtracing to be able to trace from the pre-exception stack
    across to the exception stack (including nested interrupts), we need to create
    a pseudo base-save area to make it appear like the exception dispatcher was
    triggered by a CALL4 from the pre-exception code. In reality, the exception
    dispatcher uses the same window as the pre-exception code, and only CALL0s are
    used within the exception dispatcher.

    To create the pseudo base-save area, we need to store a copy of the pre-exception's
    base save area (a0 to a4) below the exception dispatcher's SP. EXCSAVE_x will
    be used to store a copy of the SP that points to the interrupted code's exception
    frame, just in case the exception dispatcher's SP does not point to the exception
    frame (which is the case when switching from the task to the interrupt stack).

    Clearing the pseudo base-save area is unnecessary as the interrupt dispatcher
    will restore the current SP to that of the pre-exception SP.
--------------------------------------------------------------------------------
*/
#ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE
#define XT_DEBUG_BACKTRACE    1
#endif


/*
--------------------------------------------------------------------------------
  Defines used to access _xtos_interrupt_table.
--------------------------------------------------------------------------------
*/
#define XIE_HANDLER     0
#define XIE_ARG         4
#define XIE_SIZE        8


/*
  Macro get_percpu_entry_for - convert a per-core index into an entry in a multicore table.
  Basically does reg = reg * portNUM_PROCESSORS + current_core_id.
  Multiple versions here to optimize for specific portNUM_PROCESSORS values.
*/
    .macro get_percpu_entry_for reg scratch
#if (portNUM_PROCESSORS == 1)
    /* No need to do anything */
#elif  (portNUM_PROCESSORS == 2)
    /* Optimized 2-core code. */
    getcoreid \scratch
    addx2 \reg,\reg,\scratch
#else
    /* Generalized n-core code. Untested! */
    movi \scratch,portNUM_PROCESSORS
    mull \scratch,\reg,\scratch
    getcoreid \reg
    add \reg,\scratch,\reg
#endif
    .endm
/*
--------------------------------------------------------------------------------
  Macro extract_msb - return the input with only the highest bit set.

  Input  : "ain"  - Input value, clobbered.
  Output : "aout" - Output value, has only one bit set, MSB of "ain".
  The two arguments must be different AR registers.
--------------------------------------------------------------------------------
*/

    .macro  extract_msb     aout ain
1:
    addi    \aout, \ain, -1         /* aout = ain - 1        */
    and     \ain, \ain, \aout       /* ain  = ain & aout     */
    bnez    \ain, 1b                /* repeat until ain == 0 */
    addi    \aout, \aout, 1         /* return aout + 1       */
    .endm
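/* Hedged illustration: with ain = 0x58 (binary 1011000) the loop clears the
   lowest set bit each pass (0x50, then 0x40, then 0), leaving aout = 0x40,
   i.e. only the most significant set bit of the original input. */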

/*
--------------------------------------------------------------------------------
  Macro dispatch_c_isr - dispatch interrupts to user ISRs.
  This will dispatch to user handlers (if any) that are registered in the
  XTOS dispatch table (_xtos_interrupt_table). These handlers would have
  been registered by calling _xtos_set_interrupt_handler(). There is one
  exception - the timer interrupt used by the OS will not be dispatched
  to a user handler - this must be handled by the caller of this macro.

  Level triggered and software interrupts are automatically deasserted by
  this code.

  ASSUMPTIONS:
    -- PS.INTLEVEL is set to "level" at entry
    -- PS.EXCM = 0, C calling enabled

  NOTE: For CALL0 ABI, a12-a15 have not yet been saved.

  NOTE: This macro will use registers a0 and a2-a7. The arguments are:
    level -- interrupt level
    mask  -- interrupt bitmask for this level
--------------------------------------------------------------------------------
*/

    .macro  dispatch_c_isr    level  mask

    #ifdef CONFIG_PM_TRACE
    movi a6, 0 /* = ESP_PM_TRACE_IDLE */
    getcoreid a7
    call4 esp_pm_trace_exit
    #endif // CONFIG_PM_TRACE

    /* Get mask of pending, enabled interrupts at this level into a2. */

.L_xt_user_int_&level&:
    rsr     a2, INTENABLE
    rsr     a3, INTERRUPT
    movi    a4, \mask
    and     a2, a2, a3
    and     a2, a2, a4
    beqz    a2, 9f                          /* nothing to do */

    /* This bit of code provides a nice debug backtrace in the debugger.
       It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
       if you want to save the cycles.
       At this point, the exception frame should have been allocated and filled,
       and current sp points to the interrupt stack (for non-nested interrupt)
       or below the allocated exception frame (for nested interrupts). Copy the
       pre-exception's base save area below the current SP.
    */
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    rsr     a0, EXCSAVE_1 + \level - 1      /* Get exception frame pointer stored in EXCSAVE_x */
    l32i    a3, a0, XT_STK_A0               /* Copy pre-exception a0 (return address) */
    s32e    a3, a1, -16
    l32i    a3, a0, XT_STK_A1               /* Copy pre-exception a1 (stack pointer) */
    s32e    a3, a1, -12
    /* Backtracing only needs a0 and a1, no need to create full base save area.
       Also need to change current frame's return address to point to pre-exception's
       last run instruction.
     */
    rsr     a0, EPC_1 + \level - 1          /* return address */
    movi    a4, 0xC0000000                  /* constant with top 2 bits set (call size) */
    or      a0, a0, a4                      /* set top 2 bits */
    addx2   a0, a4, a0                      /* clear top bit -- simulating call4 size   */
    #endif
    #endif

    #ifdef CONFIG_PM_ENABLE
    call4 esp_pm_impl_isr_hook
    #endif

    #ifdef XT_INTEXC_HOOKS
    /* Call interrupt hook if present to (pre)handle interrupts. */
    movi    a4, _xt_intexc_hooks
    l32i    a4, a4, \level << 2
    beqz    a4, 2f
    #ifdef __XTENSA_CALL0_ABI__
    callx0  a4
    beqz    a2, 9f
    #else
    mov     a6, a2
    callx4  a4
    beqz    a6, 9f
    mov     a2, a6
    #endif
2:
    #endif

    /* Now look up in the dispatch table and call user ISR if any. */
    /* If multiple bits are set then MSB has highest priority.     */

    extract_msb  a4, a2                     /* a4 = MSB of a2, a2 trashed */

    #ifdef XT_USE_SWPRI
    /* Enable all interrupts at this level that are numerically higher
       than the one we just selected, since they are treated as higher
       priority.
    */
    movi    a3, \mask                       /* a3 = all interrupts at this level */
    add     a2, a4, a4                      /* a2 = a4 << 1 */
    addi    a2, a2, -1                      /* a2 = mask of 1's <= a4 bit */
    and     a2, a2, a3                      /* a2 = mask of all bits <= a4 at this level */
    movi    a3, _xt_intdata
    l32i    a6, a3, 4                       /* a6 = _xt_vpri_mask */
    neg     a2, a2
    addi    a2, a2, -1                      /* a2 = mask to apply */
    and     a5, a6, a2                      /* mask off all bits <= a4 bit */
    s32i    a5, a3, 4                       /* update _xt_vpri_mask */
    rsr     a3, INTENABLE
    and     a3, a3, a2                      /* mask off all bits <= a4 bit */
    wsr     a3, INTENABLE
    rsil    a3, \level - 1                  /* lower interrupt level by 1 */
    #endif

    movi    a3, XT_TIMER_INTEN              /* a3 = timer interrupt bit */
    wsr     a4, INTCLEAR                    /* clear sw or edge-triggered interrupt */
    beq     a3, a4, 7f                      /* if timer interrupt then skip table */

    find_ms_setbit a3, a4, a3, 0            /* a3 = interrupt number */

    get_percpu_entry_for a3, a12
    movi    a4, _xt_interrupt_table
    addx8   a3, a3, a4                      /* a3 = address of interrupt table entry */
    l32i    a4, a3, XIE_HANDLER             /* a4 = handler address */
    #ifdef __XTENSA_CALL0_ABI__
    mov     a12, a6                         /* save in callee-saved reg */
    l32i    a2, a3, XIE_ARG                 /* a2 = handler arg */
    callx0  a4                              /* call handler */
    mov     a2, a12
    #else
    mov     a2, a6                          /* save in windowed reg */
    l32i    a6, a3, XIE_ARG                 /* a6 = handler arg */
    callx4  a4                              /* call handler */
    #endif

    #ifdef XT_USE_SWPRI
    j       8f
    #else
    j       .L_xt_user_int_&level&          /* check for more interrupts */
    #endif

7:

    .ifeq XT_TIMER_INTPRI - \level
.L_xt_user_int_timer_&level&:
    /*
    Interrupt handler for the RTOS tick timer if at this level.
    We'll be reading the interrupt state again after this call
    so no need to preserve any registers except a6 (vpri_mask).
    */

    movi    a2, port_switch_flag
    movi    a3, 1
    s32i    a3, a2, 0
    #ifdef __XTENSA_CALL0_ABI__
    mov     a12, a6
    call0   XT_RTOS_TIMER_INT
    mov     a2, a12
    #else
    mov     a2, a6
    call4   XT_RTOS_TIMER_INT
    #endif
    .endif

    #ifdef XT_USE_SWPRI
    j       8f
    #else
    j       .L_xt_user_int_&level&          /* check for more interrupts */
    #endif

    #ifdef XT_USE_SWPRI
8:
    /* Restore old value of _xt_vpri_mask from a2. Also update INTENABLE from
       virtual _xt_intenable which _could_ have changed during interrupt
       processing. */

    movi    a3, _xt_intdata
    l32i    a4, a3, 0                       /* a4 = _xt_intenable    */
    s32i    a2, a3, 4                       /* update _xt_vpri_mask  */
    and     a4, a4, a2                      /* a4 = masked intenable */
    wsr     a4, INTENABLE                   /* update INTENABLE      */
    #endif

9:
    /* done */

    .endm


    .section    .rodata, "a"
    .align      4

/*
--------------------------------------------------------------------------------
    Hooks to dynamically install handlers for exceptions and interrupts.
    Allows automated regression frameworks to install handlers per test.
    Consists of an array of function pointers indexed by interrupt level,
    with index 0 containing the entry for user exceptions.
    Initialized with all 0s, meaning no handler is installed at each level.
    See comment in xtensa_rtos.h for more details.

    *WARNING*  This array is for all CPUs, that is, installing a hook for
    one CPU will install it for all others as well!
--------------------------------------------------------------------------------
*/

    #ifdef XT_INTEXC_HOOKS
    .data
    .global     _xt_intexc_hooks
    .type       _xt_intexc_hooks,@object
    .align      4

_xt_intexc_hooks:
    .fill       XT_INTEXC_HOOK_NUM, 4, 0
    #endif


/*
--------------------------------------------------------------------------------
  EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS
  (except window exception vectors).

  Each vector goes at a predetermined location according to the Xtensa
  hardware configuration, which is ensured by its placement in a special
  section known to the Xtensa linker support package (LSP). It performs
  the minimum necessary before jumping to the handler in the .text section.

  The corresponding handler goes in the normal .text section. It sets up
  the appropriate stack frame, saves a few vector-specific registers and
  calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
  and enter the RTOS, then sets up a C environment. It then calls the
  user's interrupt handler code (which may be coded in C) and finally
  calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.

  While XT_RTOS_INT_EXIT does not return directly to the interruptee,
  eventually the RTOS scheduler will want to dispatch the interrupted
  task or handler. The scheduler will return to the exit point that was
  saved in the interrupt stack frame at XT_STK_EXIT.
--------------------------------------------------------------------------------
*/


/*
--------------------------------------------------------------------------------
Debug Exception.
--------------------------------------------------------------------------------
*/

#if XCHAL_HAVE_DEBUG

    .begin      literal_prefix .DebugExceptionVector
    .section    .DebugExceptionVector.text, "ax"
    .global     _DebugExceptionVector
    .align      4
    .global     xt_debugexception
_DebugExceptionVector:
    wsr     a0, EXCSAVE+XCHAL_DEBUGLEVEL    /* preserve a0 */
    call0   xt_debugexception               /* load exception handler */

    .end        literal_prefix

#endif

/*
--------------------------------------------------------------------------------
Double Exception.
Double exceptions are not a normal occurrence. They indicate a bug of some kind.
--------------------------------------------------------------------------------
*/

#ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR

    .begin      literal_prefix .DoubleExceptionVector
    .section    .DoubleExceptionVector.text, "ax"
    .global     _DoubleExceptionVector
    .align      4

_DoubleExceptionVector:

    #if XCHAL_HAVE_DEBUG
    break   1, 4                            /* unhandled double exception */
    #endif
    movi    a0,PANIC_RSN_DOUBLEEXCEPTION
    wsr     a0,EXCCAUSE
    call0   _xt_panic                       /* does not return */
    rfde                                    /* make a0 point here not later */

    .end        literal_prefix

#endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */

/*
--------------------------------------------------------------------------------
Kernel Exception (including Level 1 Interrupt from kernel mode).
--------------------------------------------------------------------------------
*/

    .begin      literal_prefix .KernelExceptionVector
    .section    .KernelExceptionVector.text, "ax"
    .global     _KernelExceptionVector
    .align      4

_KernelExceptionVector:

    wsr     a0, EXCSAVE_1                   /* preserve a0 */
    call0   _xt_kernel_exc                  /* kernel exception handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

    .section .iram1,"ax"
    .align      4

_xt_kernel_exc:
    #if XCHAL_HAVE_DEBUG
    break   1, 0                            /* unhandled kernel exception */
    #endif
    movi    a0,PANIC_RSN_KERNELEXCEPTION
    wsr     a0,EXCCAUSE
    call0   _xt_panic                       /* does not return */
    rfe                                     /* make a0 point here not there */


/*
--------------------------------------------------------------------------------
User Exception (including Level 1 Interrupt from user mode).
--------------------------------------------------------------------------------
*/

    .begin      literal_prefix .UserExceptionVector
    .section    .UserExceptionVector.text, "ax"
    .global     _UserExceptionVector
    .type       _UserExceptionVector,@function
    .align      4

_UserExceptionVector:

    wsr     a0, EXCSAVE_1                   /* preserve a0 */
    call0   _xt_user_exc                    /* user exception handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

/*
--------------------------------------------------------------------------------
  Insert some waypoints for jumping beyond the signed 8-bit range of
  conditional branch instructions, so the conditional branches to specific
  exception handlers are not taken in the mainline. Saves some cycles in the
  mainline.
--------------------------------------------------------------------------------
*/

#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
    .global   LoadStoreErrorHandler
    .global   AlignmentErrorHandler
#endif

    .section .iram1,"ax"

    #if XCHAL_HAVE_WINDOWED
    .align      4
_xt_to_alloca_exc:
    call0   _xt_alloca_exc                  /* in window vectors section */
    /* never returns here - call0 is used as a jump (see note at top) */
    #endif

    .align      4
_xt_to_syscall_exc:
    call0   _xt_syscall_exc
    /* never returns here - call0 is used as a jump (see note at top) */

    #if XCHAL_CP_NUM > 0
    .align      4
_xt_to_coproc_exc:
    call0   _xt_coproc_exc
    /* never returns here - call0 is used as a jump (see note at top) */
    #endif

#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
    .align      4
_call_loadstore_handler:
    call0   LoadStoreErrorHandler
    /* This returns only if the opcode is wrong or the address is out of range */
    j       .LS_exit

    .align      4
_call_alignment_handler:
    call0   AlignmentErrorHandler
    /* This returns only if the opcode is wrong or the address is out of range */
    addi    a0, a0, 1
    j       .LS_exit
#endif

/*
--------------------------------------------------------------------------------
  User exception handler.
--------------------------------------------------------------------------------
*/

    .type       _xt_user_exc,@function
    .align      4

_xt_user_exc:

    /* If level 1 interrupt then jump to the dispatcher */
    rsr     a0, EXCCAUSE
    beqi    a0, EXCCAUSE_LEVEL1INTERRUPT, _xt_lowint1

    /* Handle any coprocessor exceptions. Rely on the fact that exception
       numbers above EXCCAUSE_CP0_DISABLED all relate to the coprocessors.
    */
    #if XCHAL_CP_NUM > 0
    bgeui   a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
    #endif

    /* Handle alloca and syscall exceptions */
    #if XCHAL_HAVE_WINDOWED
    beqi    a0, EXCCAUSE_ALLOCA,  _xt_to_alloca_exc
    #endif
    beqi    a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc

#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
    beqi    a0, EXCCAUSE_LOAD_STORE_ERROR, _call_loadstore_handler

    addi    a0, a0, -1
    beqi    a0, 8, _call_alignment_handler
    addi    a0, a0, 1
.LS_exit:
#endif

    /* Handle all other exceptions. All can have user-defined handlers. */
    /* NOTE: we'll stay on the user stack for exception handling.       */

    /* Allocate exception frame and save minimal context. */
    mov     a0, sp
    addi    sp, sp, -XT_STK_FRMSZ
    s32i    a0, sp, XT_STK_A1
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -12                     /* for debug backtrace */
    #endif
    rsr     a0, PS                          /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_1                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -16                     /* for debug backtrace */
    #endif
    s32i    a12, sp, XT_STK_A12             /* _xt_context_save requires A12- */
    s32i    a13, sp, XT_STK_A13             /* A13 to have already been saved */
    call0   _xt_context_save

    /* Save exc cause and vaddr into exception frame */
    rsr     a0, EXCCAUSE
    s32i    a0, sp, XT_STK_EXCCAUSE
    rsr     a0, EXCVADDR
    s32i    a0, sp, XT_STK_EXCVADDR

    /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
    rsr     a0, EXCSAVE_1                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0

    /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS

    /*
        Create pseudo base save area. At this point, sp is still pointing to the
        allocated and filled exception stack frame.
    */
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    l32i    a3, sp, XT_STK_A0               /* Copy pre-exception a0 (return address) */
    s32e    a3, sp, -16
    l32i    a3, sp, XT_STK_A1               /* Copy pre-exception a1 (stack pointer) */
    s32e    a3, sp, -12
    rsr     a0, EPC_1                       /* return address for debug backtrace */
    movi    a5, 0xC0000000                  /* constant with top 2 bits set (call size) */
    rsync                                   /* wait for WSR.PS to complete */
    or      a0, a0, a5                      /* set top 2 bits */
    addx2   a0, a5, a0                      /* clear top bit -- thus simulating call4 size */
    #else
    rsync                                   /* wait for WSR.PS to complete */
    #endif
    #endif

    rsr     a2, EXCCAUSE                    /* recover exc cause */

    #ifdef XT_INTEXC_HOOKS
    /*
    Call exception hook to pre-handle exceptions (if installed).
    Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
    */
    movi    a4, _xt_intexc_hooks
    l32i    a4, a4, 0                       /* user exception hook index 0 */
    beqz    a4, 1f
.Ln_xt_user_exc_call_hook:
    #ifdef __XTENSA_CALL0_ABI__
    callx0  a4
    beqi    a2, -1, .L_xt_user_done
    #else
    mov     a6, a2
    callx4  a4
    beqi    a6, -1, .L_xt_user_done
    mov     a2, a6
    #endif
1:
    #endif

    rsr     a2, EXCCAUSE                    /* recover exc cause */
    movi    a3, _xt_exception_table
    get_percpu_entry_for a2, a4
    addx4   a4, a2, a3                      /* a4 = address of exception table entry */
    l32i    a4, a4, 0                       /* a4 = handler address */
    #ifdef __XTENSA_CALL0_ABI__
    mov     a2, sp                          /* a2 = pointer to exc frame */
    callx0  a4                              /* call handler */
    #else
    mov     a6, sp                          /* a6 = pointer to exc frame */
    callx4  a4                              /* call handler */
    #endif

.L_xt_user_done:

    /* Restore context and return */
    call0   _xt_context_restore
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, PS
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_1
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove exception frame */
    rsync                                   /* ensure PS and EPC written */
    rfe                                     /* PS.EXCM is cleared */


/*
--------------------------------------------------------------------------------
  Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
  on entry and used to return to a thread or interrupted interrupt handler.
--------------------------------------------------------------------------------
*/

    .global     _xt_user_exit
    .type       _xt_user_exit,@function
    .align      4
_xt_user_exit:
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, PS
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_1
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
    rsync                                   /* ensure PS and EPC written */
    rfe                                     /* PS.EXCM is cleared */


/*

--------------------------------------------------------------------------------
Syscall Exception Handler (jumped to from User Exception Handler).
Syscall 0 is required to spill the register windows (no-op in Call0 ABI).
Only syscall 0 is handled here. Other syscalls return -1 to caller in a2.
--------------------------------------------------------------------------------
*/

    .section .iram1,"ax"
    .type       _xt_syscall_exc,@function
    .align      4
_xt_syscall_exc:

    #ifdef __XTENSA_CALL0_ABI__
    /*
    Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI.
    Use a minimal stack frame (16B) to save A2 & A3 for scratch.
    PS.EXCM could be cleared here, but unlikely to improve worst-case latency.
    rsr     a0, PS
    addi    a0, a0, -PS_EXCM_MASK
    wsr     a0, PS
    */
    addi    sp, sp, -16
    s32i    a2, sp, 8
    s32i    a3, sp, 12
    #else   /* Windowed ABI */
    /*
    Save necessary context and spill the register windows.
    PS.EXCM is still set and must remain set until after the spill.
    Reuse context save function though it saves more than necessary.
    For this reason, a full interrupt stack frame is allocated.
    */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a12, sp, XT_STK_A12             /* _xt_context_save requires A12- */
    s32i    a13, sp, XT_STK_A13             /* A13 to have already been saved */
    call0   _xt_context_save
    #endif

    /*
    Grab the interruptee's PC and skip over the 'syscall' instruction.
    If it's at the end of a zero-overhead loop and it's not on the last
    iteration, decrement loop counter and skip to beginning of loop.
    */
    rsr     a2, EPC_1                       /* a2 = PC of 'syscall' */
    addi    a3, a2, 3                       /* ++PC                 */
    #if XCHAL_HAVE_LOOPS
    rsr     a0, LEND                        /* if (PC == LEND       */
    bne     a3, a0, 1f
    rsr     a0, LCOUNT                      /*     && LCOUNT != 0)  */
    beqz    a0, 1f                          /* {                    */
    addi    a0, a0, -1                      /*   --LCOUNT           */
    rsr     a3, LBEG                        /*   PC = LBEG          */
    wsr     a0, LCOUNT                      /* }                    */
    #endif
1:  wsr     a3, EPC_1                       /* update PC            */

    /* Restore interruptee's context and return from exception. */
    #ifdef __XTENSA_CALL0_ABI__
    l32i    a2, sp, 8
    l32i    a3, sp, 12
    addi    sp, sp, 16
    #else
    call0   _xt_context_restore
    addi    sp, sp, XT_STK_FRMSZ
    #endif
    movi    a0, -1
    movnez  a2, a0, a2                      /* return -1 if not syscall 0 */
    rsr     a0, EXCSAVE_1
    rfe

/*
--------------------------------------------------------------------------------
Co-Processor Exception Handler (jumped to from User Exception Handler).
These exceptions are generated by co-processor instructions, which are only
allowed in thread code (not in interrupts or kernel code). This restriction is
deliberately imposed to reduce the burden of state-save/restore in interrupts.
--------------------------------------------------------------------------------
*/
#if XCHAL_CP_NUM > 0

    .section .rodata, "a"

/* Offset to CP n save area in thread's CP save area. */
    .global _xt_coproc_sa_offset
    .type   _xt_coproc_sa_offset,@object
    .align  16                      /* minimize crossing cache boundaries */
_xt_coproc_sa_offset:
    .word   XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
    .word   XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA

/* Bitmask for CP n's CPENABLE bit. */
    .type   _xt_coproc_mask,@object
    .align  16,,8                   /* try to keep it all in one cache line */
    .set    i, 0
_xt_coproc_mask:
    .rept   XCHAL_CP_MAX
    .long   (i<<16) | (1<<i)    // upper 16-bits = i, lower = bitmask
    .set    i, i+1
    .endr

    .data

/* Owner thread of CP n, identified by thread's CP save area (0 = unowned). */
    .global _xt_coproc_owner_sa
    .type   _xt_coproc_owner_sa,@object
    .align  16,,XCHAL_CP_MAX<<2     /* minimize crossing cache boundaries */
_xt_coproc_owner_sa:
    .space  (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2

    .section .iram1,"ax"


    .align  4
.L_goto_invalid:
    j   .L_xt_coproc_invalid    /* not in a thread (invalid) */
    .align  4
.L_goto_done:
    j   .L_xt_coproc_done


/*
--------------------------------------------------------------------------------
  Coprocessor exception handler.
  At entry, only a0 has been saved (in EXCSAVE_1).
--------------------------------------------------------------------------------
*/

    .type   _xt_coproc_exc,@function
    .align  4

_xt_coproc_exc:

    /* Allocate interrupt stack frame and save minimal context. */
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -12                     /* for debug backtrace */
    #endif
    rsr     a0, PS                          /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_1                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_1                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    #if XCHAL_HAVE_WINDOWED
    s32e    a0, sp, -16                     /* for debug backtrace */
    #endif
    movi    a0, _xt_user_exit               /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    rsr     a0, EXCCAUSE
    s32i    a5, sp, XT_STK_A5               /* save a5 */
    addi    a5, a0, -EXCCAUSE_CP0_DISABLED  /* a5 = CP index */

    /* Save a few more of interruptee's registers (a5 was already saved). */
    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a15, sp, XT_STK_A15

    /* Get co-processor state save area of new owner thread. */
    call0   XT_RTOS_CP_STATE                /* a15 = new owner's save area */

    #ifndef CONFIG_FREERTOS_FPU_IN_ISR
    beqz    a15, .L_goto_invalid
    #endif

    /* When FPU use in ISRs is enabled, a zeroed a15 can legitimately occur here. */

    /* Enable the co-processor's bit in CPENABLE. */
    movi    a0, _xt_coproc_mask
    rsr     a4, CPENABLE                    /* a4 = CPENABLE */
    addx4   a0, a5, a0                      /* a0 = &_xt_coproc_mask[n] */
    l32i    a0, a0, 0                       /* a0 = (n << 16) | (1 << n) */

    /* FPU operations are incompatible with non-pinned tasks. If an FPU operation
       is seen here, it is better to pin the task to whatever core it is currently
       running on, to keep the system from crashing. */
//    movi    a2, g_losTask
//    getcoreid a3
//    l32i    a2, a2, 0
//    l32i    a2, a2, 0                       /* a2 = start of pxCurrentTCB[cpuid] */
//    addi    a2, a2, TASKTCB_XCOREID_OFFSET  /* offset to xCoreID in tcb struct */
//    s32i    a3, a2, 0                       /* store current cpuid */

    /* Grab correct xt_coproc_owner_sa for this core */
    movi    a2, XCHAL_CP_MAX << 2
//    mull    a2, a2, a3                      /* multiply by current processor id */
    movi    a3, _xt_coproc_owner_sa         /* a3 = base of owner array */
    add     a3, a3, a2                      /* a3 = owner area needed for this processor */

    extui   a2, a0, 0, 16                   /* coprocessor bitmask portion */
    or      a4, a4, a2                      /* a4 = CPENABLE | (1 << n) */
    wsr     a4, CPENABLE

/*
Keep the load of _xt_coproc_owner_sa[n] atomic (load it once, then use that value
everywhere): _xt_coproc_release assumes it works like this in order not to need
locking.
*/


    /* Get old coprocessor owner thread (save area ptr) and assign new one.  */
    addx4   a3,  a5, a3                      /* a3 = &_xt_coproc_owner_sa[n] */
    l32i    a2,  a3, 0                       /* a2 = old owner's save area */
    s32i    a15, a3, 0                       /* _xt_coproc_owner_sa[n] = new */
    rsync                                    /* ensure wsr.CPENABLE is complete */

    /* Only need to context switch if new owner != old owner. */
    /* If FPU use in ISRs is necessary, the check below must be removed, because
     * when restoring from an ISR we may have new == old, a condition used to
     * force a CP restore for the next thread.
     */
    #ifndef CONFIG_FREERTOS_FPU_IN_ISR
    beq     a15, a2, .L_goto_done           /* new owner == old, we're done */
    #endif

    /* If no old owner then nothing to save. */
    beqz    a2, .L_check_new

    /* If old owner not actively using CP then nothing to save. */
    l16ui   a4,  a2,  XT_CPENABLE           /* a4 = old owner's CPENABLE */
    bnone   a4,  a0,  .L_check_new          /* old owner not using CP    */

.L_save_old:
    /* Save old owner's coprocessor state. */

    movi    a5, _xt_coproc_sa_offset

    /* Mark old owner state as no longer active (CPENABLE bit n clear). */
    xor     a4,  a4,  a0                    /* clear CP bit in CPENABLE    */
    s16i    a4,  a2,  XT_CPENABLE           /* update old owner's CPENABLE */

    extui   a4,  a0,  16,  5                /* a4 = CP index = n */
    addx4   a5,  a4,  a5                    /* a5 = &_xt_coproc_sa_offset[n] */

    /* Mark old owner state as saved (CPSTORED bit n set). */
    l16ui   a4,  a2,  XT_CPSTORED           /* a4 = old owner's CPSTORED */
    l32i    a5,  a5,  0                     /* a5 = XT_CP[n]_SA offset */
    or      a4,  a4,  a0                    /* set CP in old owner's CPSTORED */
    s16i    a4,  a2,  XT_CPSTORED           /* update old owner's CPSTORED */
    l32i    a2, a2, XT_CP_ASA               /* ptr to actual (aligned) save area */
    extui   a3, a0, 16, 5                   /* a3 = CP index = n */
    add     a2, a2, a5                      /* a2 = old owner's area for CP n */

    /*
    The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
    It is theoretically possible for Xtensa processor designers to write TIE
    that causes more address registers to be affected, but it is generally
    unlikely. If that ever happens, more registers need to be saved/restored
    around this macro invocation, and the value in a15 needs to be recomputed.
    */
    xchal_cpi_store_funcbody

.L_check_new:
    /* Check if any state has to be restored for new owner. */
    /* NOTE: a15 = new owner's save area; it cannot be zero when we get here
       unless CONFIG_FREERTOS_FPU_IN_ISR is enabled, hence the check below. */
    beqz    a15, .L_xt_coproc_done

    l16ui   a3,  a15, XT_CPSTORED           /* a3 = new owner's CPSTORED */
    movi    a4, _xt_coproc_sa_offset
    bnone   a3,  a0,  .L_check_cs           /* full CP not saved, check callee-saved */
    xor     a3,  a3,  a0                    /* CPSTORED bit is set, clear it */
    s16i    a3,  a15, XT_CPSTORED           /* update new owner's CPSTORED */

    /* Adjust new owner's save area pointers to area for CP n. */
    extui   a3,  a0, 16, 5                  /* a3 = CP index = n */
    addx4   a4,  a3, a4                     /* a4 = &_xt_coproc_sa_offset[n] */
    l32i    a4,  a4, 0                      /* a4 = XT_CP[n]_SA */
    l32i    a5, a15, XT_CP_ASA              /* ptr to actual (aligned) save area */
    add     a2,  a4, a5                     /* a2 = new owner's area for CP */

    /*
    The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
    It is theoretically possible for Xtensa processor designers to write TIE
    that causes more address registers to be affected, but it is generally
    unlikely. If that ever happens, more registers need to be saved/restored
    around this macro invocation.
    */
    xchal_cpi_load_funcbody

    /* Restore interruptee's saved registers. */
    /* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */
.L_xt_coproc_done:
    l32i    a15, sp, XT_STK_A15
    l32i    a5,  sp, XT_STK_A5
    l32i    a4,  sp, XT_STK_A4
    l32i    a3,  sp, XT_STK_A3
    l32i    a2,  sp, XT_STK_A2
    call0   _xt_user_exit                   /* return via exit dispatcher */
    /* Never returns here - call0 is used as a jump (see note at top) */

.L_check_cs:
    /* a0 = CP mask in low bits, a15 = new owner's save area */
    l16ui   a2, a15, XT_CP_CS_ST            /* a2 = mask of CPs saved    */
    bnone   a2,  a0, .L_xt_coproc_done      /* if no match then done     */
    and     a2,  a2, a0                     /* a2 = which CPs to restore */
    extui   a2,  a2, 0, 8                   /* extract low 8 bits        */
    s32i    a6,  sp, XT_STK_A6              /* save extra needed regs    */
    s32i    a7,  sp, XT_STK_A7
    s32i    a13, sp, XT_STK_A13
    s32i    a14, sp, XT_STK_A14
    call0   _xt_coproc_restorecs            /* restore CP registers      */
    l32i    a6,  sp, XT_STK_A6              /* restore saved registers   */
    l32i    a7,  sp, XT_STK_A7
    l32i    a13, sp, XT_STK_A13
    l32i    a14, sp, XT_STK_A14
    j       .L_xt_coproc_done

    /* Co-processor exception occurred outside a thread (not supported). */
.L_xt_coproc_invalid:
    movi    a0,PANIC_RSN_COPROCEXCEPTION
    wsr     a0,EXCCAUSE
    call0   _xt_panic                       /* not in a thread (invalid) */
    /* never returns */


#endif /* XCHAL_CP_NUM */


/*
-------------------------------------------------------------------------------
  Level 1 interrupt dispatch. Assumes stack frame has not been allocated yet.
-------------------------------------------------------------------------------
*/

    .section .iram1,"ax"
    .type       _xt_lowint1,@function
    .align      4

_xt_lowint1:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, PS                          /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_1                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_1                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_user_exit               /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_1 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_1
    #endif
    #endif

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(1) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(1) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 1 XCHAL_INTLEVEL1_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */


/*
-------------------------------------------------------------------------------
  MEDIUM PRIORITY (LEVEL 2+) INTERRUPT VECTORS AND LOW LEVEL HANDLERS.

  Medium priority interrupts are by definition those with priority greater
  than 1 and not greater than XCHAL_EXCM_LEVEL. These are disabled by
  setting PS.EXCM and therefore can easily support a C environment for
  handlers in C, and interact safely with an RTOS.

  Each vector goes at a predetermined location according to the Xtensa
  hardware configuration, which is ensured by its placement in a special
  section known to the Xtensa linker support package (LSP). It performs
  the minimum necessary before jumping to the handler in the .text section.

  The corresponding handler goes in the normal .text section. It sets up
  the appropriate stack frame, saves a few vector-specific registers and
  calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
  and enter the RTOS, then sets up a C environment. It then calls the
  user's interrupt handler code (which may be coded in C) and finally
  calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.

  While XT_RTOS_INT_EXIT does not return directly to the interruptee,
  eventually the RTOS scheduler will want to dispatch the interrupted
  task or handler. The scheduler will return to the exit point that was
  saved in the interrupt stack frame at XT_STK_EXIT.
-------------------------------------------------------------------------------
*/

#if XCHAL_EXCM_LEVEL >= 2

    .begin      literal_prefix .Level2InterruptVector
    .section    .Level2InterruptVector.text, "ax"
    .global     _Level2Vector
    .type       _Level2Vector,@function
    .align      4
_Level2Vector:
    wsr     a0, EXCSAVE_2                   /* preserve a0 */
    call0   _xt_medint2                     /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

    .section .iram1,"ax"
    .type       _xt_medint2,@function
    .align      4
_xt_medint2:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_2                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_2                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_2                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint2_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_2 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_2
    #endif
    #endif

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(2) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(2) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 2 XCHAL_INTLEVEL2_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint2_exit
    .type       _xt_medint2_exit,@function
    .align      4
_xt_medint2_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, EPS_2
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_2
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
    rsync                                   /* ensure EPS and EPC written */
    rfi     2

#endif  /* Level 2 */

#if XCHAL_EXCM_LEVEL >= 3

    .begin      literal_prefix .Level3InterruptVector
    .section    .Level3InterruptVector.text, "ax"
    .global     _Level3Vector
    .type       _Level3Vector,@function
    .align      4
_Level3Vector:
    wsr     a0, EXCSAVE_3                   /* preserve a0 */
    call0   _xt_medint3                     /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

    .section .iram1,"ax"
    .type       _xt_medint3,@function
    .align      4
_xt_medint3:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_3                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_3                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_3                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint3_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_3 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_3
    #endif
    #endif

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(3) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(3) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 3 XCHAL_INTLEVEL3_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint3_exit
    .type       _xt_medint3_exit,@function
    .align      4
_xt_medint3_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, EPS_3
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_3
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
    rsync                                   /* ensure EPS and EPC written */
    rfi     3

#endif  /* Level 3 */

#if XCHAL_EXCM_LEVEL >= 4

    .begin      literal_prefix .Level4InterruptVector
    .section    .Level4InterruptVector.text, "ax"
    .global     _Level4Vector
    .type       _Level4Vector,@function
    .align      4
_Level4Vector:
    wsr     a0, EXCSAVE_4                   /* preserve a0 */
    call0   _xt_medint4                     /* load interrupt handler */

    .end        literal_prefix

    .section .iram1,"ax"
    .type       _xt_medint4,@function
    .align      4
_xt_medint4:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_4                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_4                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_4                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint4_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_4 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_4
    #endif
    #endif

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
    #ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(4) | PS_UM
    #else
    movi    a0, PS_INTLEVEL(4) | PS_UM | PS_WOE
    #endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */

    dispatch_c_isr 4 XCHAL_INTLEVEL4_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT                /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint4_exit
    .type       _xt_medint4_exit,@function
    .align      4
_xt_medint4_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
    wsr     a0, EPS_4
    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
    wsr     a0, EPC_4
    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
    rsync                                   /* ensure EPS and EPC written */
    rfi     4

#endif  /* Level 4 */

#if XCHAL_EXCM_LEVEL >= 5

    .begin      literal_prefix .Level5InterruptVector
    .section    .Level5InterruptVector.text, "ax"
    .global     _Level5Vector
    .type       _Level5Vector,@function
    .align      4
_Level5Vector:
    wsr     a0, EXCSAVE_5                   /* preserve a0 */
    call0   _xt_medint5                     /* load interrupt handler */

    .end        literal_prefix

    .section .iram1,"ax"
    .type       _xt_medint5,@function
    .align      4
_xt_medint5:
    mov     a0, sp                          /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
    rsr     a0, EPS_5                       /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_5                       /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_5                   /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint5_exit            /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_5 should now be free to use. Use it to keep a copy of the
    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
    #ifdef XT_DEBUG_BACKTRACE
    #ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_5
    #endif
    #endif

    /* Save rest of interrupt context and enter RTOS. */
1419    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */
1420
1421    /* !! We are now on the RTOS system stack !! */
1422
1423    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1424    #ifdef __XTENSA_CALL0_ABI__
1425    movi    a0, PS_INTLEVEL(5) | PS_UM
1426    #else
1427    movi    a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
1428    #endif
1429    wsr     a0, PS
1430    rsync
1431
1432    /* OK to call C code at this point, dispatch user ISRs */
1433
1434    dispatch_c_isr 5 XCHAL_INTLEVEL5_MASK
1435
1436    /* Done handling interrupts, transfer control to OS */
1437    call0   XT_RTOS_INT_EXIT                /* does not return directly here */
1438
1439    /*
1440    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1441    on entry and used to return to a thread or interrupted interrupt handler.
1442    */
1443    .global     _xt_medint5_exit
1444    .type       _xt_medint5_exit,@function
1445    .align      4
1446_xt_medint5_exit:
1447    /* Restore only level-specific regs (the rest were already restored) */
1448    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
1449    wsr     a0, EPS_5
1450    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
1451    wsr     a0, EPC_5
1452    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
1453    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
1454    rsync                                   /* ensure EPS and EPC written */
1455    rfi     5
1456
1457#endif  /* Level 5 */
1458
1459#if XCHAL_EXCM_LEVEL >= 6
1460
1461    .begin      literal_prefix .Level6InterruptVector
1462    .section    .Level6InterruptVector.text, "ax"
1463    .global     _Level6Vector
1464    .type       _Level6Vector,@function
1465    .align      4
1466_Level6Vector:
1467    wsr     a0, EXCSAVE_6                   /* preserve a0 */
1468    call0   _xt_medint6                     /* load interrupt handler */
1469
1470    .end        literal_prefix
1471
1472    .section .iram1,"ax"
1473    .type       _xt_medint6,@function
1474    .align      4
1475_xt_medint6:
1476    mov     a0, sp                          /* sp == a1 */
1477    addi    sp, sp, -XT_STK_FRMSZ           /* allocate interrupt stack frame */
1478    s32i    a0, sp, XT_STK_A1               /* save pre-interrupt SP */
1479    rsr     a0, EPS_6                       /* save interruptee's PS */
1480    s32i    a0, sp, XT_STK_PS
1481    rsr     a0, EPC_6                       /* save interruptee's PC */
1482    s32i    a0, sp, XT_STK_PC
1483    rsr     a0, EXCSAVE_6                   /* save interruptee's a0 */
1484    s32i    a0, sp, XT_STK_A0
1485    movi    a0, _xt_medint6_exit            /* save exit point for dispatch */
1486    s32i    a0, sp, XT_STK_EXIT
1487
1488    /* EXCSAVE_6 should now be free to use. Use it to keep a copy of the
1489    current stack pointer that points to the exception frame (XT_STK_FRAME).*/
1490    #ifdef XT_DEBUG_BACKTRACE
1491    #ifndef __XTENSA_CALL0_ABI__
1492    mov     a0, sp
1493    wsr     a0, EXCSAVE_6
1494    #endif
1495    #endif
1496
1497    /* Save rest of interrupt context and enter RTOS. */
1498    call0   XT_RTOS_INT_ENTER               /* common RTOS interrupt entry */
1499
1500    /* !! We are now on the RTOS system stack !! */
1501
1502    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
1503    #ifdef __XTENSA_CALL0_ABI__
1504    movi    a0, PS_INTLEVEL(6) | PS_UM
1505    #else
1506    movi    a0, PS_INTLEVEL(6) | PS_UM | PS_WOE
1507    #endif
1508    wsr     a0, PS
1509    rsync
1510
1511    /* OK to call C code at this point, dispatch user ISRs */
1512
1513    dispatch_c_isr 6 XCHAL_INTLEVEL6_MASK
1514
1515    /* Done handling interrupts, transfer control to OS */
1516    call0   XT_RTOS_INT_EXIT                /* does not return directly here */
1517
1518    /*
1519    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
1520    on entry and used to return to a thread or interrupted interrupt handler.
1521    */
1522    .global     _xt_medint6_exit
1523    .type       _xt_medint6_exit,@function
1524    .align      4
1525_xt_medint6_exit:
1526    /* Restore only level-specific regs (the rest were already restored) */
1527    l32i    a0, sp, XT_STK_PS               /* retrieve interruptee's PS */
1528    wsr     a0, EPS_6
1529    l32i    a0, sp, XT_STK_PC               /* retrieve interruptee's PC */
1530    wsr     a0, EPC_6
1531    l32i    a0, sp, XT_STK_A0               /* retrieve interruptee's A0 */
1532    l32i    sp, sp, XT_STK_A1               /* remove interrupt stack frame */
1533    rsync                                   /* ensure EPS and EPC written */
1534    rfi     6
1535
1536#endif  /* Level 6 */
1537
1538
1539/*******************************************************************************
1540
1541HIGH PRIORITY (LEVEL > XCHAL_EXCM_LEVEL) INTERRUPT VECTORS AND HANDLERS
1542
1543High priority interrupts are by definition those with priorities greater
1544than XCHAL_EXCM_LEVEL. This includes the non-maskable interrupt (NMI). High
1545priority interrupts cannot interact with the RTOS; that is, they must save all
1546registers they use and must not call any RTOS function.
1547
1548A further restriction imposed by the Xtensa windowed architecture is that
1549high priority interrupts must not modify the interrupted stack in any way, not
1550even the area logically "above" its top (they must provide their own stack or
1551a static save area).
1552
1553Cadence Design Systems recommends high priority interrupt handlers be coded in assembly
1554and used for purposes requiring very short service times.
1555
1556Here are templates for high priority (level 2+) interrupt vectors.
1557They assume only one interrupt per level to avoid the burden of identifying
1558which interrupts at this level are pending and enabled. This allows for
1559minimum latency and avoids having to save/restore a2 in addition to a0.
1560If more than one interrupt per high-priority level is configured, this burden
1561falls on the handler, which in any case must provide a way to save and restore
1562the registers it uses without touching the interrupted stack.
1563
1564Each vector goes at a predetermined location according to the Xtensa
1565hardware configuration, which is ensured by its placement in a special
1566section known to the Xtensa linker support package (LSP). It performs
1567the minimum necessary before jumping to the handler in the .text section.
1568
1569*******************************************************************************/
1570
1571/*
1572These stubs just call xt_highintX/xt_nmi to handle the real interrupt. Define
1573these handlers in an external assembly source file; if the symbols are not
1574defined anywhere else, the defaults in xtensa_vector_defaults.S are used.
1575*/
1576
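/*
A minimal sketch of an application-supplied override for one of these stubs,
using level 5 as the example. It is guarded by '#if 0' so it is never
assembled; the static save-area symbol and the handler body are illustrative
assumptions only. The essential rules are the ones stated above: provide your
own save area, do not touch the interrupted stack, do not call any RTOS
service, restore a0 from EXCSAVE_<level>, and return with RFI <level>.
*/

#if 0   /* illustrative sketch only -- not assembled */

    .data
    .align      4
_xt_highint5_save:                          /* hypothetical static save area */
    .space      8                           /* room for the registers used below */

    .section    .iram1, "ax"
    .global     xt_highint5
    .type       xt_highint5,@function
    .align      4
xt_highint5:
    movi    a0, _xt_highint5_save           /* a0 is scratch: interruptee's a0 is in EXCSAVE_5 */
    s32i    a2, a0, 0                       /* save the registers this handler uses ... */
    s32i    a3, a0, 4                       /* ... to the static area, never to the stack */

    /* ... acknowledge the interrupt source and do the (very short) work here ... */

    movi    a0, _xt_highint5_save
    l32i    a2, a0, 0                       /* restore the registers used above */
    l32i    a3, a0, 4
    rsr     a0, EXCSAVE_5                   /* restore interruptee's a0 */
    rfi     5                               /* return from the level-5 interrupt */

#endif
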
1577#if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2
1578
1579    .begin      literal_prefix .Level2InterruptVector
1580    .section    .Level2InterruptVector.text, "ax"
1581    .global     _Level2Vector
1582    .type       _Level2Vector,@function
1583    .global     xt_highint2
1584    .align      4
1585_Level2Vector:
1586    wsr     a0, EXCSAVE_2                   /* preserve a0 */
1587    call0   xt_highint2                    /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */
1588
1589    .end        literal_prefix
1590
1591#endif  /* Level 2 */
1592
1593#if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3
1594
1595    .begin      literal_prefix .Level3InterruptVector
1596    .section    .Level3InterruptVector.text, "ax"
1597    .global     _Level3Vector
1598    .type       _Level3Vector,@function
1599    .global     xt_highint3
1600    .align      4
1601_Level3Vector:
1602    wsr     a0, EXCSAVE_3                   /* preserve a0 */
1603    call0   xt_highint3                    /* load interrupt handler */
1604    /* never returns here - call0 is used as a jump (see note at top) */
1605
1606    .end        literal_prefix
1607
1608#endif  /* Level 3 */
1609
1610#if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4
1611
1612    .begin      literal_prefix .Level4InterruptVector
1613    .section    .Level4InterruptVector.text, "ax"
1614    .global     _Level4Vector
1615    .type       _Level4Vector,@function
1616    .global     xt_highint4
1617    .align      4
1618_Level4Vector:
1619    wsr     a0, EXCSAVE_4                   /* preserve a0 */
1620    call0   xt_highint4                    /* load interrupt handler */
1621    /* never returns here - call0 is used as a jump (see note at top) */
1622
1623    .end        literal_prefix
1624
1625#endif  /* Level 4 */
1626
1627#if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5
1628
1629    .begin      literal_prefix .Level5InterruptVector
1630    .section    .Level5InterruptVector.text, "ax"
1631    .global     _Level5Vector
1632    .type       _Level5Vector,@function
1633    .global     xt_highint5
1634    .align      4
1635_Level5Vector:
1636    wsr     a0, EXCSAVE_5                   /* preserve a0 */
1637    call0   xt_highint5                    /* load interrupt handler */
1638    /* never returns here - call0 is used as a jump (see note at top) */
1639
1640    .end        literal_prefix
1641
1642#endif  /* Level 5 */
1643
1644#if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6
1645
1646    .begin      literal_prefix .Level6InterruptVector
1647    .section    .Level6InterruptVector.text, "ax"
1648    .global     _Level6Vector
1649    .type       _Level6Vector,@function
1650    .global     xt_highint6
1651    .align      4
1652_Level6Vector:
1653    wsr     a0, EXCSAVE_6                   /* preserve a0 */
1654    call0   xt_highint6                    /* load interrupt handler */
1655    /* never returns here - call0 is used as a jump (see note at top) */
1656
1657    .end        literal_prefix
1658
1659#endif  /* Level 6 */
1660
1661#if XCHAL_HAVE_NMI
1662
1663    .begin      literal_prefix .NMIExceptionVector
1664    .section    .NMIExceptionVector.text, "ax"
1665    .global     _NMIExceptionVector
1666    .type       _NMIExceptionVector,@function
1667    .global     xt_nmi
1668    .align      4
1669_NMIExceptionVector:
1670    wsr     a0, EXCSAVE + XCHAL_NMILEVEL    /* preserve a0 */
1671    call0   xt_nmi                         /* load interrupt handler */
1672    /* never returns here - call0 is used as a jump (see note at top) */
1673
1674    .end        literal_prefix
1675
1676#endif  /* NMI */
1677
1678
1679/*******************************************************************************
1680
1681WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER
1682
1683Here is the code for each window overflow/underflow exception vector and
1684(interspersed) efficient code for handling the alloca exception cause.
1685Window exceptions are handled entirely in the vector area and are coded very
1686tightly for performance. The alloca exception is also handled entirely in
1687the window vector area, so it comes at essentially no extra cost in code size.
1688Users should never need to modify this code, and Cadence Design Systems
1689recommends that they do not.
1690
1691Window handlers go at predetermined vector locations according to the
1692Xtensa hardware configuration, which is ensured by their placement in a
1693special section known to the Xtensa linker support package (LSP). Since
1694their offsets in that section are always the same, the LSPs do not define
1695a section per vector.
1696
1697This code is written for XEA2 only (XEA1 is not supported).
1698
1699Note on Underflow Handlers:
1700The underflow handler for returning from call[i+1] to call[i]
1701must preserve all the registers from call[i+1]'s window.
1702In particular, a0 and a1 must be preserved because the RETW instruction
1703will be reexecuted (and may even underflow if an intervening exception
1704has flushed call[i]'s registers).
1705Registers a2 and up may contain return values.
1706
1707*******************************************************************************/
1708
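/*
A minimal sketch (guarded by '#if 0', never assembled) of ordinary windowed-ABI
code whose execution can implicitly invoke the handlers below. The function
names are purely illustrative; no application code calls these handlers
directly -- the hardware vectors to them when a window exception is raised.
*/

#if 0   /* illustrative sketch only -- not assembled */

    .text
    .type       example_fn,@function
    .align      4
example_fn:
    entry   a1, 32              /* windowed prologue: rotates the register window
                                   and reserves a 32-byte stack frame */
    call8   example_callee      /* a deep enough chain of call4/call8/call12 calls
                                   runs out of physical registers; the hardware then
                                   takes a window overflow exception and one of the
                                   _WindowOverflowN handlers below spills frames */
    retw                        /* returning through a frame that was spilled takes
                                   a window underflow exception, handled by the
                                   matching _WindowUnderflowN routine */

#endif
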
1709#if XCHAL_HAVE_WINDOWED
1710
1711    .section .WindowVectors.text, "ax"
1712
1713/*
1714--------------------------------------------------------------------------------
1715Window Overflow Exception for Call4.
1716
1717Invoked if a call[i] referenced a register (a4-a15)
1718that contains data from ancestor call[j];
1719call[j] had done a call4 to call[j+1].
1720On entry here:
1721    window rotated to call[j] start point;
1722        a0-a3 are registers to be saved;
1723        a4-a15 must be preserved;
1724        a5 is call[j+1]'s stack pointer.
1725--------------------------------------------------------------------------------
1726*/
1727
1728    .org    0x0
1729    .global _WindowOverflow4
1730_WindowOverflow4:
1731
1732    s32e    a0, a5, -16     /* save a0 to call[j+1]'s stack frame */
1733    s32e    a1, a5, -12     /* save a1 to call[j+1]'s stack frame */
1734    s32e    a2, a5,  -8     /* save a2 to call[j+1]'s stack frame */
1735    s32e    a3, a5,  -4     /* save a3 to call[j+1]'s stack frame */
1736    rfwo                    /* rotates back to call[i] position */
1737
1738/*
1739--------------------------------------------------------------------------------
1740Window Underflow Exception for Call4
1741
1742Invoked by RETW returning from call[i+1] to call[i]
1743where call[i]'s registers must be reloaded (not live in ARs);
1744where call[i] had done a call4 to call[i+1].
1745On entry here:
1746        window rotated to call[i] start point;
1747        a0-a3 are undefined, must be reloaded with call[i].reg[0..3];
1748        a4-a15 must be preserved (they are call[i+1].reg[0..11]);
1749        a5 is call[i+1]'s stack pointer.
1750--------------------------------------------------------------------------------
1751*/
1752
1753    .org    0x40
1754    .global _WindowUnderflow4
1755_WindowUnderflow4:
1756
1757    l32e    a0, a5, -16     /* restore a0 from call[i+1]'s stack frame */
1758    l32e    a1, a5, -12     /* restore a1 from call[i+1]'s stack frame */
1759    l32e    a2, a5,  -8     /* restore a2 from call[i+1]'s stack frame */
1760    l32e    a3, a5,  -4     /* restore a3 from call[i+1]'s stack frame */
1761    rfwu
1762
1763/*
1764--------------------------------------------------------------------------------
1765Handle the alloca exception generated by the interruptee executing 'movsp'.
1766This handler uses space between the window vectors, so it is essentially "free".
1767All of the interruptee's registers are intact except a0, which is saved in
1768EXCSAVE_1, and PS.EXCM has been set by the exception hardware (so this code
1769cannot be interrupted). The fact that the alloca exception was taken means the
1770registers associated with the base-save area have been spilled and will be
1771restored by the underflow handler, so those 4 registers are available as scratch.
1772The code is optimized to avoid unaligned branches and minimize cache misses.
1773--------------------------------------------------------------------------------
1774*/
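
/*
For illustration, the kind of compiler-generated sequence that executes MOVSP
(guarded by '#if 0', never assembled). The exact code emitted for alloca() or
variable-length arrays varies by compiler and options; this sketch is only
meant to show where the exception handled below can come from.
*/

#if 0   /* illustrative sketch only -- not assembled */

    sub     a2, a1, a3          /* compute the new, lower stack pointer */
    movsp   a1, a2              /* MOVSP updates SP; if the caller's base-save-area
                                   registers have already been spilled, the hardware
                                   raises the alloca exception and _xt_alloca_exc
                                   (below) arranges for the underflow handlers to
                                   reload them */

#endif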
1775
1776    .align  4
1777    .global _xt_alloca_exc
1778_xt_alloca_exc:
1779
1780    rsr     a0, WINDOWBASE  /* grab WINDOWBASE before rotw changes it */
1781    rotw    -1              /* WINDOWBASE goes to a4, new a0-a3 are scratch */
1782    rsr     a2, PS
1783    extui   a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
1784    xor     a3, a3, a4      /* bits changed from old to current windowbase */
1785    rsr     a4, EXCSAVE_1   /* restore original a0 (now in a4) */
1786    slli    a3, a3, XCHAL_PS_OWB_SHIFT
1787    xor     a2, a2, a3      /* flip changed bits in old window base */
1788    wsr     a2, PS          /* update PS.OWB to new window base */
1789    rsync
1790
1791    _bbci.l a4, 31, _WindowUnderflow4
1792    rotw    -1              /* original a0 goes to a8 */
1793    _bbci.l a8, 30, _WindowUnderflow8
1794    rotw    -1
1795    j               _WindowUnderflow12
1796
1797/*
1798--------------------------------------------------------------------------------
1799Window Overflow Exception for Call8
1800
1801Invoked if a call[i] referenced a register (a4-a15)
1802that contains data from ancestor call[j];
1803call[j] had done a call8 to call[j+1].
1804On entry here:
1805    window rotated to call[j] start point;
1806        a0-a7 are registers to be saved;
1807        a8-a15 must be preserved;
1808        a9 is call[j+1]'s stack pointer.
1809--------------------------------------------------------------------------------
1810*/
1811
1812    .org    0x80
1813    .global _WindowOverflow8
1814_WindowOverflow8:
1815
1816    s32e    a0, a9, -16     /* save a0 to call[j+1]'s stack frame */
1817    l32e    a0, a1, -12     /* a0 <- call[j-1]'s sp
1818                               (used to find end of call[j]'s frame) */
1819    s32e    a1, a9, -12     /* save a1 to call[j+1]'s stack frame */
1820    s32e    a2, a9,  -8     /* save a2 to call[j+1]'s stack frame */
1821    s32e    a3, a9,  -4     /* save a3 to call[j+1]'s stack frame */
1822    s32e    a4, a0, -32     /* save a4 to call[j]'s stack frame */
1823    s32e    a5, a0, -28     /* save a5 to call[j]'s stack frame */
1824    s32e    a6, a0, -24     /* save a6 to call[j]'s stack frame */
1825    s32e    a7, a0, -20     /* save a7 to call[j]'s stack frame */
1826    rfwo                    /* rotates back to call[i] position */
1827
1828/*
1829--------------------------------------------------------------------------------
1830Window Underflow Exception for Call8
1831
1832Invoked by RETW returning from call[i+1] to call[i]
1833where call[i]'s registers must be reloaded (not live in ARs);
1834where call[i] had done a call8 to call[i+1].
1835On entry here:
1836        window rotated to call[i] start point;
1837        a0-a7 are undefined, must be reloaded with call[i].reg[0..7];
1838        a8-a15 must be preserved (they are call[i+1].reg[0..7]);
1839        a9 is call[i+1]'s stack pointer.
1840--------------------------------------------------------------------------------
1841*/
1842
1843    .org    0xC0
1844    .global _WindowUnderflow8
1845_WindowUnderflow8:
1846
1847    l32e    a0, a9, -16     /* restore a0 from call[i+1]'s stack frame */
1848    l32e    a1, a9, -12     /* restore a1 from call[i+1]'s stack frame */
1849    l32e    a2, a9,  -8     /* restore a2 from call[i+1]'s stack frame */
1850    l32e    a7, a1, -12     /* a7 <- call[i-1]'s sp
1851                               (used to find end of call[i]'s frame) */
1852    l32e    a3, a9,  -4     /* restore a3 from call[i+1]'s stack frame */
1853    l32e    a4, a7, -32     /* restore a4 from call[i]'s stack frame */
1854    l32e    a5, a7, -28     /* restore a5 from call[i]'s stack frame */
1855    l32e    a6, a7, -24     /* restore a6 from call[i]'s stack frame */
1856    l32e    a7, a7, -20     /* restore a7 from call[i]'s stack frame */
1857    rfwu
1858
1859/*
1860--------------------------------------------------------------------------------
1861Window Overflow Exception for Call12
1862
1863Invoked if a call[i] referenced a register (a4-a15)
1864that contains data from ancestor call[j];
1865call[j] had done a call12 to call[j+1].
1866On entry here:
1867    window rotated to call[j] start point;
1868        a0-a11 are registers to be saved;
1869        a12-a15 must be preserved;
1870        a13 is call[j+1]'s stack pointer.
1871--------------------------------------------------------------------------------
1872*/
1873
1874    .org    0x100
1875    .global _WindowOverflow12
1876_WindowOverflow12:
1877
1878    s32e    a0,  a13, -16   /* save a0 to call[j+1]'s stack frame */
1879    l32e    a0,  a1,  -12   /* a0 <- call[j-1]'s sp
1880                               (used to find end of call[j]'s frame) */
1881    s32e    a1,  a13, -12   /* save a1 to call[j+1]'s stack frame */
1882    s32e    a2,  a13,  -8   /* save a2 to call[j+1]'s stack frame */
1883    s32e    a3,  a13,  -4   /* save a3 to call[j+1]'s stack frame */
1884    s32e    a4,  a0,  -48   /* save a4 to end of call[j]'s stack frame */
1885    s32e    a5,  a0,  -44   /* save a5 to end of call[j]'s stack frame */
1886    s32e    a6,  a0,  -40   /* save a6 to end of call[j]'s stack frame */
1887    s32e    a7,  a0,  -36   /* save a7 to end of call[j]'s stack frame */
1888    s32e    a8,  a0,  -32   /* save a8 to end of call[j]'s stack frame */
1889    s32e    a9,  a0,  -28   /* save a9 to end of call[j]'s stack frame */
1890    s32e    a10, a0,  -24   /* save a10 to end of call[j]'s stack frame */
1891    s32e    a11, a0,  -20   /* save a11 to end of call[j]'s stack frame */
1892    rfwo                    /* rotates back to call[i] position */
1893
1894/*
1895--------------------------------------------------------------------------------
1896Window Underflow Exception for Call12
1897
1898Invoked by RETW returning from call[i+1] to call[i]
1899where call[i]'s registers must be reloaded (not live in ARs);
1900where call[i] had done a call12 to call[i+1].
1901On entry here:
1902        window rotated to call[i] start point;
1903        a0-a11 are undefined, must be reloaded with call[i].reg[0..11];
1904        a12-a15 must be preserved (they are call[i+1].reg[0..3]);
1905        a13 is call[i+1]'s stack pointer.
1906--------------------------------------------------------------------------------
1907*/
1908
1909    .org 0x140
1910    .global _WindowUnderflow12
1911_WindowUnderflow12:
1912
1913    l32e    a0,  a13, -16   /* restore a0 from call[i+1]'s stack frame */
1914    l32e    a1,  a13, -12   /* restore a1 from call[i+1]'s stack frame */
1915    l32e    a2,  a13,  -8   /* restore a2 from call[i+1]'s stack frame */
1916    l32e    a11, a1,  -12   /* a11 <- call[i-1]'s sp
1917                               (used to find end of call[i]'s frame) */
1918    l32e    a3,  a13,  -4   /* restore a3 from call[i+1]'s stack frame */
1919    l32e    a4,  a11, -48   /* restore a4 from end of call[i]'s stack frame */
1920    l32e    a5,  a11, -44   /* restore a5 from end of call[i]'s stack frame */
1921    l32e    a6,  a11, -40   /* restore a6 from end of call[i]'s stack frame */
1922    l32e    a7,  a11, -36   /* restore a7 from end of call[i]'s stack frame */
1923    l32e    a8,  a11, -32   /* restore a8 from end of call[i]'s stack frame */
1924    l32e    a9,  a11, -28   /* restore a9 from end of call[i]'s stack frame */
1925    l32e    a10, a11, -24   /* restore a10 from end of call[i]'s stack frame */
1926    l32e    a11, a11, -20   /* restore a11 from end of call[i]'s stack frame */
1927    rfwu
1928
1929#endif /* XCHAL_HAVE_WINDOWED */
1930
1931    .section    .UserEnter.text, "ax"
1932    .global     call_user_start
1933    .type       call_user_start,@function
1934    .align      4
1935    .literal_position
1936