/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */

#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>

#ifdef HAVE_MACHINE_ASM_H
#include <machine/asm.h>
#else
#ifdef __USER_LABEL_PREFIX__
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */
#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#else
#define CNAME(x) x
#endif
#endif

#define cfi_adjust_cfa_offset(off)	.cfi_adjust_cfa_offset off
#define cfi_rel_offset(reg, off)	.cfi_rel_offset reg, off
#define cfi_restore(reg)		.cfi_restore reg
#define cfi_def_cfa_register(reg)	.cfi_def_cfa_register reg

        .text
        .globl CNAME(ffi_call_SYSV)
#ifdef __ELF__
        .type CNAME(ffi_call_SYSV), #function
#endif
#ifdef __APPLE__
        .align 2
#endif

/* ffi_call_SYSV()

   Create a stack frame, set up an argument context, call the callee
   and extract the result.

   The maximum required argument stack size is provided;
   ffi_call_SYSV() allocates that stack space, then calls the
   prepare_fn to populate the register context and stack.  The
   argument passing registers are loaded from the register
   context and the callee is called; on return the result passing
   registers are saved back to the context.  Our caller will
   extract the return value from the final state of the saved
   register context.

   Prototype:

   extern unsigned
   ffi_call_SYSV (void (*)(struct call_context *context, unsigned char *,
                           extended_cif *),
                  struct call_context *context,
                  extended_cif *,
                  size_t required_stack_size,
                  void (*fn)(void));

   Therefore on entry we have:

   x0 prepare_fn
   x1 &context
   x2 &ecif
   x3 bytes
   x4 fn

   This function uses the following stack frame layout:

   ==
                saved x30(lr)
   x29(fp)->    saved x29(fp)
                saved x24
                saved x23
                saved x22
   sp'    ->    saved x21
                ...
   sp     ->    (constructed callee stack arguments)
   ==

   Voila! */
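
/* For reference, the context offsets used below imply a register
   context laid out roughly as follows.  This is an illustrative
   sketch only; the authoritative definitions of struct call_context
   and AARCH64_CALL_CONTEXT_SIZE live in this port's C sources:

   struct call_context
   {
     UINT64 x[32];                  // x0-x7 at offsets 0..56, x8 at 8*8
     struct { UINT8 b[16]; } v[8];  // q0-q7 starting at offset 8*32
   };
 */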

/* Frame space for the four callee-saved registers (x21-x24) that we
   preserve below.  */
#define ffi_call_SYSV_FS (8 * 4)

        .cfi_startproc
CNAME(ffi_call_SYSV):
        stp     x29, x30, [sp, #-16]!
        cfi_adjust_cfa_offset (16)
        cfi_rel_offset (x29, 0)
        cfi_rel_offset (x30, 8)

        mov     x29, sp
        cfi_def_cfa_register (x29)
        sub     sp, sp, #ffi_call_SYSV_FS

        stp     x21, x22, [sp, #0]
        cfi_rel_offset (x21, 0 - ffi_call_SYSV_FS)
        cfi_rel_offset (x22, 8 - ffi_call_SYSV_FS)

        stp     x23, x24, [sp, #16]
        cfi_rel_offset (x23, 16 - ffi_call_SYSV_FS)
        cfi_rel_offset (x24, 24 - ffi_call_SYSV_FS)

        /* Preserve &context, &ecif and fn across the prepare call.  */
        mov     x21, x1
        mov     x22, x2
        mov     x24, x4

        /* Allocate the stack space for the actual arguments; many
           arguments will be passed in registers, but we assume the
           worst case and allocate sufficient stack for ALL of
           the arguments.  */
        sub     sp, sp, x3

        /* unsigned (*prepare_fn) (struct call_context *context,
                                   unsigned char *stack,
                                   extended_cif *ecif);  */
        mov     x23, x0
        mov     x0, x1
        mov     x1, sp
        /* x2 already in place.  */
        blr     x23

        /* Preserve the flags returned.  */
        mov     x23, x0

        /* Figure out if we should touch the vector registers.  */
        tbz     x23, #AARCH64_FFI_WITH_V_BIT, 1f

        /* Load the vector argument passing registers.  */
        ldp     q0, q1, [x21, #8*32 +  0]
        ldp     q2, q3, [x21, #8*32 + 32]
        ldp     q4, q5, [x21, #8*32 + 64]
        ldp     q6, q7, [x21, #8*32 + 96]
1:
        /* Load the core argument passing registers.  */
        ldp     x0, x1, [x21,  #0]
        ldp     x2, x3, [x21, #16]
        ldp     x4, x5, [x21, #32]
        ldp     x6, x7, [x21, #48]

        /* Don't forget x8, which may be holding the address of a
           return buffer.  */
        ldr     x8,     [x21, #8*8]

        blr     x24

        /* Save the result passing core registers.  */
        stp     x0, x1, [x21,  #0]
        stp     x2, x3, [x21, #16]
        stp     x4, x5, [x21, #32]
        stp     x6, x7, [x21, #48]

        /* Note nothing useful ever comes back in x8!  */

        /* Figure out if we should touch the vector registers.  */
        tbz     x23, #AARCH64_FFI_WITH_V_BIT, 1f

        /* Save the result passing vector registers.  */
        stp     q0, q1, [x21, #8*32 + 0]
        stp     q2, q3, [x21, #8*32 + 32]
        stp     q4, q5, [x21, #8*32 + 64]
        stp     q6, q7, [x21, #8*32 + 96]
1:
        /* All done, unwind our stack frame.  */
        ldp     x21, x22, [x29, #-ffi_call_SYSV_FS]
        cfi_restore (x21)
        cfi_restore (x22)

        ldp     x23, x24, [x29, #-ffi_call_SYSV_FS + 16]
        cfi_restore (x23)
        cfi_restore (x24)

        mov     sp, x29
        cfi_def_cfa_register (sp)

        ldp     x29, x30, [sp], #16
        cfi_adjust_cfa_offset (-16)
        cfi_restore (x29)
        cfi_restore (x30)

        ret

        .cfi_endproc
#ifdef __ELF__
        .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV)
#endif

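/* For illustration, the C-side driver is expected to use the routine
   above along these lines.  This is a sketch under assumed names:
   prep_fn stands in for whatever marshalling helper this port's ffi.c
   supplies, and cif is the usual ffi_cif whose bytes field gives the
   required argument stack size:

   struct call_context context;
   extended_cif ecif;
   // ... initialize ecif from the cif and argument values ...
   ffi_call_SYSV (prep_fn, &context, &ecif, cif->bytes, fn);
   // The return value is then extracted from the saved context.
 */
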
/* Frame space: two saved registers (x21, x22) plus the call context.  */
#define ffi_closure_SYSV_FS (8 * 2 + AARCH64_CALL_CONTEXT_SIZE)

/* ffi_closure_SYSV

   Closure invocation glue.  This is the low level code invoked directly by
   the closure trampoline to set up and call a closure.

   On entry x17 points to a struct trampoline_data; x16 has been clobbered;
   all other registers are preserved.

   We allocate a call context and save the argument passing registers,
   then invoke the generic C ffi_closure_SYSV_inner() function to do all
   the real work; on return we load the result passing registers back from
   the call context.

   On entry

   extern void
   ffi_closure_SYSV (struct trampoline_data *);

   struct trampoline_data
   {
        UINT64 *ffi_closure;
        UINT64 flags;
   };

   This function uses the following stack frame layout:

   ==
                saved x30(lr)
   x29(fp)->    saved x29(fp)
                saved x22
                saved x21
                ...
   sp     ->    call_context
   ==

   Voila!  */
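
/* The argument setup below implies an inner helper with roughly this
   shape.  An illustrative sketch only; the authoritative declaration
   of ffi_closure_SYSV_inner lives in this port's C sources, and the
   void return type is an assumption here, since the assembly ignores
   any returned value:

   extern void
   ffi_closure_SYSV_inner (ffi_closure *closure,
                           struct call_context *context,
                           void *stack);

   where stack is the stack pointer as it was when the trampoline was
   entered, so the inner function can locate any stacked arguments.  */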

        .text
        .globl CNAME(ffi_closure_SYSV)
#ifdef __APPLE__
        .align 2
#endif
        .cfi_startproc
CNAME(ffi_closure_SYSV):
        stp     x29, x30, [sp, #-16]!
        cfi_adjust_cfa_offset (16)
        cfi_rel_offset (x29, 0)
        cfi_rel_offset (x30, 8)

        mov     x29, sp
        cfi_def_cfa_register (x29)

        sub     sp, sp, #ffi_closure_SYSV_FS

        stp     x21, x22, [x29, #-16]
        cfi_rel_offset (x21, -16)
        cfi_rel_offset (x22, -8)

        /* Load x21 with &call_context.  */
        mov     x21, sp
        /* Preserve our struct trampoline_data *.  */
        mov     x22, x17

        /* Save the argument passing core registers.  */
        stp     x0, x1, [x21, #0]
        stp     x2, x3, [x21, #16]
        stp     x4, x5, [x21, #32]
        stp     x6, x7, [x21, #48]
        /* Don't forget we may have been given a result scratch pad
           address.  */
        str     x8,     [x21, #64]

        /* Figure out if we should touch the vector registers.  */
        ldr     x0, [x22, #8]
        tbz     x0, #AARCH64_FFI_WITH_V_BIT, 1f

        /* Save the argument passing vector registers.  */
        stp     q0, q1, [x21, #8*32 + 0]
        stp     q2, q3, [x21, #8*32 + 32]
        stp     q4, q5, [x21, #8*32 + 64]
        stp     q6, q7, [x21, #8*32 + 96]
1:
        /* Load &ffi_closure.  */
        ldr     x0, [x22, #0]
        mov     x1, x21
        /* Compute the location of the stack at the point that the
           trampoline was called.  */
        add     x2, x29, #16

        bl      CNAME(ffi_closure_SYSV_inner)

        /* Figure out if we should touch the vector registers.  */
        ldr     x0, [x22, #8]
        tbz     x0, #AARCH64_FFI_WITH_V_BIT, 1f

        /* Load the result passing vector registers.  */
        ldp     q0, q1, [x21, #8*32 + 0]
        ldp     q2, q3, [x21, #8*32 + 32]
        ldp     q4, q5, [x21, #8*32 + 64]
        ldp     q6, q7, [x21, #8*32 + 96]
1:
        /* Load the result passing core registers.  */
        ldp     x0, x1, [x21,  #0]
        ldp     x2, x3, [x21, #16]
        ldp     x4, x5, [x21, #32]
        ldp     x6, x7, [x21, #48]
        /* Note nothing useful is returned in x8.  */

        /* We are done, unwind our frame.  */
        ldp     x21, x22, [x29, #-16]
        cfi_restore (x21)
        cfi_restore (x22)

        mov     sp, x29
        cfi_def_cfa_register (sp)

        ldp     x29, x30, [sp], #16
        cfi_adjust_cfa_offset (-16)
        cfi_restore (x29)
        cfi_restore (x30)

        ret
        .cfi_endproc
#ifdef __ELF__
        .size CNAME(ffi_closure_SYSV), .-CNAME(ffi_closure_SYSV)
#endif
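
/* For illustration only: given the entry conditions documented above
   (x17 = &trampoline_data, x16 free to clobber), a trampoline entering
   ffi_closure_SYSV is expected to behave roughly like this sketch.
   This is not the actual trampoline, which is generated elsewhere in
   this port:

        ldr     x17, <address of this closure's trampoline_data>
        ldr     x16, <address of ffi_closure_SYSV>
        br      x16
 */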