/arch/x86/platform/efi/
    efi_stub_64.S:
        12  mov %rsp, %rax; \
        13  subq $0x70, %rsp; \
        14  and $~0xf, %rsp; \
        15  mov %rax, (%rsp); \
        18  mov %rax, 0x8(%rsp); \
        19  movaps %xmm0, 0x60(%rsp); \
        20  movaps %xmm1, 0x50(%rsp); \
        21  movaps %xmm2, 0x40(%rsp); \
        22  movaps %xmm3, 0x30(%rsp); \
        23  movaps %xmm4, 0x20(%rsp); \
        [all …]
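The efi_stub_64.S lines above are the standard trick for handing a 16-byte-aligned stack to a callee with a foreign calling convention: park the old %rsp, carve out a frame, and mask the low bits. A minimal stand-alone sketch of the same idiom (the wrapper and callee names are invented for illustration, not taken from the kernel):

        .text
        .globl  call_on_aligned_stack          # hypothetical wrapper, not a kernel symbol
call_on_aligned_stack:
        mov     %rsp, %rax                     # remember the possibly unaligned %rsp
        sub     $0x10, %rsp                    # room for the saved value
        and     $~0xf, %rsp                    # force 16-byte alignment, as line 14 does
        mov     %rax, (%rsp)                   # park the old %rsp at a known slot
        call    foreign_abi_function           # placeholder for the aligned-stack callee
        mov     (%rsp), %rsp                   # one load undoes both the sub and the and
        ret

The real stub additionally saves several %xmm registers into its 0x70-byte frame before the call, as the excerpt shows.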
/arch/x86/include/asm/
    ftrace.h:
        11  subq $(SS+8-\skip), %rsp
        12  movq %rax, RAX(%rsp)
        13  movq %rcx, RCX(%rsp)
        14  movq %rdx, RDX(%rsp)
        15  movq %rsi, RSI(%rsp)
        16  movq %rdi, RDI(%rsp)
        17  movq %r8, R8(%rsp)
        18  movq %r9, R9(%rsp)
        20  movq SS+8(%rsp), %rdx
        21  movq %rdx, RIP(%rsp)
        [all …]
    calling.h:
        87  subq $9*8+\addskip, %rsp
        138  addq $ARG_SKIP+\addskip, %rsp
        144  movq \offset(%rsp), %r11
        145  movq \offset+8(%rsp), %r10
        146  movq \offset+16(%rsp), %r9
        147  movq \offset+24(%rsp), %r8
        148  movq \offset+40(%rsp), %rcx
        149  movq \offset+48(%rsp), %rdx
        150  movq \offset+56(%rsp), %rsi
        151  movq \offset+64(%rsp), %rdi
        [all …]
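The calling.h helpers spill the caller-clobbered registers to fixed slots in one frame (line 87) and reload them later from the same offsets (lines 144-151). A simplified sketch of the idea with the macro arguments hard-coded; the function name is invented, and the assumption that the +32 slot holds %rax (the one offset the excerpt skips) is mine:

        .text
        .globl  spill_clobbered_regs           # illustrative only, not the kernel macro
spill_clobbered_regs:
        subq    $9*8, %rsp                     # nine 8-byte slots, like line 87
        movq    %rdi, 8*8(%rsp)                # the six argument registers...
        movq    %rsi, 7*8(%rsp)
        movq    %rdx, 6*8(%rsp)
        movq    %rcx, 5*8(%rsp)
        movq    %rax, 4*8(%rsp)                # assumed occupant of the skipped +32 slot
        movq    %r8,  3*8(%rsp)
        movq    %r9,  2*8(%rsp)
        movq    %r10, 1*8(%rsp)                # ...plus %r10/%r11, also caller-clobbered
        movq    %r11, 0*8(%rsp)
        call    might_clobber_everything       # hypothetical callee; its result is discarded
        movq    0*8(%rsp), %r11                # reload from the same fixed offsets
        movq    1*8(%rsp), %r10
        movq    2*8(%rsp), %r9
        movq    3*8(%rsp), %r8
        movq    4*8(%rsp), %rax
        movq    5*8(%rsp), %rcx
        movq    6*8(%rsp), %rdx
        movq    7*8(%rsp), %rsi
        movq    8*8(%rsp), %rdi
        addq    $9*8, %rsp                     # drop the frame, like line 138
        ret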
    dwarf2.h:
        105  movq %\reg, \offset(%rsp)
        110  movq \offset(%rsp), %\reg
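The `%\reg` / `\offset` syntax in dwarf2.h is GNU as macro substitution: one macro body serves every register and frame slot. A tiny self-contained sketch (the macro and label names here are invented, not the kernel's):

        .macro  SPILL_REG reg offset           # store a register to the frame
        movq    %\reg, \offset(%rsp)
        .endm

        .macro  FILL_REG reg offset            # load it back from the same slot
        movq    \offset(%rsp), %\reg
        .endm

        .text
demo:
        subq    $16, %rsp
        SPILL_REG rbx, 0                       # expands to: movq %rbx, 0(%rsp)
        SPILL_REG r12, 8                       # expands to: movq %r12, 8(%rsp)
        FILL_REG  r12, 8
        FILL_REG  rbx, 0
        addq    $16, %rsp
        ret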
/arch/x86/ia32/
    ia32entry.S:
        47  movq %rax,\offset+R11(%rsp)
        48  movq %rax,\offset+R10(%rsp)
        49  movq %\_r9,\offset+R9(%rsp)
        50  movq %rax,\offset+R8(%rsp)
        65  movl \offset+16(%rsp),%r9d
        67  movl \offset+40(%rsp),%ecx
        68  movl \offset+48(%rsp),%edx
        69  movl \offset+56(%rsp),%esi
        70  movl \offset+64(%rsp),%edi
        121  CFI_DEF_CFA rsp,0
        [all …]
/arch/x86/kernel/
    entry_64.S:
        95  movq RIP(%rsp), %rdi
        99  movq SS+16(%rsp), %rsi
        141  movq %r15, R15(%rsp)
        142  movq %r14, R14(%rsp)
        143  movq %r13, R13(%rsp)
        144  movq %r12, R12(%rsp)
        145  movq %r11, R11(%rsp)
        146  movq %r10, R10(%rsp)
        147  movq %rbp, RBP(%rsp)
        148  movq %rbx, RBX(%rsp)
        [all …]
    head_64.S:
        217  movq stack_start(%rip), %rsp
        298  movq stack_start(%rip),%rsp
        326  # 104(%rsp) %rflags
        327  # 96(%rsp) %cs
        328  # 88(%rsp) %rip
        329  # 80(%rsp) error code
        337  pushq $i # 72(%rsp) Vector number
        350  pushq %rax # 64(%rsp)
        351  pushq %rcx # 56(%rsp)
        352  pushq %rdx # 48(%rsp)
        [all …]
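head_64.S's comments document an interrupt-frame layout: the CPU pushes %rflags/%cs/%rip (and, for some vectors, an error code), then the handler pushes a vector number and the caller-clobbered registers, so every item ends up at a fixed offset from %rsp. A hedged sketch of how such a frame is typically built; the stub names are mine, and the assumption that exactly nine registers sit below the vector slot is inferred from the 72(%rsp) comment, not confirmed by the excerpt:

        .macro  early_vector num               # hypothetical per-vector stub
        pushq   $0                             # fake error code when the CPU pushes none
        pushq   $\num                          # vector number; 72(%rsp) in the final frame
        jmp     vector_common
        .endm

        .text
vector_common:
        pushq   %rax                           # 64(%rsp)
        pushq   %rcx                           # 56(%rsp)
        pushq   %rdx                           # 48(%rsp)
        pushq   %rsi                           # 40(%rsp)
        pushq   %rdi                           # 32(%rsp)
        pushq   %r8                            # 24(%rsp)
        pushq   %r9                            # 16(%rsp)
        pushq   %r10                           # 8(%rsp)
        pushq   %r11                           # 0(%rsp)
        # ... handle the exception, then pop everything in reverse ...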
    relocate_kernel_64.S:
        62  movq %rsp, RSP(%r11)
        95  lea PAGE_SIZE(%r8), %rsp
        174  leaq PAGE_SIZE(%r10), %rsp
        178  movq 0(%rsp), %rbp
        187  lea PAGE_SIZE(%r8), %rsp
        194  movq RSP(%r8), %rsp
/arch/x86/xen/
    xen-asm_64.S:
        24  mov 8+0(%rsp), %rcx
        25  mov 8+8(%rsp), %r11
        71  movq %rsp, PER_CPU_VAR(old_rsp)
        72  movq PER_CPU_VAR(kernel_stack), %rsp
        90  movq %rsp, PER_CPU_VAR(old_rsp)
        91  movq PER_CPU_VAR(kernel_stack), %rsp
        123  mov 0*8(%rsp), %rcx
        124  mov 1*8(%rsp), %r11
        125  mov 5*8(%rsp), %rsp
        152  lea 16(%rsp), %rsp /* strip %rcx, %r11 */
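Lines 71-72 and 90-91 of xen-asm_64.S are a stack switch: the current %rsp is parked in a per-CPU variable and a fresh kernel stack pointer is loaded. Outside the kernel's PER_CPU_VAR machinery the same idea can be sketched with plain memory slots; everything below is illustrative, and unlike the per-CPU original it is neither SMP- nor reentrancy-safe:

        .data
parked_rsp:     .quad 0                        # where the outgoing %rsp is parked
new_stack_top:  .quad 0                        # set up elsewhere; assumed 16-byte aligned

        .text
        .globl  run_on_other_stack
run_on_other_stack:
        movq    %rsp, parked_rsp(%rip)         # save the current stack pointer
        movq    new_stack_top(%rip), %rsp      # switch to the new stack
        call    worker                         # placeholder; runs on the new stack
        movq    parked_rsp(%rip), %rsp         # switch back before returning: the
        ret                                    # return address lives on the old stack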
/arch/x86/crypto/
    salsa20-x86_64-asm_64.S:
        5  mov %rsp,%r11
        8  sub %r11,%rsp
        26  movq %r11,0(%rsp)
        28  movq %r12,8(%rsp)
        30  movq %r13,16(%rsp)
        32  movq %r14,24(%rsp)
        34  movq %r15,32(%rsp)
        36  movq %rbx,40(%rsp)
        38  movq %rbp,48(%rsp)
        56  movq %rcx,56(%rsp)
        [all …]
    sha512-avx2-asm.S:
        188  add frame_XFER(%rsp),h # h = k + w + h # --
        250  add 1*8+frame_XFER(%rsp), h # h = k + w + h # --
        307  add 2*8+frame_XFER(%rsp), h # h = k + w + h # --
        365  add 3*8+frame_XFER(%rsp), h # h = k + w + h # --
        426  add frame_XFER(%rsp), h # h = k + w + h # --
        464  add 8*1+frame_XFER(%rsp), h # h = k + w + h # --
        502  add 8*2+frame_XFER(%rsp), h # h = k + w + h # --
        540  add 8*3+frame_XFER(%rsp), h # h = k + w + h # --
        573  mov %rsp, %rax
        574  sub $frame_size, %rsp
        [all …]
    sha512-avx-asm.S:
        103  #define W_t(i) 8*i+frame_W(%rsp)
        106  #define WK_2(i) 8*((i%2))+frame_WK(%rsp)
        285  mov %rsp, %rax
        286  sub $frame_size, %rsp
        287  and $~(0x20 - 1), %rsp
        288  mov %rax, frame_RSPSAVE(%rsp)
        291  mov %rbx, frame_GPRSAVE(%rsp)
        292  mov %r12, frame_GPRSAVE +8*1(%rsp)
        293  mov %r13, frame_GPRSAVE +8*2(%rsp)
        294  mov %r14, frame_GPRSAVE +8*3(%rsp)
        [all …]
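Lines 285-288 (and their twins in sha512-ssse3-asm.S and the sha256 files nearby) are the canonical aligned-frame prologue for SIMD code: save %rsp in a scratch register, allocate, align down to 32 bytes so aligned vector loads and stores are safe, then store the original %rsp inside the frame itself. The matching epilogue is a single load. A stand-alone sketch with made-up frame constants; the real files compute their own sizes:

        .set    frame_RSPSAVE, 0               # slot for the original %rsp
        .set    frame_W, 8                     # start of the (illustrative) work area
        .set    frame_size, 104                # invented total for this sketch

        .text
        .globl  aligned_frame_demo
aligned_frame_demo:
        mov     %rsp, %rax                     # keep the unaligned value
        sub     $frame_size, %rsp              # allocate the working frame
        and     $~(0x20 - 1), %rsp             # round down to a 32-byte boundary
        mov     %rax, frame_RSPSAVE(%rsp)      # remember how to get back
        # ... rounds use frame_W(%rsp) etc. with aligned vector accesses ...
        mov     frame_RSPSAVE(%rsp), %rsp      # one load undoes both sub and and
        ret

Restoring through the saved slot instead of an addq is what makes the and safe: the exact number of bytes the and removed no longer matters.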
    sha512-ssse3-asm.S:
        100  #define W_t(i) 8*i+frame_W(%rsp)
        103  #define WK_2(i) 8*((i%2))+frame_WK(%rsp)
        284  mov %rsp, %rax
        285  sub $frame_size, %rsp
        286  and $~(0x20 - 1), %rsp
        287  mov %rax, frame_RSPSAVE(%rsp)
        290  mov %rbx, frame_GPRSAVE(%rsp)
        291  mov %r12, frame_GPRSAVE +8*1(%rsp)
        292  mov %r13, frame_GPRSAVE +8*2(%rsp)
        293  mov %r14, frame_GPRSAVE +8*3(%rsp)
        [all …]
    sha256-avx2-asm.S:
        165  addl \disp(%rsp, SRND), h # h = k + w + h # --
        213  addl offset(%rsp, SRND), h # h = k + w + h # --
        264  addl offset(%rsp, SRND), h # h = k + w + h # --
        314  addl offset(%rsp, SRND), h # h = k + w + h # --
        380  addl \disp(%rsp, SRND), h # h = k + w + h # --
        419  addl offset(%rsp, SRND), h # h = k + w + h # --
        459  addl offset(%rsp, SRND), h # h = k + w + h # --
        499  addl offset(%rsp, SRND), h # h = k + w + h # --
        540  mov %rsp, %rax
        541  subq $STACK_SIZE, %rsp
        [all …]
    sha256-ssse3-asm.S:
        174  add _XFER(%rsp) , y2 # y2 = k + w + S1 + CH
        213  add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
        255  add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
        296  add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
        315  ## input is [rsp + _XFER + %1 * 4]
        335  add offset(%rsp), y2 # y2 = k + w + S1 + CH
        365  mov %rsp, %r12
        366  subq $STACK_SIZE, %rsp
        367  and $~15, %rsp
        372  mov NUM_BLKS, _INP_END(%rsp) # pointer to end of data
        [all …]
    sha256-avx-asm.S:
        179  add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
        214  add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
        253  add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
        291  add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
        309  ## input is [rsp + _XFER + %1 * 4]
        329  add offset(%rsp), y2 # y2 = k + w + S1 + CH
        359  mov %rsp, %r12
        360  subq $STACK_SIZE, %rsp # allocate stack space
        361  and $~15, %rsp # align stack pointer
        366  mov NUM_BLKS, _INP_END(%rsp)
        [all …]
/arch/x86/lib/
    csum-copy_64.S:
        55  subq $7*8, %rsp
        57  movq %rbx, 2*8(%rsp)
        59  movq %r12, 3*8(%rsp)
        61  movq %r14, 4*8(%rsp)
        63  movq %r13, 5*8(%rsp)
        65  movq %rbp, 6*8(%rsp)
        68  movq %r8, (%rsp)
        69  movq %r9, 1*8(%rsp)
        211  movq 2*8(%rsp), %rbx
        213  movq 3*8(%rsp), %r12
        [all …]
    copy_page_64.S:
        23  subq $2*8, %rsp
        25  movq %rbx, (%rsp)
        27  movq %r12, 1*8(%rsp)
        86  movq (%rsp), %rbx
        88  movq 1*8(%rsp), %r12
        90  addq $2*8, %rsp
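copy_page_64.S (and csum-copy_64.S above it) shows the plain callee-saved spill: %rbx and %r12 belong to the caller under the SysV ABI, so a routine that uses them must save and restore them around the work. The excerpt is nearly complete already; filled out, the shape is as follows (body elided, label invented):

        .text
        .globl  clobbers_rbx_r12
clobbers_rbx_r12:
        subq    $2*8, %rsp                     # two 8-byte spill slots
        movq    %rbx, (%rsp)                   # preserve the callee-saved registers
        movq    %r12, 1*8(%rsp)
        # ... copy loop is now free to use %rbx and %r12 ...
        movq    (%rsp), %rbx                   # put the caller's values back
        movq    1*8(%rsp), %r12
        addq    $2*8, %rsp
        ret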
/arch/x86/um/
    stub_64.S:
        24  mov %rbx, %rsp
        25  add $0x10, %rsp
        28  mov 0x0(%rsp), %rax
        37  mov %rsp, 8(%rbx)
        40  add %rax, %rsp
    setjmp_64.S:
        10  # %rsp (post-return)
        27  movq %rsp,8(%rdi) # Post-return %rsp!
        46  movq 8(%rdi),%rsp
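setjmp_64.S's comment "Post-return %rsp!" is the heart of setjmp: the saved stack pointer must be the value %rsp will have after setjmp returns, so that a later longjmp can resume the caller as if setjmp had just returned a second time. A hedged, minimal pair in the same spirit; the buffer layout and names below are my own, not the kernel's:

        .text
        .globl  my_setjmp, my_longjmp
my_setjmp:                                     # %rdi = jump buffer
        pop     %rsi                           # return address; %rsp is now post-return
        movq    %rsp, 0(%rdi)                  # save the post-return %rsp
        push    %rsi                           # put the return address back
        movq    %rbx,  8(%rdi)                 # callee-saved registers
        movq    %rbp, 16(%rdi)
        movq    %r12, 24(%rdi)
        movq    %r13, 32(%rdi)
        movq    %r14, 40(%rdi)
        movq    %r15, 48(%rdi)
        movq    %rsi, 56(%rdi)                 # resume address
        xorl    %eax, %eax                     # first return is 0
        ret

my_longjmp:                                    # %rdi = buffer, %esi = value (pass nonzero)
        movl    %esi, %eax                     # what the "second return" yields
        movq    0(%rdi), %rsp                  # restore the post-return stack pointer
        movq    8(%rdi), %rbx
        movq    16(%rdi), %rbp
        movq    24(%rdi), %r12
        movq    32(%rdi), %r13
        movq    40(%rdi), %r14
        movq    48(%rdi), %r15
        jmp     *56(%rdi)                      # reappear right after my_setjmp's call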
/arch/arm/mach-msm/
    scm.c:
        150  static inline void *scm_get_response_buffer(const struct scm_response *rsp)   in scm_get_response_buffer() argument
        152  return (void *)rsp + rsp->buf_offset;   in scm_get_response_buffer()
        229  struct scm_response *rsp;   in scm_call() local
        245  rsp = scm_command_to_response(cmd);   in scm_call()
        247  u32 start = (u32)rsp;   in scm_call()
        248  u32 end = (u32)scm_get_response_buffer(rsp) + resp_len;   in scm_call()
        255  } while (!rsp->is_complete);   in scm_call()
        258  memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);   in scm_call()
/arch/x86/kernel/acpi/
    wakeup_64.S:
        27  movq saved_rsp, %rsp
        42  subq $8, %rsp
        47  movq %rsp, pt_regs_sp(%rax)
        67  movq %rsp, saved_rsp
        73  addq $8, %rsp
        94  movq pt_regs_sp(%rax), %rsp
        111  addq $8, %rsp
/arch/s390/pci/
    pci_clp.c:
        84  if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)   in clp_query_pci_fngrp()
        88  rrb->response.hdr.rsp, rc);   in clp_query_pci_fngrp()
        127  if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {   in clp_query_pci_fn()
        135  rrb->response.hdr.rsp, rc);   in clp_query_pci_fn()
        198  if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {   in clp_set_pci_fn()
        204  } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);   in clp_set_pci_fn()
        206  if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)   in clp_set_pci_fn()
        210  rrb->response.hdr.rsp);   in clp_set_pci_fn()
        302  if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {   in clp_find_pci_devices()
        304  rrb->response.hdr.rsp, rc);   in clp_find_pci_devices()
/arch/s390/include/asm/
    clp.h:
        14  u16 rsp;   member
/arch/x86/include/uapi/asm/
    ptrace.h:
        66  unsigned long rsp;   member