/arch/c6x/kernel/ |
D | entry.S |
     19  #define SP B15                                  macro
     40  SHR   .S1X  SP,THREAD_SHIFT,reg
     48  STW   .D2T2 B0,*SP--[2]           ; save original B0
     54  STW   .D2T2 B1,*+SP[1]            ; save original B1
     55  XOR   .D2   SP,B1,B0              ; (SP ^ KSP)
     56  LDW   .D2T2 *+SP[1],B1            ; restore B0/B1
     57  LDW   .D2T2 *++SP[2],B0
     59  [B0]  STDW  .D2T2 SP:DP,*--B1[1]  ; user: save user sp/dp kstack
     60  [B0]  MV    .S2   B1,SP           ; and switch to kstack
     61  ||[!B0] STDW .D2T2 SP:DP,*--SP[1] ; kernel: save on current stack
    [all …]
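A minimal C sketch of the user/kernel test the entry.S comments above describe; this is an assumption for illustration, not the kernel's code, and THREAD_SHIFT_SKETCH plus the helper names are placeholders. XORing the trapped SP with the kernel stack pointer and shifting right by THREAD_SHIFT is zero exactly when both already point into the same THREAD_SIZE-aligned block, i.e. the exception came from kernel mode; otherwise the handler switches to the kernel stack.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT_SKETCH 13          /* placeholder log2(THREAD_SIZE) */

static int trapped_in_kernel(uintptr_t sp, uintptr_t ksp)
{
        /* zero iff sp and ksp lie in the same THREAD_SIZE-aligned block */
        return ((sp ^ ksp) >> THREAD_SHIFT_SKETCH) == 0;
}

int main(void)
{
        uintptr_t ksp = 0xc0002000;     /* made-up kernel stack pointer */

        printf("%d %d\n",
               trapped_in_kernel(0xc0002f80, ksp),   /* same block -> 1 */
               trapped_in_kernel(0x7fffe000, ksp));  /* user stack -> 0 */
        return 0;
}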
|
D | switch_to.S |
     10  #define SP B15                                  macro
     42  ;; Switch to next SP
     43  MV    .S2   B7,SP
|
/arch/powerpc/platforms/cell/spufs/ |
D | spu_save_crt0.S |
     63  il   $SP, 16368
     64  stqd $0, 0($SP)
     70  stqd $SP, -160($SP)
     71  ai   $SP, $SP, -160
|
D | spu_restore_crt0.S |
     29  il   $SP, 16368
     30  stqd $0, 0($SP)
     36  stqd $SP, -160($SP)
     37  ai   $SP, $SP, -160
|
/arch/arm/kernel/ |
D | unwind.c |
     74  SP = 13,                                              enumerator
    247  unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];  in unwind_exec_pop_subset_r4_to_r13()
    259  ctrl->vrs[SP] = (unsigned long)vsp;                   in unwind_exec_pop_subset_r4_to_r13()
    267  unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];  in unwind_exec_pop_r4_to_rN()
    279  ctrl->vrs[SP] = (unsigned long)vsp;                   in unwind_exec_pop_r4_to_rN()
    287  unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];  in unwind_exec_pop_subset_r0_to_r3()
    298  ctrl->vrs[SP] = (unsigned long)vsp;                   in unwind_exec_pop_subset_r0_to_r3()
    337  ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;            in unwind_exec_insn()
    339  ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;            in unwind_exec_insn()
    356  ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];               in unwind_exec_insn()
    [all …]
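As a rough illustration of the vrs[SP] pattern the unwind.c hits above show, here is a hedged, standalone sketch of the EHABI "pop r4-r[4+nnn]{, r14}" step; the struct and function names are invented, only the shape of the virtual-SP walk follows the listing.

struct unwind_ctrl_sketch {
        unsigned long vrs[16];          /* virtual register set */
};

enum { FP = 11, SP = 13, LR = 14, PC = 15 };

/* Read r4..r4+(insn & 7) (bit 3 adds r14) off the virtual stack pointer,
 * then write the advanced vsp back into vrs[SP], mirroring the
 * ctrl->vrs[SP] updates shown above. */
static void pop_r4_to_rN_sketch(struct unwind_ctrl_sketch *ctrl,
                                unsigned int insn)
{
        unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
        unsigned int reg;

        for (reg = 4; reg <= 4 + (insn & 7); reg++)
                ctrl->vrs[reg] = *vsp++;

        if (insn & 0x8)
                ctrl->vrs[LR] = *vsp++;

        ctrl->vrs[SP] = (unsigned long)vsp;
}

int main(void)
{
        unsigned long fake_stack[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        struct unwind_ctrl_sketch ctrl = { { 0 } };

        ctrl.vrs[SP] = (unsigned long)fake_stack;
        pop_r4_to_rN_sketch(&ctrl, 0x3);        /* pop r4-r7 */
        /* ctrl.vrs[4..7] now hold 1..4 and vrs[SP] advanced by 4 words */
        return 0;
}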
|
D | sleep.S |
     69  mov r5, sp                        @ current virtual SP
     81  1: mov r2, r5                     @ virtual SP
     91  ldmia sp!, {r1 - r3}              @ pop phys pgd, virt SP, phys resume fn
|
D | entry-ftrace.S |
     79  add lr, sp, #16                   @ move in LR the value of SP as it was
     86  @ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |
|
/arch/arc/include/asm/ |
D | entry-arcv2.h |
     53  ; 1. SP auto-switched to kernel mode stack
     78  ; 1. SP auto-switched to kernel mode stack
    146  ; - K mode: add the offset from current SP where H/w starts auto push
    149  ; 2. Upon entry SP is always saved (for any inspection, unwinding etc),
    152  lr  r10, [AUX_USER_SP]            ; U mode SP variable
    156  add.nz r10, r10, SZ_PT_REGS       ; K mode SP
    158  st  r10, [sp, PT_sp]              ; SP (pt_regs->sp)
    181  ; Restore SP (into AUX_USER_SP) only if returning to U mode
    237  ; SP points to PC/STAT32: hw restores them despite NO_AUTOSAVE
    250  btst r0, STATUS_U_BIT             ; Z flag set if K, used in restoring SP
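A hedged C rendering of what the entry-arcv2.h comments above describe for recording the pre-exception SP in pt_regs; the names and frame size below are placeholders, and the real code is the assembly prologue macro, not this function. From user mode the hardware has already switched SP to the kernel stack and the user SP sits in AUX_USER_SP; from kernel mode the original SP is the current one plus the auto-saved frame.

#include <stdint.h>

#define SZ_PT_REGS_SKETCH 96            /* placeholder frame size */

struct pt_regs_sketch {
        uint32_t sp;                    /* pt_regs->sp */
};

/* came_from_user corresponds to STATUS32.U at the time of the exception */
static void record_entry_sp(struct pt_regs_sketch *regs, uint32_t cur_sp,
                            uint32_t aux_user_sp, int came_from_user)
{
        regs->sp = came_from_user
                ? aux_user_sp                   /* lr r10, [AUX_USER_SP] */
                : cur_sp + SZ_PT_REGS_SKETCH;   /* add.nz r10, r10, SZ_PT_REGS */
}

int main(void)
{
        struct pt_regs_sketch regs;

        record_entry_sp(&regs, 0x9000, 0x5ffff000, 1);  /* -> 0x5ffff000 */
        record_entry_sp(&regs, 0x9000, 0x5ffff000, 0);  /* -> 0x9000 + 96 */
        return 0;
}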
|
D | entry.h |
    134  mov r12, sp                       ; save SP as ref to pt_regs
    193  ; SP is back to start of pt_regs
|
/arch/x86/crypto/ |
D | serpent-sse2-x86_64-asm_64.S |
    565  #define SP(SBOX, x0, x1, x2, x3, x4, i) \       macro
    701  SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31);
    702  SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30);
    703  SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29);
    704  SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28);
    705  SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27);
    706  SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26);
    707  SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25);
    708  SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24);
    709  SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23);
    [all …]
|
D | serpent-avx2-asm_64.S |
    536  #define SP(SBOX, x0, x1, x2, x3, x4, i) \       macro
    632  SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31);
    633  SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30);
    634  SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29);
    635  SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28);
    636  SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27);
    637  SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26);
    638  SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25);
    639  SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24);
    640  SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23);
    [all …]
|
D | serpent-avx-x86_64-asm_64.S |
    530  #define SP(SBOX, x0, x1, x2, x3, x4, i) \       macro
    626  SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31);
    627  SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30);
    628  SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29);
    629  SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28);
    630  SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27);
    631  SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26);
    632  SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25);
    633  SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24);
    634  SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23);
    [all …]
|
/arch/arm/probes/ |
D | decode-thumb.c |
    105  REGS(SP, 0, SP, 0, NOSPPC)),
    114  REGS(SP, 0, NOPC, 0, NOSPPC)),
    167  REGS(SP, 0, NOPC, 0, 0)),
    198  REGS(SP, 0, SP, 0, 0)),
|
/arch/x86/kernel/ |
D | vm86_32.c |
     82  #define SP(regs) (*(unsigned short *)&((regs)->pt.sp))     macro
    551  SP(regs) -= 6;                    in do_int()
    571  do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));  in handle_vm86_trap()
    599  sp = SP(regs);                    in handle_vm86_fault()
    626  SP(regs) -= 4;                    in handle_vm86_fault()
    629  SP(regs) -= 2;                    in handle_vm86_fault()
    640  SP(regs) += 4;                    in handle_vm86_fault()
    643  SP(regs) += 2;                    in handle_vm86_fault()
    679  SP(regs) += 12;                   in handle_vm86_fault()
    684  SP(regs) += 6;                    in handle_vm86_fault()
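The vm86_32.c hits all go through the SP(regs) macro defined at line 82, which on little-endian x86 aliases the low 16 bits of the saved stack pointer so the emulated real-mode pushes and pops wrap within the 64K stack segment. A standalone sketch of the same aliasing trick, with made-up struct names:

#include <stdio.h>

struct fake_pt_regs { unsigned long sp; };
struct fake_vm86_regs { struct fake_pt_regs pt; };

/* same shape as the kernel macro; assumes little-endian layout */
#define SP16(regs) (*(unsigned short *)&((regs)->pt.sp))

int main(void)
{
        struct fake_vm86_regs regs = { .pt = { .sp = 0x00000002 } };

        SP16(&regs) -= 6;       /* mimic do_int() pushing FLAGS, CS, IP */
        /* the 16-bit view wrapped around; only the low half of sp changed */
        printf("16-bit SP = %#x, full sp = %#lx\n",
               (unsigned int)SP16(&regs), regs.pt.sp);
        return 0;
}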
|
/arch/m68k/ifpsp060/ |
D | fpsp.doc |
    169  - documented in 3.5 of 060SP spec.
    177  - documented in 3.5 of 060SP spec.
    185  - documented in 3.7 of 060SP spec.
    193  - documented in 3.6 of 060SP spec.
    202  - documented in 3.4 of 060SP spec.
    214  - documented in 3.4 of 060SP spec.
    227  - not fully documented in 060SP spec.
    264  - documented in 3.1 of 060SP spec.
|
D | CHANGES |
     51  a failing value to the 68060SP, the package ignores
     97  stated that ONLY "bit 0" would be set. The 060SP attempts to set a few
    119  stated that ONLY "bit 0" would be set. The 060SP attempts to set a few
|
D | os.S |
     61  | When installing the 060SP, the _copyin()/_copyout() equivalents for a
     64  | The addresses within the 060SP are guaranteed to be on the stack.
|
/arch/sparc/net/ |
D | bpf_jit_64.h |
     18  #define SP 0x0e                                  macro
|
D | bpf_jit_32.h |
     29  #define SP 0x0e                                  macro
|
D | bpf_jit_comp_32.c |
    212  do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \
    216  do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \
    292  *prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))
    295  *prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
|
D | bpf_jit_comp_64.c |
    813  emit(SAVE | IMMED | RS1(SP) | S13(-stack_needed) | RD(SP), ctx);    in build_prologue()
    819  emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(G0), ctx);              in build_prologue()
    867  emit(LD32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);             in emit_tail_call()
    875  emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);             in emit_tail_call()
   1056  emit_alu3_K(ADD, SP, STACK_BIAS + 128, tmp, ctx);                   in build_insn()
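Both sparc JITs reserve register number 0x0e for SP (%o6, the stack pointer) and fold it into instruction words through field macros like RS1()/RD()/S13() in the hits above. A hedged, self-contained sketch of that composition; the field shifts follow the SPARC format-3 layout, and the macro names here are illustrative rather than the kernel's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SP_REG      0x0e                /* %o6 / %sp */
#define RD_F(r)     ((uint32_t)(r) << 25)
#define RS1_F(r)    ((uint32_t)(r) << 14)
#define IMMED_F     (1u << 13)          /* i bit: second operand is simm13 */
#define S13_F(x)    ((uint32_t)(x) & 0x1fff)
#define ADD_OP      0x80000000u         /* op=2, op3=0: integer add */

int main(void)
{
        /* "add %sp, -176, %sp": move the stack pointer down by 176 bytes */
        uint32_t insn = ADD_OP | RD_F(SP_REG) | RS1_F(SP_REG)
                               | IMMED_F | S13_F(-176);

        printf("insn = 0x%08" PRIx32 "\n", insn);       /* 0x9c03bf50 */
        return 0;
}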
|
/arch/mips/kvm/ |
D | entry.c |
     50  #define SP 29                                    macro
    225  UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));             in kvm_mips_build_vcpu_run()
    249  UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);  in kvm_mips_build_vcpu_run()
    784  UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);  in kvm_mips_build_exit()
    787  UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));             in kvm_mips_build_exit()
    795  kvm_mips_build_restore_scratch(&p, K0, SP);                         in kvm_mips_build_exit()
    811  UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);                           in kvm_mips_build_exit()
|
/arch/x86/um/ |
D | signal.c |
    177  GETREG(SP, sp);                   in copy_sc_from_user()
    256  PUTREG(SP, sp);                   in copy_sc_to_user()
    279  PUTREG(SP, sp_at_signal);         in copy_sc_to_user()
|
/arch/arm/mm/ |
D | abort-lv4t.S |
    216  addeq r7, r7, r6, lsl #2          @ increment SP if PUSH
    217  subne r7, r7, r6, lsl #2          @ decrement SP if POP
|
/arch/sh/kernel/cpu/sh2/ |
D | entry.S |
     88  mov.l r3,@-r15                    ! original SP
    137  mov.l r0,@-r2                     ! save old SP
|