/arch/powerpc/kernel/

fsl_booke_entry_mapping.S
      6:  mfmsr r7
      7:  rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
      8:  mfspr r7, SPRN_PID0
      9:  slwi r7,r7,16
     10:  or r7,r7,r4
     11:  mtspr SPRN_MAS6,r7
     13:  mfspr r7,SPRN_MAS1
     14:  andis. r7,r7,MAS1_VALID@h
     17:  mfspr r7,SPRN_MMUCFG
     18:  rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
    [all …]
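
Lines 6-11 assemble a TLB search value: PID0 is shifted into the SPID
field of MAS6 and MSR[IS] supplies the address-space bit. A minimal C
sketch of the value being composed (field positions taken from the
shifts above; the helper name is hypothetical):

    #include <stdint.h>

    /* MAS6 search value as built at lines 8-11: PID0 in bits 16+,
     * MSR[IS] as the low address-space (SAS) bit. */
    static inline uint32_t mas6_search_value(uint32_t pid0, uint32_t msr_is)
    {
        return (pid0 << 16) | (msr_is & 1);
    }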

misc_32.S
     40:  mullw r7,r10,r5
     41:  addc r7,r0,r7
     49:  addc r7,r0,r7
     62:  lis r7,__got2_start@ha
     63:  addi r7,r7,__got2_start@l
     66:  subf r8,r7,r8
     75:  add r7,r0,r7
     76:  2: lwz r0,0(r7)
     78:  stw r0,0(r7)
     79:  addi r7,r7,4
    [all …]
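
Lines 62-79 are the classic boot-time GOT fixup: walk the .got2 table
and rebase every 32-bit entry by the offset the kernel was actually
loaded at. A hedged C rendering of the loop (the section symbols are
real; the C framing is illustrative only):

    #include <stdint.h>

    /* Rebase each entry in [__got2_start, __got2_end) by 'offset',
     * mirroring the lwz/stw loop at lines 76-79. */
    static void relocate_got2(uint32_t *start, uint32_t *end, uint32_t offset)
    {
        for (uint32_t *p = start; p < end; p++)
            *p += offset;
    }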

tm.S
    149:  li r7, (MSR_TS_S)@higher
    151:  and r6, r6, r7
    213:  std r7, GPR7(r1) /* Temporary stash */
    219:  addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
    225:  subi r7, r7, STACK_FRAME_OVERHEAD
    228:  SAVE_GPR(0, r7) /* user r0 */
    229:  SAVE_GPRS(2, 6, r7) /* user r2-r6 */
    230:  SAVE_GPRS(8, 10, r7) /* user r8-r10 */
    236:  std r3, GPR1(r7)
    237:  std r4, GPR7(r7)
    [all …]

/arch/arm/boot/compressed/

ll_char_wr.S
     33:  stmfd sp!, {r4 - r7, lr}
     35:  @ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc)
     58:  ldrb r7, [r6, r1]
     62:  @ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
     65:  ldr r7, [lr, r7, lsl #2]
     66:  mul r7, r2, r7
     67:  sub r1, r1, #1 @ avoid using r7 directly after
     68:  str r7, [r0, -r5]!
     69:  ldrb r7, [r6, r1]
     70:  ldr r7, [lr, r7, lsl #2]
    [all …]
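
The repeating pattern at lines 58-70 (ldrb, then ldr with the byte
scaled by 4, then mul) reads one font byte, expands it through a
256-entry word table, and multiplies by the colour so every lit pixel
is painted at once. A sketch of that idea; the table layout and names
are assumptions, not taken from ll_char_wr.S:

    #include <stdint.h>

    /* expand_table[b] spreads the 8 font bits into separate pixel
     * lanes, so a single multiply replicates the colour into each
     * set pixel (assumed layout). */
    static uint32_t paint_row(const uint32_t expand_table[256],
                              uint8_t font_byte, uint8_t colour)
    {
        return expand_table[font_byte] * colour;   /* mul r7, r2, r7 */
    }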

/arch/arm/mach-imx/

suspend-imx6.S
     98:  ldr r7, =PM_INFO_MMDC_IO_VAL_OFFSET
     99:  add r7, r7, r0
    101:  ldr r8, [r7], #0x4
    102:  ldr r9, [r7], #0x4
    115:  ldr r7, =MX6Q_MMDC_MPDGCTRL0
    116:  ldr r6, [r11, r7]
    118:  str r6, [r11, r7]
    120:  ldr r6, [r11, r7]
    125:  ldr r6, [r11, r7]
    127:  str r6, [r11, r7]
    [all …]
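
Lines 101-102 are a post-increment pair walk: each iteration pops a
register offset and a value from a table prepared by the platform
code, used to reprogram the MMDC I/O pads around suspend. A sketch of
the access pattern (the struct and names are assumptions):

    #include <stdint.h>

    struct pad_setting {
        uint32_t offset;   /* offset within the register block */
        uint32_t value;    /* value to program */
    };

    /* Mirror of the "ldr r8,[r7],#0x4; ldr r9,[r7],#0x4" walk. */
    static void program_pads(volatile uint8_t *base,
                             const struct pad_setting *tbl, int n)
    {
        for (int i = 0; i < n; i++)
            *(volatile uint32_t *)(base + tbl[i].offset) = tbl[i].value;
    }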

/arch/powerpc/lib/

memcmp_32.S
     16:  srawi. r7, r5, 2 /* Divide len by 4 */
     19:  mtctr r7
     20:  li r7, 0
     21:  1: lwzx r3, r6, r7
     22:  lwzx r0, r4, r7
     23:  addi r7, r7, 4
     31:  lhzx r3, r6, r7
     32:  lhzx r0, r4, r7
     33:  addi r7, r7, 2
     37:  4: lbzx r3, r6, r7
    [all …]
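
The shape of the routine is visible in the matches: srawi. divides the
length by 4 so the CTR runs a word-at-a-time loop (the lwzx pair),
with halfword (lhzx) and byte (lbzx) loops mopping up the tail. A
hedged C sketch of the same strategy, not the kernel routine (it also
ignores the alignment concerns a real memcmp must handle):

    #include <stddef.h>
    #include <stdint.h>

    static int memcmp_sketch(const void *a, const void *b, size_t n)
    {
        const unsigned char *pa = a, *pb = b;
        size_t words = n / 4;                 /* srawi. r7,r5,2 */

        while (words--) {
            uint32_t wa, wb;
            __builtin_memcpy(&wa, pa, 4);     /* lwzx pair */
            __builtin_memcpy(&wb, pb, 4);
            if (wa != wb)
                break;                        /* byte loop finds the diff */
            pa += 4;
            pb += 4;
        }
        n -= (size_t)(pa - (const unsigned char *)a);
        for (; n; n--, pa++, pb++)
            if (*pa != *pb)
                return *pa - *pb;
        return 0;
    }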

memcpy_64.S
     66:  srdi r7,r5,4
     69:  mtctr r7
    105:  srdi r7,r5,4
    109:  mtctr r7
    119:  srd r7,r0,r11
    121:  or r7,r7,r6
    124:  # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
    137:  srd r7,r0,r11
    142:  # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
    143:  1: or r7,r7,r6
    [all …]
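
The comments at lines 124 and 142 give away the technique: for a
misaligned source, each destination doubleword is spliced from two
aligned loads, d0 = (s0 << sh) | (s1 >> (64 - sh)). A sketch of that
splice (sh is the source misalignment in bits, assumed 0 < sh < 64;
the real code also swaps the shifts for little-endian):

    #include <stddef.h>
    #include <stdint.h>

    static void copy_spliced(uint64_t *dst, const uint64_t *src_aligned,
                             unsigned sh, size_t n)
    {
        uint64_t s0 = src_aligned[0];
        for (size_t i = 0; i < n; i++) {
            uint64_t s1 = src_aligned[i + 1];
            dst[i] = (s0 << sh) | (s1 >> (64 - sh));  /* d0=(s0<<|s1>>) */
            s0 = s1;
        }
    }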

copy_32.S
     16:  lwz r7,4(r4); \
     20:  stw r7,4(r6); \
     27:  lwz r7,4(r4); \
     35:  stw r7,4(r6); \
    113:  clrlwi r7,r6,32-LG_CACHELINE_BYTES
    114:  add r8,r7,r5
    118:  xori r0,r7,CACHELINE_MASK & ~3
    125:  li r7,4
    126:  10: dcbz r7,r6
    175:  add r7,r3,r5 /* test if the src & dst overlap */
    [all …]
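
The dcbz at line 126 zeroes a whole destination cacheline before it is
written, so the store stream never has to fetch the line's stale
contents from memory; lines 113-118 work out where the first full line
starts. A sketch with dcbz modelled as a memset (the kernel uses the
real instruction, and the line size comes from the CPU):

    #include <string.h>

    #define CACHELINE_BYTES 32   /* assumption: 32-byte lines */

    static void copy_full_lines(char *dst, const char *src, size_t lines)
    {
        while (lines--) {
            memset(dst, 0, CACHELINE_BYTES);    /* dcbz r7,r6 */
            memcpy(dst, src, CACHELINE_BYTES);  /* unrolled lwz/stw */
            dst += CACHELINE_BYTES;
            src += CACHELINE_BYTES;
        }
    }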

copyuser_64.S
     95:  lex; ld r7,0(r4)
    104:  mr r9,r7
    108:  lex; ld r7,16(r4)
    118:  stex; std r7,16(r3)
    161:  srdi r7,r5,4
    165:  mtctr r7
    174:  sHd r7,r0,r11
    176:  or r7,r7,r6
    193:  sHd r7,r0,r11
    199:  1: or r7,r7,r6
    [all …]

/arch/powerpc/crypto/

aes-spe-keys.S
     31:  xor r7,r7,r7; \
     78:  LOAD_KEY(r7,r4,8)
     82:  stw r7,8(r3)
     94:  xor r7,r7,r6
     95:  xor r8,r8,r7
     98:  stw r7,8(r3)
    120:  LOAD_KEY(r7,r4,8)
    126:  stw r7,8(r3)
    140:  xor r7,r7,r6
    141:  xor r8,r8,r7
    [all …]
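
The xor ripple at lines 94-95 and 140-141 is the AES key-schedule
recurrence: each round-key word is the previous word XORed into the
word four back, w[i] = w[i-4] ^ w[i-1], with a transform on every
fourth word. A skeleton of the AES-128 schedule; g() is left abstract
and its name is hypothetical:

    #include <stdint.h>

    uint32_t g(uint32_t w, int round);   /* RotWord+SubWord+Rcon, assumed */

    static void expand_key_128(const uint32_t key[4], uint32_t w[44])
    {
        for (int i = 0; i < 4; i++)
            w[i] = key[i];
        for (int i = 4; i < 44; i++) {
            uint32_t t = w[i - 1];
            if (i % 4 == 0)
                t = g(t, i / 4);
            w[i] = w[i - 4] ^ t;   /* the xor r7,r7,r6 / xor r8,r8,r7 chain */
        }
    }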

/arch/arm/mm/

abort-lv4t.S
     33:  and r7, r8, #15 << 24
     34:  add pc, pc, r7, lsr #22 @ Now branch to the relevant processing routine
     65:  mov r7, #0x11
     66:  orr r7, r7, #0x1100
     67:  and r6, r8, r7
     68:  and r9, r8, r7, lsl #1
     70:  and r9, r8, r7, lsl #2
     72:  and r9, r8, r7, lsl #3
     78:  ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
     80:  subne r7, r7, r6, lsl #2 @ Undo increment
    [all …]
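
Lines 33-34 are a computed branch: bits 27:24 of the faulting
instruction are isolated, and "add pc, pc, r7, lsr #22" turns that
nibble into a word offset (index * 4) into a 16-way dispatch table.
The same decode in hedged C (handler type and names assumed):

    #include <stdint.h>

    typedef void (*abort_handler)(uint32_t insn);

    /* and r7, r8, #15 << 24  ->  idx = (insn >> 24) & 15;
     * the lsr #22 converts the masked field into idx * 4. */
    static void dispatch(const abort_handler table[16], uint32_t insn)
    {
        table[(insn >> 24) & 15](insn);
    }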

pv-fixup-asm.S
     30:  add r7, r2, #0x1000
     31:  add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
     32:  add r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
     33:  1: ldrd r4, r5, [r7]
     36:  strd r4, r5, [r7], #1 << L2_ORDER
     37:  cmp r7, r6
     41:  add r7, r2, #0x1000
     43:  add r7, r7, r3
     44:  ldrd r4, r5, [r7]
     47:  strd r4, r5, [r7], #1 << L2_ORDER
    [all …]

/arch/powerpc/platforms/52xx/

mpc52xx_sleep.S
     13:  mfmsr r7
     14:  ori r7, r7, 0x8000 /* EE */
     15:  mtmsr r7
    126:  mtspr SPRN_SPRG0, r7
    130:  mfspr r7, 311 /* MBAR */
    131:  addi r7, r7, 0x540 /* intr->main_emul */
    133:  stw r8, 0(r7)
    135:  dcbf 0, r7
    138:  mfspr r7, 311 /* MBAR */
    139:  addi r7, r7, 0x524 /* intr->enc_status */
    [all …]

/arch/csky/abiv1/

memcpy.S
     24:  mov r7, r2
     44:  stw r1, (r7, 0)
     46:  stw r5, (r7, 4)
     47:  stw r8, (r7, 8)
     48:  stw r1, (r7, 12)
     51:  addi r7, 16
     64:  stw r1, (r7, 0)
     67:  addi r7, 4
     80:  stb r1, (r7, 0)
     82:  addi r7, 1
    [all …]

/arch/arc/lib/

strchr-700.S
     22:  asl r7,r2,3
     25:  asl r7,r3,r7
     27:  lsr r7,r3,r7
     31:  sub r12,r2,r7
     37:  sub r12,r6,r7
     40:  and r7,r12,r4
     41:  breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
     47:  bic r2,r7,r6
     66:  and r7,r12,r4
     67:  breq r7,0,.Loop /* ... so that this branch is unaligned. */
    [all …]
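
The sub/and pairs (lines 31, 37, 40, 66) implement the word-at-a-time
zero-byte detector: subtracting 0x01010101 borrows out of any zero
byte, and masking against 0x80808080 (minus bits already set in the
word) keeps only real hits. strchr turns "find c" into "find a zero
byte" by XORing each word with c replicated into every lane. The
classic trick in C:

    #include <stdint.h>

    static int has_zero_byte(uint32_t x)
    {
        return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
    }

    static int word_contains(uint32_t word, uint8_t c)
    {
        return has_zero_byte(word ^ (c * 0x01010101u));
    }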

/arch/powerpc/platforms/83xx/

suspend-asm.S
     71:  mfspr r7, SPRN_HID2
     75:  stw r7, SS_HID+8(r3)
     80:  mfspr r7, SPRN_DABR
     87:  stw r7, SS_DABR+0(r3)
     94:  mfspr r7, SPRN_SPRG3
    100:  stw r7, SS_SPRG+12(r3)
    106:  mfspr r7, SPRN_SPRG7
    111:  stw r7, SS_SPRG+28(r3)
    116:  mfspr r7, SPRN_DBAT1L
    121:  stw r7, SS_DBAT+0x0c(r3)
    [all …]

/arch/powerpc/kvm/

book3s_interrupts.S
    170:  PPC_LL r7, GPR3(r1)
    172:  PPC_STL r14, VCPU_GPR(R14)(r7)
    173:  PPC_STL r15, VCPU_GPR(R15)(r7)
    174:  PPC_STL r16, VCPU_GPR(R16)(r7)
    175:  PPC_STL r17, VCPU_GPR(R17)(r7)
    176:  PPC_STL r18, VCPU_GPR(R18)(r7)
    177:  PPC_STL r19, VCPU_GPR(R19)(r7)
    178:  PPC_STL r20, VCPU_GPR(R20)(r7)
    179:  PPC_STL r21, VCPU_GPR(R21)(r7)
    180:  PPC_STL r22, VCPU_GPR(R22)(r7)
    [all …]

/arch/microblaze/lib/

uaccess_old.S
     75:  beqid r7, 0f /* zero size is not likely */
     77:  or r3, r3, r7 /* find if count is unaligned */
     82:  rsubi r3, r7, PAGE_SIZE /* detect PAGE_SIZE */
     88:  addik r7, r7, -4
     89:  bneid r7, w1
     91:  addik r3, r7, 0
    105:  swi r7, r1, 8
    121:  addik r7, r7, -0x200
    122:  bneid r7, loop
    128:  lwi r7, r1, 8
    [all …]

/arch/powerpc/kernel/vdso32/

cacheflush.S
     39:  lwz r7,CFG_DCACHE_BLOCKSZ(r10)
     40:  addi r5,r7,-1
     52:  mr r7, r6
     59:  add r6,r6,r7
     69:  lwz r7,CFG_ICACHE_BLOCKSZ(r10)
     70:  addi r5,r7,-1
     82:  add r6,r6,r7
     84:  2: icbi 0, r7
     85:  addi r7, r7, L1_CACHE_BYTES
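
Both loops read the cache-block size from the vDSO datapage
(CFG_DCACHE_BLOCKSZ / CFG_ICACHE_BLOCKSZ), mask the start address down
to a block boundary, and step one block per iteration; lines 84-85
show the icbi walk. A sketch of that walk with GCC inline assembly on
powerpc (the function name is assumed):

    static void icache_invalidate_range(void *start, void *end,
                                        unsigned long bsize)
    {
        char *p = (char *)((unsigned long)start & ~(bsize - 1));

        for (; p < (char *)end; p += bsize)   /* 2: icbi 0,r7 */
            __asm__ volatile("icbi 0, %0" : : "r"(p) : "memory");
        __asm__ volatile("isync");            /* order the fetch stream */
    }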

/arch/powerpc/boot/

div64.S
     19:  li r7,0
     22:  divwu r7,r5,r4 # if dividend.hi >= divisor,
     23:  mullw r0,r7,r4 # quotient.hi = dividend.hi / divisor
     52:  4: stw r7,0(r3) # return the quotient in *r3
     75:  addi r7,r5,32 # could be xori, or addi with -32
     77:  rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
     78:  sraw r7,r3,r7 # t2 = MSW >> (count-32)
     80:  slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
     82:  or r4,r4,r7 # LSW |= t2
     89:  addi r7,r5,32 # could be xori, or addi with -32
    [all …]
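
Lines 19-23 spell out schoolbook long division in base 2^32: the
quotient's high word is dividend.hi / divisor, and the remainder of
that divide seeds the division of the low word. A hedged C sketch (a
64-bit temporary stands in for the carry juggling the asm does with
32-bit ops; divisor must be nonzero):

    #include <stdint.h>

    static uint64_t div64_32(uint64_t dividend, uint32_t divisor,
                             uint32_t *rem)
    {
        uint32_t hi = dividend >> 32, lo = (uint32_t)dividend;
        uint32_t q_hi = hi / divisor;        /* divwu r7,r5,r4 */
        uint32_t r = hi % divisor;           /* via mullw/subtract */
        uint32_t q_lo = 0;

        for (int i = 31; i >= 0; i--) {      /* divide (r:lo) bit by bit */
            uint64_t acc = ((uint64_t)r << 1) | ((lo >> i) & 1);
            if (acc >= divisor) {
                acc -= divisor;
                q_lo |= 1u << i;
            }
            r = (uint32_t)acc;
        }
        if (rem)
            *rem = r;
        return ((uint64_t)q_hi << 32) | q_lo;
    }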

string.S
    127:  rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
    132:  mtctr r7
    136:  1: lwz r7,4(r4)
    138:  stw r7,4(r6)
    158:  add r7,r0,r4
    159:  andi. r7,r7,3 /* will source be word-aligned too? */
    163:  6: lbz r7,4(r4)
    165:  stb r7,4(r6)
    169:  rlwinm. r7,r5,32-3,3,31
    171:  mtctr r7
    [all …]

/arch/powerpc/platforms/44x/

misc_44x.S
     16:  mfmsr r7
     17:  ori r0,r7,MSR_DS
     24:  mtmsr r7
     30:  mfmsr r7
     31:  ori r0,r7,MSR_DS
     38:  mtmsr r7

/arch/nios2/kernel/

insnemu.S
     27:  ldw r7, PT_R7(sp)
    122:  stw r7, 28(sp)
    187:  movi r7, 0x24 /* muli opcode (I-type instruction format) */
    188:  beq r2, r7, mul_immed /* muli doesn't use the B register as a source */
    210:  andi r7, r4, 0x02 /* For R-type multiply instructions, … */
    212:  bne r7, zero, multiply
    261:  xori r7, r4, 0x25 /* OPX of div */
    262:  bne r7, zero, unsigned_division
    304:  cmplt r7, r3, zero /* r7 = MSB of r3 */
    305:  or r13, r13, r7
    [all …]
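
Lines 187-188 and 261-262 show the decode style: the trap handler
compares fields of the unimplemented instruction against known opcodes
(0x24 = muli, OPX 0x25 = div) to pick an emulation path. Nios II keeps
the primary opcode in the instruction's low 6 bits, so the muli test
looks like this in C (the helper name is assumed):

    #include <stdint.h>

    #define OP_MULI 0x24   /* from the comment at line 187 */

    static int is_muli(uint32_t insn)
    {
        return (insn & 0x3f) == OP_MULI;   /* movi r7,0x24; beq r2,r7,... */
    }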

/arch/hexagon/lib/

divsi3.S
     22:  r7 = cl0(r3)
     29:  r6 = sub(r7,r6)
     32:  r7 = r6
     38:  r5:4 = vaslw(r5:4,r7)
     43:  r7:6 = vlsrw(r5:4,#1)
     55:  r7:6 = vlsrw(r7:6,#2)
     56:  if (!p0.new) r0 = add(r0,r7)
     61:  if (!p0) r0 = add(r0,r7)
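
cl0 is Hexagon's count-leading-zeros, and the sub at line 29 computes
how far the divisor can be shifted up before it overtakes the
dividend; that difference bounds the subtract-and-shift loop. A hedged
C version of the idea (unsigned core only; the signed wrapper that
fixes up signs is omitted, and b must be nonzero):

    #include <stdint.h>

    static uint32_t udiv_norm(uint32_t a, uint32_t b)
    {
        int shift = __builtin_clz(b) - __builtin_clz(a | 1);
        uint32_t q = 0;

        if (shift < 0)
            return 0;                /* divisor already larger */
        b <<= shift;                 /* normalise: MSBs aligned */
        for (int i = 0; i <= shift; i++) {
            q <<= 1;
            if (a >= b) {
                a -= b;
                q |= 1;
            }
            b >>= 1;
        }
        return q;
    }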

/arch/arm/crypto/

sha1-armv4-large.S
     72:  ldmia r0,{r3,r4,r5,r6,r7}
     79:  mov r7,r7,ror#30 @ [6]
     85:  add r7,r8,r7,ror#2 @ E+=K_00_19
     90:  add r7,r7,r3,ror#27 @ E+=ROR(A,27)
     94:  add r7,r8,r7,ror#2 @ E+=K_00_19
     96:  add r7,r7,r3,ror#27 @ E+=ROR(A,27)
    102:  add r7,r7,r9 @ E+=X[i]
    105:  add r7,r7,r10 @ E+=F_00_19(B,C,D)
    115:  add r6,r6,r7,ror#27 @ E+=ROR(A,27)
    121:  add r6,r6,r7,ror#27 @ E+=ROR(A,27)
    [all …]
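
The comment trail reconstructs one SHA-1 round for rounds 0-19:
E += ROR(A,27) + F_00_19(B,C,D) + K_00_19 + X[i], with B rotated as
the register roles shift. ROR(A,27) is just ROL(A,5), so in C one
round looks like this (state array s = {A,B,C,D,E}; a sketch only,
the ARM code folds the rotations into instruction operands):

    #include <stdint.h>

    #define ROL(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
    #define K_00_19 0x5a827999u

    static void sha1_round_00_19(uint32_t s[5], uint32_t x_i)
    {
        uint32_t f = (s[1] & s[2]) | (~s[1] & s[3]);  /* F_00_19(B,C,D) */

        s[4] += ROL(s[0], 5) + f + K_00_19 + x_i;     /* E += ... */
        s[1] = ROL(s[1], 30);                         /* B = ROL(B,30) */
        /* the register roles then rotate: (A,B,C,D,E) -> (E,A,B,C,D) */
    }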