| /kernel/linux/linux-5.10/arch/xtensa/boot/boot-redboot/ |
| D | bootstrap.S |
      96  mov.n a8, a4
     101  s32i a10, a8, 0
     102  s32i a11, a8, 4
     105  s32i a10, a8, 8
     106  s32i a11, a8, 12
     107  addi a8, a8, 16
     109  blt a8, a5, 1b
     143  l32i a8, a2, 0
     144  s32i a8, a6, 0
     171  add a8, a0, a4
     [all …]
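
A hedged reading of the loop at lines 96..109: a8 walks the destination in
16-byte steps until it reaches a5 ("blt a8, a5, 1b"); the loads that refill
a10/a11 between the stores don't mention a8, so the a8-only match list hides
them. A minimal C sketch of that shape (function and parameter names are ours):

    #include <stdint.h>

    /* Copy 16 bytes per iteration, two words at a time, until the
     * destination cursor reaches dst_end. */
    void copy16(uint32_t *dst, const uint32_t *src, uint32_t *dst_end)
    {
        while (dst < dst_end) {            /* blt a8, a5, 1b   */
            uint32_t w0 = *src++, w1 = *src++;
            dst[0] = w0;                   /* s32i a10, a8, 0  */
            dst[1] = w1;                   /* s32i a11, a8, 4  */
            w0 = *src++; w1 = *src++;
            dst[2] = w0;                   /* s32i a10, a8, 8  */
            dst[3] = w1;                   /* s32i a11, a8, 12 */
            dst += 4;                      /* addi a8, a8, 16  */
        }
    }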
|
| /kernel/linux/linux-5.10/arch/xtensa/lib/ |
| D | memcopy.S |
      52  * a8/ tmp
     124  movi a8, 3 # if source is not aligned,
     125  _bany a3, a8, .Lsrcunaligned # then use shifting copy
     134  slli a8, a7, 4
     135  add a8, a8, a3 # a8 = end of last 16B source chunk
     149  bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end
     202  and a11, a3, a8 # save unalignment offset for below
     215  l32i a8, a3, 8
     219  __src_b a7, a7, a8
     222  __src_b a8, a8, a9
     [all …]
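
The .Lsrcunaligned path these hits point at rounds the source down to a word
boundary and funnel-shifts each output word out of two adjacent aligned words;
that is what __src_b does on hardware, with SAR preloaded from the
misalignment. A little-endian C sketch of the idea (names are ours; it assumes
the offset is nonzero, since the aligned case takes the word-copy path):

    #include <stdint.h>
    #include <stddef.h>

    void shifting_copy(uint32_t *dst, const uint8_t *src, size_t nwords)
    {
        unsigned off   = (uintptr_t)src & 3;   /* and a11, a3, a8 */
        unsigned shift = off * 8;              /* nonzero on this path */
        const uint32_t *s = (const uint32_t *)(src - off); /* aligned base */

        uint32_t lo = *s++;
        for (size_t i = 0; i < nwords; i++) {
            uint32_t hi = *s++;                /* l32i a8, a3, 8 ... */
            dst[i] = (lo >> shift) | (hi << (32 - shift)); /* __src_b */
            lo = hi;
        }
    }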
|
| D | checksum.S |
     142  l8ui a8, a2, 3 /* bits 0.. 8 */
     146  slli a8, a8, 24
     150  or a7, a7, a8
     183  a8 = temp
     224  EX(10f) l32i a8, a2, 4
     226  EX(10f) s32i a8, a3, 4
     228  ONES_ADD(a5, a8)
     230  EX(10f) l32i a8, a2, 12
     232  EX(10f) s32i a8, a3, 12
     234  ONES_ADD(a5, a8)
     [all …]
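
The EX()-annotated load/store pairs with ONES_ADD between them belong to the
copy-and-checksum routine: each word is copied and folded into a running
ones-complement sum with end-around carry. A minimal C sketch of the
accumulation step (function names are ours):

    #include <stdint.h>
    #include <stddef.h>

    /* 32-bit ones-complement add: if the addition carries out of bit 31,
     * wrap the carry back in -- what ONES_ADD(a5, a8) does. */
    static inline uint32_t ones_add(uint32_t sum, uint32_t v)
    {
        sum += v;
        if (sum < v)        /* carried out of bit 31 */
            sum += 1;       /* end-around carry      */
        return sum;
    }

    uint32_t csum_words(const uint32_t *p, size_t nwords)
    {
        uint32_t sum = 0;
        while (nwords--)
            sum = ones_add(sum, *p++);
        return sum;
    }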
|
| D | usercopy.S |
      50  * a8/ tmp
      73  movi a8, 3 # if source is also aligned,
      74  bnone a3, a8, .Laligned # then use word copy
     143  slli a8, a7, 4
     144  add a8, a8, a3 # a8 = end of last 16B source chunk
     158  blt a3, a8, .Loop1
     201  and a10, a3, a8 # save unalignment offset for below
     213  EX(10f) l32i a8, a3, 8
     217  __src_b a7, a7, a8
     220  __src_b a8, a8, a9
     [all …]
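
What distinguishes usercopy.S from memcopy.S is the EX(10f) wrapper on every
user-memory access: a fault mid-copy jumps to the 10f fixup, which returns the
number of bytes left uncopied instead of oopsing. C cannot express the
fault-to-fixup jump, so this hedged sketch fakes it with a hypothetical
would_fault() predicate:

    #include <stddef.h>

    static int would_fault(const unsigned char *p) { (void)p; return 0; }

    size_t copy_with_fixup(unsigned char *dst, const unsigned char *src,
                           size_t n)
    {
        size_t done;
        for (done = 0; done < n; done++) {
            if (would_fault(src + done))  /* stand-in for a page fault   */
                return n - done;          /* what the 10f fixup computes */
            dst[done] = src[done];
        }
        return 0;                         /* everything copied */
    }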
|
| /kernel/linux/linux-6.6/arch/xtensa/lib/ |
| D | memcopy.S |
      52  * a8/ tmp
     124  movi a8, 3 # if source is not aligned,
     125  _bany a3, a8, .Lsrcunaligned # then use shifting copy
     134  slli a8, a7, 4
     135  add a8, a8, a3 # a8 = end of last 16B source chunk
     149  bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end
     202  and a11, a3, a8 # save unalignment offset for below
     215  l32i a8, a3, 8
     219  __src_b a7, a7, a8
     222  __src_b a8, a8, a9
     [all …]
|
| D | checksum.S |
     142  l8ui a8, a2, 3 /* bits 0.. 8 */
     146  slli a8, a8, 24
     150  or a7, a7, a8
     184  a8 = temp
     225  EX(10f) l32i a8, a2, 4
     227  EX(10f) s32i a8, a3, 4
     229  ONES_ADD(a5, a8)
     231  EX(10f) l32i a8, a2, 12
     233  EX(10f) s32i a8, a3, 12
     235  ONES_ADD(a5, a8)
     [all …]
|
| D | usercopy.S |
      50  * a8/ tmp
      78  movi a8, 3 # if source is also aligned,
      79  bnone a3, a8, .Laligned # then use word copy
     148  slli a8, a7, 4
     149  add a8, a8, a3 # a8 = end of last 16B source chunk
     163  blt a3, a8, .Loop1
     206  and a10, a3, a8 # save unalignment offset for below
     224  EX(10f) l32i a8, a3, 8
     228  __src_b a7, a7, a8
     231  __src_b a8, a8, a9
     [all …]
|
| /kernel/linux/linux-6.6/arch/xtensa/mm/ |
| D | misc.S |
      65  l32i a8, a3, 0
      67  s32i a8, a2, 0
      70  l32i a8, a3, 8
      72  s32i a8, a2, 8
      75  l32i a8, a3, 16
      77  s32i a8, a2, 16
      80  l32i a8, a3, 24
      82  s32i a8, a2, 24
     170  addi a8, a3, 1 # way1
     172  wdtlb a7, a8
     [all …]
|
| /kernel/linux/linux-5.10/arch/xtensa/mm/ |
| D | misc.S |
      64  l32i a8, a3, 0
      66  s32i a8, a2, 0
      69  l32i a8, a3, 8
      71  s32i a8, a2, 8
      74  l32i a8, a3, 16
      76  s32i a8, a2, 16
      79  l32i a8, a3, 24
      81  s32i a8, a2, 24
     188  addi a8, a3, 1 # way1
     190  wdtlb a7, a8
     [all …]
|
| /kernel/linux/linux-6.6/arch/xtensa/boot/boot-redboot/ |
| D | bootstrap.S |
      99  mov.n a8, a4
     104  s32i a10, a8, 0
     105  s32i a11, a8, 4
     108  s32i a10, a8, 8
     109  s32i a11, a8, 12
     110  addi a8, a8, 16
     112  blt a8, a5, 1b
     146  l32i a8, a2, 0
     147  s32i a8, a6, 0
     177  # a8(a4) Load address of the image
|
| /kernel/linux/linux-6.6/arch/xtensa/kernel/ |
| D | align.S |
     182  and a3, a3, a8 # align memory address
     184  __ssa8 a8
     257  and a3, a3, a8 # align memory address
     259  __ssa8 a8
     355  /* Restore a4...a8 and SAR, set SP, and jump to default exception. */
     358  l32i a8, a2, PT_AREG8
     405  and a4, a4, a8 # align memory address
     414  __ssa8r a8
     415  __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
     422  and a5, a5, a8 # mask
     [all …]
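
These hits are the unaligned-access emulator: it masks the faulting address
down to a word boundary ("align memory address"), then merges the store value
into two aligned words using the lo/hi masks that __ssa8r/__src_b build. A
little-endian C sketch of the store case (names are ours):

    #include <stdint.h>

    void store32_unaligned(uint8_t *addr, uint32_t val)
    {
        unsigned off   = (uintptr_t)addr & 3;
        uint32_t *p    = (uint32_t *)(addr - off); /* align memory address */
        unsigned shift = off * 8;

        if (off == 0) {            /* already aligned: plain store */
            *p = val;
            return;
        }
        uint32_t lo_mask = ~0u << shift;          /* bytes landing in p[0] */
        p[0] = (p[0] & ~lo_mask) | (val << shift);
        uint32_t hi_mask = ~0u >> (32 - shift);   /* bytes spilling to p[1] */
        p[1] = (p[1] & ~hi_mask) | (val >> (32 - shift));
    }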
|
| D | coprocessor.S |
     168  s32i a8, a1, PT_AREG8
     193  /* Retrieve previous owner (a8). */
     197  l32i a8, a0, EXC_TABLE_COPROCESSOR_OWNER
     220  beqz a8, 1f # skip 'save' if no previous owner
     224  l32i a10, a8, THREAD_CPENABLE
     231  add a2, a2, a8
     239  s32i a10, a8, THREAD_CPENABLE
     252  l32i a8, a1, PT_AREG8
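
The owner check at line 220 is the heart of Xtensa's lazy coprocessor
switching: state is saved only when some other thread actually was using the
coprocessor. A hedged C sketch of that policy (all types and helpers here are
illustrative, not the kernel's):

    struct cp_state { unsigned long regs[16]; };
    struct thread   { struct cp_state cp; };

    static struct thread *cp_owner;     /* previous owner, may be NULL */

    static void save_cp_hw(struct cp_state *to)         { (void)to;   }
    static void load_cp_hw(const struct cp_state *from) { (void)from; }

    void lazy_cp_switch(struct thread *curr)
    {
        if (cp_owner)                    /* beqz a8, 1f: nothing to save  */
            save_cp_hw(&cp_owner->cp);   /* spill live state to old owner */
        load_cp_hw(&curr->cp);           /* bring in curr's saved state   */
        cp_owner = curr;                 /* curr now owns the coprocessor */
    }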
|
| D | entry.S |
      178  s32i a8, a1, PT_AREG8
      318  s32i a8, a1, PT_AREG8
      729  l32i a8, a1, PT_AREG8
     1025  _bbci.l a8, 30, 8f
     1253  /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
     1257  s32i a8, a2, PT_AREG8
     1318  s32e a8, a4, -32
     1345  s32e a8, a0, -48
     1346  mov a8, a0
     1348  s32e a9, a8, -44
     [all …]
|
| /kernel/linux/linux-5.10/arch/xtensa/kernel/ |
| D | align.S |
     169  s32i a8, a2, PT_AREG8
     182  rsr a8, excvaddr # load unaligned memory address
     233  and a3, a3, a8 # align memory address
     235  __ssa8 a8
     325  /* Restore a4...a8 and SAR, set SP, and jump to default exception. */
     327  l32i a8, a2, PT_AREG8
     372  and a4, a4, a8 # align memory address
     381  __ssa8r a8
     382  __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
     389  and a5, a5, a8 # mask
     [all …]
|
| D | entry.S |
      179  s32i a8, a1, PT_AREG8
      317  s32i a8, a1, PT_AREG8
      716  l32i a8, a1, PT_AREG8
     1007  _bbci.l a8, 30, 8f
     1228  /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
     1232  s32i a8, a2, PT_AREG8
     1293  s32e a8, a4, -32
     1320  s32e a8, a0, -48
     1321  mov a8, a0
     1323  s32e a9, a8, -44
     [all …]
|
| /kernel/linux/linux-5.10/arch/c6x/lib/ |
| D | csum_64plus.S |
      71  || MPYU .M1 A7,A2,A8
      76  || ADD .L1 A8,A9,A9
      84  || ZERO .D1 A8
      89  LDBU .D1T1 *A4++,A8
      92  ADD .S1 A8,A9,A9
      95  STB .D2T1 A8,*B4++
     100  LDBU .D1T1 *A4++,A8
     103  SHL .S1 A8,8,A0
     107  STB .D2T1 A8,*B4++
     394  || MV .S1 A6,A8
     [all …]
|
| D | memcpy_64plus.S |
      21  [A1] LDB .D2T1 *B4++,A8
      29  [A1] STB .D1T1 A8,*A3++
      34  LDNDW .D2T1 *B4++,A9:A8
      39  || STNDW .D1T1 A9:A8,*A3++
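
The shape here is a byte loop (predicated LDB/STB) around a streamed body of
non-aligned doubleword moves, with A9:A8 acting as a 64-bit register pair for
LDNDW/STNDW. A hedged C sketch of that shape (the real routine predicates on
low bits of the count; this sketch just splits into 8-byte chunks and a tail):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        while (n >= 8) {            /* LDNDW/STNDW: 8 bytes per move */
            uint64_t pair;          /* stands in for the A9:A8 pair  */
            memcpy(&pair, s, 8);    /* unaligned-safe load           */
            memcpy(d, &pair, 8);
            s += 8; d += 8; n -= 8;
        }
        while (n--)                 /* [A1] LDB/STB byte tail        */
            *d++ = *s++;
        return dst;
    }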
|
| D | strasgi.S |
      21  ldw .d2t1 *B4++, A8
      48  || mv .s2x A8, B5
      51  [B0] ldw .d2t1 *B4++, A8
      72  [B0] stw .d1t1 A8, *A4++
|
| /kernel/linux/linux-6.6/arch/xtensa/include/asm/ |
| D | initialize_mmu.h |
     204  extui a8, a4, 28, 4
     205  beq a8, a11, 2f
     207  mov a11, a8
     209  addx4 a9, a8, a3
     220  movi a8, 0x20000000
     233  add a5, a5, a8
     235  bgeu a5, a8, 1b
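
Two idioms show up in these hits: "extui a8, a4, 28, 4" extracts bits 28..31
of an address (its 256MB-region number), and the tail steps a cursor in 512MB
(0x20000000) strides until 32-bit wraparound ends the loop. A hedged C sketch;
touch_region() is a hypothetical stand-in for the per-step TLB programming:

    #include <stdint.h>

    static unsigned region_index(uint32_t vaddr)
    {
        return (vaddr >> 28) & 0xf;    /* extui a8, a4, 28, 4 */
    }

    static void touch_region(unsigned idx, uint32_t base)
    {
        (void)idx; (void)base;         /* real code writes TLB entries */
    }

    void walk_512mb_regions(void)
    {
        uint32_t v = 0x20000000u;      /* movi a8, 0x20000000           */
        do {
            touch_region(region_index(v), v);
            v += 0x20000000u;          /* add  a5, a5, a8               */
        } while (v >= 0x20000000u);    /* bgeu a5, a8, 1b: stop on wrap */
    }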
|
| D | traps.h |
      94  " addi a8, a0, 3\n"            in spill_registers()
     114  " mov a8, a8\n"                in spill_registers()
     119  : : : "a8", "a9", "memory");   in spill_registers()
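
Line 119 is the tail of spill_registers()'s inline asm: the clobber list tells
the compiler the asm body may overwrite a8, a9, and arbitrary memory, so no
values may be cached there across the statement. A minimal sketch of the
pattern (compiles only for Xtensa targets; the body reuses the "mov a8, a8"
no-op from the listing):

    static inline void spill_sketch(void)
    {
        __asm__ __volatile__(
            " mov a8, a8\n"          /* placeholder body, as in traps.h */
            : /* no outputs */
            : /* no inputs  */
            : "a8", "a9", "memory"); /* what the body may clobber */
    }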
|
| /kernel/linux/linux-5.10/arch/xtensa/include/asm/ |
| D | initialize_mmu.h |
     204  extui a8, a4, 28, 4
     205  beq a8, a11, 2f
     207  mov a11, a8
     209  addx4 a9, a8, a3
     220  movi a8, 0x20000000
     233  add a5, a5, a8
     235  bgeu a5, a8, 1b
|
| D | traps.h |
      68  " addi a8, a0, 3\n"            in spill_registers()
      88  " mov a8, a8\n"                in spill_registers()
      93  : : : "a8", "a9", "memory");   in spill_registers()
|
| /kernel/linux/linux-6.6/arch/arm64/crypto/ |
| D | chacha-neon-core.S |
     176  a8 .req w21
     228  mov a8, v8.s[0]
     277  add a8, a8, a12
     286  eor a4, a4, a8
     344  add a8, a8, a12
     353  eor a4, a4, a8
     415  add a8, a8, a13
     424  eor a7, a7, a8
     482  add a8, a8, a13
     491  eor a7, a7, a8
     [all …]
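
Here a8 is an alias for w21 ("a8 .req w21") holding one state word in the
scalar half of the kernel. The add/eor pairs ("add a8, a8, a12" then
"eor a4, a4, a8") look like the c += d; b ^= c step of the standard ChaCha
quarter-round, with the a8/a12/a4 words playing c, d, b. For reference, the
whole quarter-round in C:

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t v, int n)
    {
        return (v << n) | (v >> (32 - n));
    }

    /* Standard ChaCha quarter-round (RFC 8439, section 2.1). */
    static void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = rol32(*d, 16);
        *c += *d; *b ^= *c; *b = rol32(*b, 12);
        *a += *b; *d ^= *a; *d = rol32(*d, 8);
        *c += *d; *b ^= *c; *b = rol32(*b, 7);
    }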
|
| /kernel/linux/linux-5.10/arch/c6x/kernel/ |
| D | entry.S |
      77  || STDW .D1T1 A9:A8,*A15--[1]
     161  LDDW .D1T1 *++A15[1],A9:A8
     233  LDW .D2T1 *+SP(REGS_A8+8),A8
     320  ;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
     547  LDW .D2T1 *+SP(REGS_A8+8),A8
     615  MV .D2X A8,B7
     627  MV .D2X A8,B7
     685  ;; A8 - len_lo (BE), len_hi (LE)
     699  MV .L1 A8,A6
     703  MV .L1 A8,A7
     [all …]
|
| /kernel/linux/linux-5.10/arch/arm64/crypto/ |
| D | chacha-neon-core.S |
     176  a8 .req w21
     229  mov a8, v8.s[0]
     278  add a8, a8, a12
     287  eor a4, a4, a8
     345  add a8, a8, a12
     354  eor a4, a4, a8
     416  add a8, a8, a13
     425  eor a7, a7, a8
     483  add a8, a8, a13
     492  eor a7, a7, a8
     [all …]
|