/arch/xtensa/lib/

memcopy.S
    71  add a7, a3, a4            # a7 = end address for source
    74  l8ui a6, a3, 0
    75  addi a3, a3, 1
    79  bne a3, a7, .Lnextbyte    # continue loop if $a3:src != $a7:src_end
    93  l8ui a6, a3, 0
    94  addi a3, a3, 1
   103  l8ui a6, a3, 0
   104  l8ui a7, a3, 1
   105  addi a3, a3, 2
   116  # a2/ dst, a3/ src, a4/ len
        [all …]
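
The byte loop excerpted above computes the source end address once (line 71) and terminates on pointer equality (line 79) instead of decrementing a separate counter. A minimal C sketch of that structure, names hypothetical:

```c
#include <stddef.h>

/* Byte-wise copy bounded by a precomputed source end pointer,
 * mirroring "add a7, a3, a4" and "bne a3, a7, .Lnextbyte". */
static void *copy_bytes(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	const unsigned char *end = s + len;	/* a7 = a3 + a4 */

	while (s != end)			/* bne a3, a7, ... */
		*d++ = *s++;			/* l8ui + addi */
	return dst;
}
```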

usercopy.S
    64  # a2/ dst, a3/ src, a4/ len
    74  bnone a3, a8, .Laligned   # then use word copy
    75  __ssa8 a3                 # set shift amount from byte offset
    88  EX(10f) l8ui a6, a3, 0
    89  addi a3, a3, 1
    98  EX(10f) l8ui a6, a3, 0
    99  EX(10f) l8ui a7, a3, 1
   100  addi a3, a3, 2
   118  add a7, a3, a4            # a7 = end address for source
   121  EX(10f) l8ui a6, a3, 0
        [all …]
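
__ssa8 (line 75) seeds the shift-amount register from the low source-address bits so later funnel shifts can stitch two aligned words into one unaligned word, while the EX() wrappers register each user access for exception fixup. A rough little-endian C equivalent of the shift-and-merge step, names hypothetical:

```c
#include <stdint.h>

/* Merge two aligned words into the unaligned word that starts
 * "off" bytes into w0 (off in 1..3, little-endian). On xtensa
 * the __ssa8 + src pair performs this in hardware. */
static uint32_t funnel_shift(uint32_t w0, uint32_t w1, unsigned off)
{
	unsigned sh = off * 8;

	return (w0 >> sh) | (w1 << (32 - sh));
}
```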

memset.S
    38  # a2/ dst, a3/ c, a4/ length
    39  extui a3, a3, 0, 8        # mask to just 8 bits
    40  slli a7, a3, 8            # duplicate character in all bytes of word
    41  or a3, a3, a7             # ...
    42  slli a7, a3, 16           # ...
    43  or a3, a3, a7             # ...
    68  EX(10f) s32i a3, a5, 0
    69  EX(10f) s32i a3, a5, 4
    70  EX(10f) s32i a3, a5, 8
    71  EX(10f) s32i a3, a5, 12
        [all …]
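
Lines 39-43 replicate the fill character into every byte lane of a register so the unrolled s32i stores can write four bytes at a time. The same splat in C:

```c
#include <stdint.h>

/* Duplicate the fill byte across a 32-bit word, as the
 * extui/slli/or sequence above does. */
static uint32_t splat_byte(uint32_t c)
{
	c &= 0xff;		/* extui a3, a3, 0, 8 */
	c |= c << 8;		/* slli + or */
	c |= c << 16;		/* slli + or */
	return c;
}
```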

checksum.S
    51  srli a5, a3, 5            /* 32-byte chunks */
    81  extui a5, a3, 2, 3        /* remaining 4-byte chunks */
    97  _bbci.l a3, 1, 5f         /* remaining 2-byte chunk */
   102  _bbci.l a3, 0, 7f         /* remaining 1-byte chunk */
   114  beqz a3, 7b               /* branch if len == 0 */
   115  beqi a3, 1, 6b            /* branch if len == 1 */
   123  addi a3, a3, -2           /* adjust len */
   131  srli a5, a3, 2            /* 4-byte chunks */
   157  _bbci.l a3, 1, 3f         /* remaining 2-byte chunk, still odd addr */
   195  or a10, a2, a3
        [all …]
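
The shift and bit tests above peel the length into 32-byte, 4-byte, 2-byte, and 1-byte chunks. A simplified C sketch of the same chunked ones'-complement sum (32-byte unrolling and alignment handling omitted; names hypothetical):

```c
#include <stddef.h>
#include <stdint.h>

static uint16_t csum_sketch(const uint8_t *p, size_t len)
{
	uint64_t sum = 0;

	for (; len >= 4; len -= 4, p += 4)	/* 4-byte chunks */
		sum += (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	if (len & 2) {				/* 2-byte chunk */
		sum += (uint32_t)p[0] | (uint32_t)p[1] << 8;
		p += 2;
	}
	if (len & 1)				/* 1-byte chunk */
		sum += p[0];
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}
```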

strncpy_user.S
    39  # a3/ src
    54  # a2/ dst, a3/ src, a4/ len
    61  bbsi.l a3, 0, .Lsrc1mod2  # if only 8-bit aligned
    62  bbsi.l a3, 1, .Lsrc2mod4  # if only 16-bit aligned
    70  EX(11f) l8ui a9, a3, 0    # get byte 0
    71  addi a3, a3, 1            # advance src pointer
    77  bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned
    80  EX(11f) l8ui a9, a3, 0    # get byte 0
    87  EX(11f) l8ui a9, a3, 1    # get byte 0
    88  addi a3, a3, 2            # advance src pointer
        [all …]
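
The bbsi.l tests at lines 61-62 dispatch on the low source-address bits: one byte is copied if the address is odd, two more if it is still only 16-bit aligned, after which word copies are safe. A sketch of that dispatch (the real routine also stops at the terminating NUL and honors the length limit):

```c
#include <stdint.h>

/* Copy up to three bytes so that *src becomes word-aligned. */
static void align_src(const uint8_t **src, uint8_t **dst)
{
	if ((uintptr_t)*src & 1)	/* bbsi.l a3, 0, ... */
		*(*dst)++ = *(*src)++;
	if ((uintptr_t)*src & 2) {	/* bbsi.l a3, 1, ... */
		*(*dst)++ = *(*src)++;
		*(*dst)++ = *(*src)++;
	}
}
```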
/arch/xtensa/mm/

misc.S
    35  movi a3, 0
    37  s32i a3, a2, 0
    38  s32i a3, a2, 4
    39  s32i a3, a2, 8
    40  s32i a3, a2, 12
    41  s32i a3, a2, 16
    42  s32i a3, a2, 20
    43  s32i a3, a2, 24
    44  s32i a3, a2, 28
    64  l32i a8, a3, 0
        [all …]
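
The eight s32i stores at lines 37-44 zero one 32-byte block per loop iteration, and the page-clear loop repeats this across the page. The same unrolling in C:

```c
#include <stdint.h>

/* Zero a 32-byte block with eight explicit word stores. */
static void clear_block(uint32_t *p)
{
	p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0;
	p[4] = 0; p[5] = 0; p[6] = 0; p[7] = 0;
}
```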
/arch/csky/kernel/

atomic.S
    17  mfcr a3, epc
    18  addi a3, TRAP0_SIZE
    21  stw a3, (sp, 0)
    22  mfcr a3, epsr
    23  stw a3, (sp, 4)
    24  mfcr a3, usp
    25  stw a3, (sp, 8)
    30  ldex a3, (a2)
    31  cmpne a0, a3
    33  mov a3, a1
        [all …]
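
The ldex/cmpne/stex sequence starting at line 30 is a load-exclusive / store-exclusive compare-and-swap: the exclusive store only succeeds if nothing touched the word since the exclusive load. A portable sketch of the same operation using the GCC/Clang builtin:

```c
#include <stdint.h>

/* Returns nonzero if *p was "old" and has been set to "new_". */
static int cmpxchg32(uint32_t *p, uint32_t old, uint32_t new_)
{
	return __atomic_compare_exchange_n(p, &old, new_, 0,
					   __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
}
```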

entry.S
    35  ldw a3, (sp, LSAVE_A3)
    46  mtcr a3, ss2
    51  RD_MEH a3
    53  tlbi.vaas a3
    56  btsti a3, 31
    72  mov a2, a3
    83  lsri a3, PTE_INDX_SHIFT
    85  and a3, a2
    86  addu r6, a3
    87  ldw a3, (r6)
        [all …]
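
Lines 83-87 index a page table with bits of the faulting address read by RD_MEH: shift, mask, add to the table base, load the entry. Schematically, with hypothetical names (the real handler works in byte offsets and walks more than one level):

```c
#include <stdint.h>

static uint32_t pte_lookup(const uint32_t *table, uint32_t fault_addr,
			   unsigned shift, uint32_t mask)
{
	uint32_t idx = (fault_addr >> shift) & mask;	/* lsri + and */

	return table[idx];				/* addu + ldw */
}
```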
/arch/xtensa/kernel/

entry.S
   138  s32i a3, a2, PT_AREG3
   148  rsr a3, sar
   150  s32i a3, a1, PT_SAR
   162  rsr a3, windowstart
   165  s32i a3, a1, PT_WINDOWSTART
   166  slli a2, a3, 32-WSBITS
   167  src a2, a3, a2
   202  1: addi a3, a2, -1        # eliminate '1' in bit 0: yyyyxxww0
   203  neg a3, a3                # yyyyxxww0 -> YYYYXXWW1+1
   204  and a3, a3, a2            # max. only one bit is set
        [all …]
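
The addi/neg/and triple at lines 202-204 isolates the lowest set bit of the rotated windowstart mask; since bit 0 is known to be clear there, a2 & -(a2 - 1) equals the familiar x & -x idiom:

```c
#include <stdint.h>

/* Keep only the lowest set bit of x. */
static uint32_t lowest_set_bit(uint32_t x)
{
	return x & -x;
}
```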

coprocessor.S
    32  xchal_cp##x##_store a2 a3 a4 a5 a6; \
    49  xchal_cp##x##_load a2 a3 a4 a5 a6; \
   120  s32i a3, a2, PT_AREG3
   121  rsr a3, sar
   123  s32i a3, a2, PT_SAR
   138  rsr a3, exccause
   139  addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
   143  ssl a3                    # SAR: 32 - coprocessor_number
   154  addx4 a0, a3, a0          # entry for CP
   172  addx8 a3, a3, a5          # a3: coprocessor number
        [all …]

align.S
   173  s32i a3, a2, PT_AREG3
   175  rsr a3, excsave1
   177  s32i a4, a3, EXC_TABLE_FIXUP
   202  movi a3, ~3
   203  and a3, a3, a7            # mask lower bits
   205  l32i a4, a3, 0            # load 2 words
   206  l32i a5, a3, 4
   232  .Lload: movi a3, ~3
   233  and a3, a3, a8            # align memory address
   237  addi a3, a3, 8
        [all …]
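
The fixup path masks the faulting address down to a word boundary (movi ~3 / and) and loads the two words covering the access. A little-endian C sketch of reassembling the unaligned value from that pair:

```c
#include <stdint.h>

static uint32_t unaligned_load32(uintptr_t addr)
{
	const uint32_t *base = (const uint32_t *)(addr & ~(uintptr_t)3);
	uint32_t lo = base[0], hi = base[1];	/* l32i 0 / l32i 4 */
	unsigned sh = (addr & 3) * 8;

	return sh ? (lo >> sh) | (hi << (32 - sh)) : lo;
}
```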

vectors.S
    74  xsr a3, excsave1          # save a3 and get dispatch table
    76  l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
    80  addx4 a0, a0, a3          # find entry in table
    82  xsr a3, excsave1          # restore a3 and dispatch table
   101  xsr a3, excsave1          # save a3, and get dispatch table
   107  addx4 a0, a0, a3          # find entry in table
   109  xsr a3, excsave1          # restore a3 and dispatch table
   212  xsr a3, excsave1
   213  s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
   236  l32i a2, a3, EXC_TABLE_KSTK
        [all …]
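
addx4 a0, a0, a3 computes table + cause * 4: the exception cause indexes a table of handler addresses reached through excsave1. The dispatch in C, types hypothetical:

```c
typedef void (*exc_handler_t)(void);

/* addx4 + load: fetch the handler for this exception cause. */
static exc_handler_t dispatch(exc_handler_t *table, unsigned cause)
{
	return table[cause];
}
```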

head.S
    87  movi a3, XCHAL_KSEG_PADDR
    88  bltu a2, a3, 1f
    89  sub a2, a2, a3
    90  movi a3, XCHAL_KSEG_SIZE
    91  bgeu a2, a3, 1f
    92  movi a3, XCHAL_KSEG_CACHED_VADDR
    93  add a2, a2, a3
   171  ___unlock_dcache_all a2 a3
   175  ___unlock_icache_all a2 a3
   178  ___invalidate_dcache_all a2 a3
        [all …]
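
Lines 87-93 translate a physical address into the cached KSEG mapping, but only when it falls inside the KSEG window; both branches bail to 1f otherwise. The same range check in C, with the XCHAL_* constants as parameters:

```c
#include <stdint.h>

static uintptr_t kseg_cached_vaddr(uintptr_t pa, uintptr_t kseg_pa,
				   uintptr_t kseg_size, uintptr_t kseg_va)
{
	if (pa < kseg_pa || pa - kseg_pa >= kseg_size)
		return pa;			/* bltu / bgeu to 1f */
	return (pa - kseg_pa) + kseg_va;	/* sub + add */
}
```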

mcount.S
    29  movi a3, ftrace_stub
    30  bne a3, a4, 1f
    34  movi a3, 0x3fffffff
    35  and a7, a7, a3
    39  and a6, a6, a3
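
The 0x3fffffff masks strip the top two address bits before return addresses are compared and called; on windowed-ABI xtensa those bits carry the caller's window increment rather than address information. In C:

```c
#include <stdint.h>

/* Drop the window-increment bits from a windowed return address. */
static uint32_t strip_window_bits(uint32_t ra)
{
	return ra & 0x3fffffff;
}
```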
/arch/csky/abiv2/

strcpy.S
     8  mov a3, a0
    17  stw a2, (a3)
    22  stw a2, (a3, 4)
    27  stw a2, (a3, 8)
    32  stw a2, (a3, 12)
    37  stw a2, (a3, 16)
    42  stw a2, (a3, 20)
    47  stw a2, (a3, 24)
    52  stw a2, (a3, 28)
    54  addi a3, 32
        [all …]
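
The unrolled stw loop stores a word per step; on csky a tstnbz test (not matched by this search for a3) stops it at the word containing the terminating NUL. A portable equivalent of that zero-byte test:

```c
#include <stdint.h>

/* Nonzero iff some byte of w is 0x00 (the classic haszero trick). */
static int has_zero_byte(uint32_t w)
{
	return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
}
```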

strcmp.S
     8  mov a3, a0
    10  xor a2, a3, a1
    18  ldw t0, (a3, 0)
    28  ldw t0, (a3, 4)
    35  ldw t0, (a3, 8)
    42  ldw t0, (a3, 12)
    49  ldw t0, (a3, 16)
    56  ldw t0, (a3, 20)
    63  ldw t0, (a3, 24)
    70  ldw t0, (a3, 28)
        [all …]
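
xor a2, a3, a1 (line 10) checks that both strings share the same word alignment; if any low bits differ, word-at-a-time comparison would straddle different boundaries and a byte loop is used instead:

```c
#include <stdint.h>

/* Nonzero iff a and b can be compared a word at a time. */
static int co_aligned(const void *a, const void *b)
{
	return (((uintptr_t)a ^ (uintptr_t)b) & 3) == 0;
}
```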
/arch/xtensa/include/asm/

initialize_mmu.h
    49  movi a3, 0x25             /* For SMP/MX -- internal for writeback,
    53  movi a3, 0x29             /* non-MX -- Most cores use Std Memory
    57  wsr a3, atomctl
    95  srli a3, a0, 27
    96  slli a3, a3, 27
    97  addi a3, a3, CA_BYPASS
    99  wdtlb a3, a7
   100  witlb a3, a7
   196  movi a3, .Lattribute_table
   209  addx4 a9, a8, a3
        [all …]
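
The srli/slli pair by 27 (lines 95-96) rounds the address down to a 128 MB boundary before adding the cache-attribute bits for the TLB entry written by wdtlb/witlb. In C, with CA_BYPASS as a placeholder constant:

```c
#include <stdint.h>

#define CA_BYPASS 0x3u	/* placeholder attribute value */

static uint32_t region_entry(uint32_t addr)
{
	return (addr & ~((1u << 27) - 1)) + CA_BYPASS;
}
```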
/arch/riscv/lib/

memset.S
    16  sltiu a3, a2, 16
    17  bnez a3, 4f
    23  addi a3, t0, SZREG-1
    24  andi a3, a3, ~(SZREG-1)
    25  beq a3, t0, 2f            /* Skip if already aligned */
    27  sub a4, a3, t0
    31  bltu t0, a3, 1b
    37  slli a3, a1, 8
    38  or a1, a3, a1
    39  slli a3, a1, 16
        [all …]
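
Lines 23-24 are the standard align-up idiom, add SZREG-1 then clear the low bits, so the head of the buffer is settled bytewise and the body stored a register at a time (lines 37-39 splat the fill byte exactly as the xtensa memset above does):

```c
#include <stdint.h>

/* Round p up to the next multiple of a (a must be a power of 2). */
static uintptr_t align_up(uintptr_t p, uintptr_t a)
{
	return (p + a - 1) & ~(a - 1);	/* addi + andi */
}
```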

memcpy.S
    15  sltiu a3, a2, 128
    16  bnez a3, 4f
    18  andi a3, t6, SZREG-1
    20  bne a3, a4, 4f
    22  beqz a3, 2f               /* Skip if already aligned */
    27  andi a3, a1, ~(SZREG-1)
    28  addi a3, a3, SZREG
    30  sub a4, a3, a1
    36  bltu a1, a3, 1b
    42  add a3, a1, a4
        [all …]
/arch/mips/kernel/

linux32.c
    54  unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
    56  return ksys_truncate(path, merge_64(a2, a3));
    60  unsigned long, a2, unsigned long, a3)
    62  return ksys_ftruncate(fd, merge_64(a2, a3));
   102  asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,  in sys32_readahead() argument
   105  return ksys_readahead(fd, merge_64(a2, a3), count);                   in sys32_readahead()
   109  unsigned long a2, unsigned long a3,                                   in sys32_sync_file_range() argument
   114  merge_64(a2, a3), merge_64(a4, a5),                                   in sys32_sync_file_range()
   119  unsigned long a2, unsigned long a3,                                   in sys32_fadvise64_64() argument
   124  merge_64(a2, a3), merge_64(a4, a5),                                   in sys32_fadvise64_64()
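
merge_64 rebuilds a 64-bit argument from the two 32-bit registers a compat task passed it in, which is why these wrappers take paired a2/a3 and a4/a5 parameters. A sketch of the idea; the kernel's macro picks which register is the high half according to endianness:

```c
#include <stdint.h>

static uint64_t merge_64_sketch(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}
```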

r4k_switch.S
    50  li a3, 0xff01
    51  and t1, a3
    53  nor a3, $0, a3
    54  and a2, a3

r2300_switch.S
    56  li a3, 0xff01
    57  and t1, a3
    59  nor a3, $0, a3
    60  and a2, a3
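
Both switch routines use the same mask dance: keep the interrupt-mask and IE bits (0xff01) of the CP0 Status value from one context, clear them in the other (li/and/nor/and), then merge. The splice in C:

```c
#include <stdint.h>

/* Combine the 0xff01 bits of "keep" with the rest of "rest". */
static uint32_t splice_status(uint32_t keep, uint32_t rest)
{
	const uint32_t mask = 0xff01;

	return (keep & mask) | (rest & ~mask);	/* and / nor+and / or */
}
```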
/arch/riscv/kernel/

head.S
   142  la a3, .Lsecondary_park
   143  csrw CSR_TVEC, a3
   145  slli a3, a0, LGREG
   148  add a4, a3, a4
   149  add a5, a3, a5
   242  la a3, hart_lottery
   244  amoadd.w a3, a2, (a3)
   245  bnez a3, .Lsecondary_start
   248  la a3, __bss_start
   250  ble a4, a3, clear_bss_done
        [all …]
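
The amoadd.w on hart_lottery (line 244) elects the boot hart: the first hart's atomic fetch-and-add returns 0 and it proceeds to clear .bss, while every later hart sees a nonzero prior value and parks as a secondary. The same election with C11 atomics:

```c
#include <stdatomic.h>

static atomic_int hart_lottery;

/* Returns 1 for exactly one caller across all harts. */
static int i_am_boot_hart(void)
{
	return atomic_fetch_add(&hart_lottery, 1) == 0;	/* amoadd.w */
}
```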
/arch/m68k/kernel/

relocate_kernel.S
   105  movel %d0,%a3             /* a3 = src = entry & PAGE_MASK */
   108  movel %a3@+,%a2@+         /* *dst++ = *src++ */
   109  movel %a3@+,%a2@+         /* *dst++ = *src++ */
   110  movel %a3@+,%a2@+         /* *dst++ = *src++ */
   111  movel %a3@+,%a2@+         /* *dst++ = *src++ */
   112  movel %a3@+,%a2@+         /* *dst++ = *src++ */
   113  movel %a3@+,%a2@+         /* *dst++ = *src++ */
   114  movel %a3@+,%a2@+         /* *dst++ = *src++ */
   115  movel %a3@+,%a2@+         /* *dst++ = *src++ */
/arch/mips/crypto/

poly1305-mips.pl
    41  ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
    75  my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3);
   546  my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3);
   981  mflo ($a3,$rs2,$d2)
   991  addu $h0,$h0,$a3
   994  sltu $a3,$h0,$a3
   995  addu $h1,$h1,$a3
   998  mflo ($a3,$r1,$d0)
  1008  addu $h1,$h1,$a3
  1009  sltu $a3,$h1,$a3
        [all …]
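
The addu/sltu pairs propagate carries across limbs without a carry flag: after h0 += x, sltu yields 1 exactly when the addition wrapped, and that bit is added into the next limb. In C:

```c
#include <stdint.h>

/* Add x into the (h1:h0) limb pair, MIPS-style: addu + sltu + addu. */
static void add_limb(uint32_t *h0, uint32_t *h1, uint32_t x)
{
	*h0 += x;
	*h1 += (*h0 < x);	/* sltu: 1 iff the add wrapped */
}
```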