/arch/arm/crypto/

sha1-armv4-large.S
     67  mov r5,r5,ror#30
     68  mov r6,r6,ror#30
     69  mov r7,r7,ror#30        @ [6]
     75  add r7,r8,r7,ror#2      @ E+=K_00_19
     80  add r7,r7,r3,ror#27     @ E+=ROR(A,27)
     84  add r7,r8,r7,ror#2      @ E+=K_00_19
     86  add r7,r7,r3,ror#27     @ E+=ROR(A,27)
     91  and r10,r4,r10,ror#2
     93  eor r10,r10,r6,ror#2    @ F_00_19(B,C,D)
    100  add r6,r8,r6,ror#2      @ E+=K_00_19
    [all …]
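
These rotations are the SHA-1 round itself: "ROR(A,27)" is rotate-left by 5 on A, the ",ror#2" operands apply B's rotate-left-by-30 lazily inside later instructions, and F_00_19 is computed branch-free with an and/eor pair. A minimal C sketch of one textbook round (the .S interleaves and pre-rotates these steps for scheduling):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x << n) | (x >> (32 - n));
    }

    /* One F_00_19 round of SHA-1. */
    static void sha1_round_00_19(uint32_t a, uint32_t *b, uint32_t c,
                                 uint32_t d, uint32_t *e, uint32_t w)
    {
            const uint32_t K_00_19 = 0x5a827999;
            /* F_00_19(B,C,D) = (B & C) | (~B & D), computed without a
             * select as D ^ (B & (C ^ D)) -- the and/eor pair above. */
            uint32_t f = d ^ (*b & (c ^ d));

            *e += rol32(a, 5) + f + K_00_19 + w;   /* rol 5 == "ROR(A,27)" */
            *b  = rol32(*b, 30);                   /* == ror#2, applied lazily above */
    }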

sha256-core.S_shipped
    121  eor r0,r8,r8,ror#5
    123  eor r0,r0,r8,ror#19     @ Sigma1(e)
    138  eor r0,r8,r8,ror#5
    140  eor r0,r0,r8,ror#19     @ Sigma1(e)
    146  add r11,r11,r0,ror#6    @ h+=Sigma1(e)
    150  eor r0,r4,r4,ror#11
    168  eor r0,r0,r4,ror#20     @ Sigma0(a)
    172  add r11,r11,r0,ror#2    @ h+=Sigma0(a)
    179  eor r0,r7,r7,ror#5
    181  eor r0,r0,r7,ror#19     @ Sigma1(e)
    [all …]
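
The eor/ror chains build the FIPS 180-4 big-sigma functions in factored form, so one scratch register accumulates the xors before a single final rotate: since rotation distributes over xor, (e ^ ror(e,5) ^ ror(e,19)) ror 6 == ror(e,6) ^ ror(e,11) ^ ror(e,25), and likewise with 11/20 and a final ror#2 for Sigma0. The direct definitions, as a C sketch with the usual ror32 helper:

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x >> n) | (x << (32 - n));
    }

    static inline uint32_t Sigma1(uint32_t e)   /* the "h+=Sigma1(e)" above */
    {
            return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
    }

    static inline uint32_t Sigma0(uint32_t a)   /* the "h+=Sigma0(a)" above */
    {
            return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
    }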

aes-armv4.S
    275  eor r0,r0,r7,ror#8
    278  eor r5,r5,r8,ror#8
    280  eor r6,r6,r9,ror#8
    283  eor r1,r1,r4,ror#24
    288  eor r0,r0,r7,ror#16
    291  eor r1,r1,r8,ror#8
    293  eor r6,r6,r9,ror#16
    296  eor r2,r2,r5,ror#16
    301  eor r0,r0,r7,ror#24
    303  eor r1,r1,r8,ror#16
    [all …]
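
Xoring table lookups rotated by 8, 16 and 24 bits is the classic single-table AES trick: one T-table serves all four byte positions, because each neighbouring table is just a byte rotation of the first. A hedged sketch of the idea; Te0 here stands in for the usual AES round table (the real .S organises its tables and key mixing differently):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x >> n) | (x << (32 - n));
    }

    /* Assumed round table, with Te1[x] == ror32(Te0[x], 8), and so on. */
    extern const uint32_t Te0[256];

    static uint32_t aes_round_word(uint32_t s0, uint32_t s1, uint32_t s2,
                                   uint32_t s3, uint32_t rk)
    {
            return Te0[(s0 >> 24) & 0xff]
                 ^ ror32(Te0[(s1 >> 16) & 0xff],  8)
                 ^ ror32(Te0[(s2 >>  8) & 0xff], 16)
                 ^ ror32(Te0[ s3        & 0xff], 24)
                 ^ rk;
    }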

sha1-armv7-neon.S
     97  add e, e, a, ror #(32 - 5); \
    102  ror b, #(32 - 30); \
    111  add e, e, a, ror #(32 - 5); \
    115  ror b, #(32 - 30); \
    125  add e, e, a, ror #(32 - 5); \
    130  ror b, #(32 - 30); \
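
ARM has no rotate-left instruction, so these macro lines spell rotate-left by n as rotate-right by (32 - n): "ror #(32 - 5)" is rol #5 on A and "ror #(32 - 30)" is SHA-1's rol #30 on B. The same identity in C:

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x >> n) | (x << (32 - n));
    }

    static inline uint32_t rol32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return ror32(x, 32 - n);   /* "ror #(32 - 5)" == rol #5 */
    }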
/arch/arm/lib/

bswapsdi2.S
     18  eor r3, r0, r0, ror #16
     21  eor r0, r3, r0, ror #8
     27  eor r3, ip, ip, ror #16
     28  eor r1, r0, r0, ror #16
     33  eor r1, r1, r0, ror #8
     34  eor r0, r3, ip, ror #8
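
These eor/ror pairs are the classic byte swap for ARM cores without the rev instruction, done twice on a register pair for the 64-bit __bswapdi2. The 32-bit identity as a C sketch (the .S also masks one byte with bic between the two eors, which falls outside the excerpted lines):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x >> n) | (x << (32 - n));
    }

    static uint32_t bswap32(uint32_t x)        /* bytes ABCD -> DCBA  */
    {
            uint32_t t = x ^ ror32(x, 16);     /* A^C  B^D  C^A  D^B */
            t &= ~0x00ff0000u;                 /* A^C   0   C^A  D^B */
            return ror32(x, 8) ^ (t >> 8);     /*  D    C    B    A  */
    }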

csumpartial.S
     42  movne sum, sum, ror #8
     77  movne r0, r0, ror #8    @ rotate checksum by 8 bits
    108  movne sum, sum, ror #8
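
The conditional rotate fixes up a ones'-complement checksum accumulated from a misaligned start: by RFC 1071's byte-order independence, summing from an odd boundary yields the byte-swapped result, and rotating the accumulator by 8 undoes that. A hedged sketch of the fixup only, not the kernel's csum_partial itself:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x >> n) | (x << (32 - n));
    }

    /* 'odd' would be set when the buffer started on an odd address. */
    static uint32_t csum_fixup(uint32_t sum, int odd)
    {
            if (odd)
                    sum = ror32(sum, 8);   /* the "movne sum, sum, ror #8" */
            return sum;
    }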

io-readsw-armv4.S
    102  _BE_ONLY_( mov ip, ip, ror #8 )
    125  _BE_ONLY_( movne ip, ip, ror #8 )
/arch/x86/crypto/

sha256-ssse3-asm.S
    152  ror $(25-11), y0        # y0 = e >> (25-11)
    155  ror $(22-13), y1        # y1 = a >> (22-13)
    158  ror $(11-6), y0         # y0 = (e >> (11-6)) ^ (e >> (25-6))
    165  ror $(13-2), y1         # y1 = (a >> (13-2)) ^ (a >> (22-2))
    169  ror $6, y0              # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
    172  ror $2, y1              # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
    186  por XTMP2, XTMP1        # XTMP1 = W[-15] ror 7
    195  ror $(25-11), y0        # y0 = e >> (25-11)
    198  ror $(22-13), y1        # y1 = a >> (22-13)
    201  ror $(11-6), y0         # y0 = (e >> (11-6)) ^ (e >> (25-6))
    [all …]
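
The staggered immediates are the same factoring trick as on ARM: each stage rotates the running xor by the difference of the target amounts, so one scratch register per sigma suffices. (The ">>" in the quoted comments means rotate, and the "&" on line 169 is presumably a typo for "^" in the source comment.) Expanded in C:

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x >> n) | (x << (32 - n));
    }

    static uint32_t S1(uint32_t e)
    {
            uint32_t y = ror32(e, 25 - 11);   /* e ror 14               */
            y ^= e;
            y  = ror32(y, 11 - 6);            /* (e ror 19) ^ (e ror 5) */
            y ^= e;
            return ror32(y, 6);               /* ror6 ^ ror11 ^ ror25   */
    }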

sha512-ssse3-asm.S
    124  ror $23, tmp0           # 41    # tmp = e ror 23
    126  xor e_64, tmp0          # tmp = (e ror 23) ^ e
    130  ror $4, tmp0            # 18    # tmp = ((e ror 23) ^ e) ror 4
    131  xor e_64, tmp0          # tmp = (((e ror 23) ^ e) ror 4) ^ e
    134  ror $14, tmp0           # 14    # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e)
    142  ror $5, tmp0            # 39    # tmp = a ror 5
    143  xor a_64, tmp0          # tmp = (a ror 5) ^ a
    145  ror $6, tmp0            # 34    # tmp = ((a ror 5) ^ a) ror 6
    146  xor a_64, tmp0          # tmp = (((a ror 5) ^ a) ror 6) ^ a
    148  ror $28, tmp0           # 28    # tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a)
    [all …]
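
The comments spell out why this works: rotation distributes over xor, and the staged amounts telescope (23+4+14 = 41, 4+14 = 18, 14 = 14), giving SHA-512's S1(e) = (e ror 14) ^ (e ror 18) ^ (e ror 41). A quick self-check of the identity:

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)   /* 0 < n < 64 */
    {
            return (x >> n) | (x << (64 - n));
    }

    int main(void)
    {
            uint64_t e = 0x0123456789abcdefULL;
            uint64_t staged = ror64(ror64(ror64(e, 23) ^ e, 4) ^ e, 14);
            uint64_t direct = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
            assert(staged == direct);   /* holds for every e */
            return 0;
    }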

twofish-i586-asm_32.S
     87  ror $16, b ## D;\
     90  ror $16, a ## D;\
     97  ror $15, b ## D;\
    125  ror $16, b ## D;\
    128  ror $16, a ## D;\
    135  ror $16, b ## D;\
    144  ror $1, c ## D;\
    162  ror $16, a ## D;\
    165  ror $16, b ## D;\
    172  ror $15, a ## D;\
    [all …]

twofish-x86_64-asm_64.S
     85  ror $16, b ## D;\
     88  ror $16, a ## D;\
     95  ror $15, b ## D;\
    123  ror $16, b ## D;\
    126  ror $16, a ## D;\
    141  ror $1, c ## D;\
    158  ror $16, a ## D;\
    161  ror $16, b ## D;\
    168  ror $15, a ## D;\
    194  ror $16, b ## D;\
    [all …]
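
In both Twofish variants most of these rotates are not part of the cipher's math at all: x86 can address the low two bytes of a register directly (al/ah), so ror $16 swaps the other two bytes into reach for S-box indexing. The ror $1 is Twofish's genuine 1-bit rotate of a round word, and the odd-looking ror $15 appears to fold that 1-bit rotate together with a pending 16-bit byte-access rotate. A sketch of the byte-access idiom only, with hypothetical tables:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)   /* 0 < n < 32 */
    {
            return (x >> n) | (x << (32 - n));
    }

    extern const uint32_t tab0[256], tab1[256], tab2[256], tab3[256];

    static uint32_t lookup_all_bytes(uint32_t x)
    {
            uint32_t lo = x;
            uint32_t hi = ror32(x, 16);                       /* the "ror $16" */

            return tab0[lo & 0xff] ^ tab1[(lo >> 8) & 0xff]   /* al, ah        */
                 ^ tab2[hi & 0xff] ^ tab3[(hi >> 8) & 0xff];  /* al, ah again  */
    }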

sha512-avx-asm.S
    122  # shld is faster than ror on Sandybridge
    131  RORQ tmp0, 23           # 41    # tmp = e ror 23
    133  xor e_64, tmp0          # tmp = (e ror 23) ^ e
    137  RORQ tmp0, 4            # 18    # tmp = ((e ror 23) ^ e) ror 4
    138  xor e_64, tmp0          # tmp = (((e ror 23) ^ e) ror 4) ^ e
    149  RORQ tmp0, 5            # 39    # tmp = a ror 5
    150  xor a_64, tmp0          # tmp = (a ror 5) ^ a
    152  RORQ tmp0, 6            # 34    # tmp = ((a ror 5) ^ a) ror 6
    153  xor a_64, tmp0          # tmp = (((a ror 5) ^ a) ror 6) ^ a
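
RORQ is a macro rather than a bare ror because of the comment on line 122: with identical source and destination operands, shld moves exactly the bits a rotate would, and the author found it faster on Sandy Bridge. Presumably the macro issues shld by (64 - n); the data movement it performs, in C:

    #include <stdint.h>

    /* shld $k, %r, %r shifts r left by k while refilling the vacated
     * low bits from r's own top bits, i.e. a rotate left by k; so a
     * rotate right by n is issued as an shld by (64 - n): */
    static inline uint64_t ror64(uint64_t x, unsigned n)   /* 0 < n < 64 */
    {
            return (x << (64 - n)) | (x >> n);
    }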

sha512-avx2-asm.S
    178  # Calculate w[t-15] ror 1
    181  vpor YTMP2, YTMP3, YTMP3        # YTMP3 = W[-15] ror 1
    224  # Calculate w[t-15] ror 8
    227  vpor YTMP2, YTMP1, YTMP1        # YTMP1 = W[-15] ror 8
    229  vpxor YTMP4, YTMP3, YTMP3       # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
    291  vpor YTMP1, YTMP3, YTMP3        # YTMP3 = W[-2] ror 19 {BABA}
    292  vpxor YTMP3, YTMP4, YTMP4       # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
    295  vpor YTMP1, YTMP3, YTMP3        # YTMP3 = W[-2] ror 61 {BABA}
    296  vpxor YTMP3, YTMP4, YTMP4       # YTMP4 = s1 = (W[-2] ror 19) ^
    297                                  #         (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}
    [all …]
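
SSE/AVX before AVX-512 (which added vprolq/vprorq) have no vector rotate, so every "ror" in these comments is synthesised from two shifts and an OR: vpsrlq by n, vpsllq by (64 - n), then vpor (the matching vpsllq halves fall outside the excerpted lines). The sha256-avx2-asm.S entry below plays the same trick at 32-bit granularity. With AVX2 intrinsics, the "W[-2] ror 19" step is roughly:

    #include <immintrin.h>

    /* Four 64-bit rotate-rights by 19 at once, via shift/shift/or. */
    static inline __m256i ror64x4_19(__m256i x)
    {
            return _mm256_or_si256(_mm256_srli_epi64(x, 19),
                                   _mm256_slli_epi64(x, 64 - 19));
    }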

sha256-avx2-asm.S
    198  vpor XTMP2, XTMP3, XTMP3        # XTMP3 = W[-15] ror 7
    238  vpxor XTMP2, XTMP3, XTMP3       # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
    266  vpsrlq $19, XTMP2, XTMP3        # XTMP3 = W[-2] ror 19 {xBxA}
    274  vpsrlq $17, XTMP2, XTMP2        # XTMP2 = W[-2] ror 17 {xBxA}
    325  vpsrlq $19, XTMP2, XTMP3        # XTMP3 = W[-2] ror 19 {xDxC}
    331  vpsrlq $17, XTMP2, XTMP2        # XTMP2 = W[-2] ror 17 {xDxC}
/arch/arc/lib/

strlen.S
     21  ror r5,r4
     31  ror r5,r4

strchr-700.S
     33  ror r4,r3
     58  ror r4,r3

strcpy-700.S
     29  ror r12,r8

strcmp-archs.S
     20  ror r11, r12

strcmp.S
     23  ror r5,r12
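
Across all five ARC string routines the lone ror has the same job: in this two-operand form it rotates by a single bit, and 0x01010101 ror 1 == 0x80808080, so one register (presumably holding 0x01010101) yields the second constant of the classic word-at-a-time zero-byte test for free. A sketch of that test:

    #include <stdint.h>

    static int word_has_zero_byte(uint32_t w)
    {
            const uint32_t ones  = 0x01010101u;
            const uint32_t highs = (ones >> 1) | (ones << 31);  /* ror 1 -> 0x80808080 */

            return ((w - ones) & ~w & highs) != 0;
    }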
/arch/arm64/crypto/

sha2-ce-core.S
    145  ror x7, x4, #29         // ror(lsl(x4, 3), 32)

sha1-ce-core.S
    141  ror x7, x4, #29         // ror(lsl(x4, 3), 32)
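
Both Crypto Extensions drivers use the identical line: x4 holds a byte count, and the final padding block must carry the message length in bits, split across two 32-bit words. Assuming the top three bits of the byte count are clear, one rotate performs both steps at once, since ror(x, 29) == ror(x << 3, 32):

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)   /* 0 < n < 64 */
    {
            return (x >> n) | (x << (64 - n));
    }

    /* bytes -> bits (lsl #3) fused with a 32-bit half swap (ror #32). */
    static uint64_t swapped_bit_count(uint64_t bytes)
    {
            return ror64(bytes, 29);
    }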
/arch/mips/include/asm/octeon/

cvmx-npi-defs.h
    711  uint64_t ror:1;
    713  uint64_t ror:1;
    735  uint64_t ror:1;
    737  uint64_t ror:1;
   1683  uint64_t ror:1;
   1689  uint64_t ror:1;
   1707  uint64_t ror:1;
   1713  uint64_t ror:1;

cvmx-sli-defs.h
   2701  uint64_t ror:32;
   2703  uint64_t ror:32;
   2832  uint64_t ror:1;
   2834  uint64_t ror:1;
   2865  uint64_t ror:1;
   2867  uint64_t ror:1;
   3142  uint64_t ror:32;
   3144  uint64_t ror:32;
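
Unlike everywhere else in this list, ror here is not a rotate at all: it is a register-field name in Octeon's NPI/SLI CSR layouts (plausibly a PCI relaxed-ordering control bit). Each field shows up twice because these headers declare every layout once per bitfield order, in the usual kernel pattern; a reduced, hypothetical example of that pattern:

    #include <stdint.h>

    /* __BIG_ENDIAN_BITFIELD comes from the kernel's byteorder headers. */
    union cvmx_example {               /* hypothetical layout */
            uint64_t u64;
            struct {
    #ifdef __BIG_ENDIAN_BITFIELD
                    uint64_t reserved_1_63:63;
                    uint64_t ror:1;
    #else
                    uint64_t ror:1;
                    uint64_t reserved_1_63:63;
    #endif
            } s;
    };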
/arch/nios2/kernel/

insnemu.S
    462  ror r7, r7, r16         /* r7 = 0x80000000 on carry, or else 0x00000000 */
/arch/arm64/include/asm/

uaccess.h
    473  ror \tmp2, \tmp2, #16