/arch/powerpc/crypto/ |
D | sha1-powerpc-asm.S |
  32 #define W(t) (((t)%16)+16) macro
  35 LWZ(W(t),(t)*4,r4)
  44 add r14,r0,W(t); \
  45 LWZ(W((t)+4),((t)+4)*4,r4); \
  56 xor r5,W((t)+4-3),W((t)+4-8); \
  58 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
  59 add r0,r0,W(t); \
  60 xor W((t)+4),W((t)+4),r5; \
  62 rotlwi W((t)+4),W((t)+4),1
  71 add r0,r0,W(t); \
  [all …]
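The W(t) macro maps the SHA-1 message schedule onto a rotating window of sixteen registers (r16-r31). A minimal C sketch of the same schedule over a 16-word circular buffer (rotl32 is an assumed helper, not part of the file):

  #include <stdint.h>

  static inline uint32_t rotl32(uint32_t x, int n)
  {
          return (x << n) | (x >> (32 - n));
  }

  /* W[t] = rotl(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1), kept in a
   * 16-slot ring just as the asm keeps it in 16 registers */
  static uint32_t sha1_schedule(uint32_t w[16], int t)
  {
          uint32_t x = w[(t - 3) & 15] ^ w[(t - 8) & 15]
                     ^ w[(t - 14) & 15] ^ w[(t - 16) & 15];
          w[t & 15] = rotl32(x, 1);
          return w[t & 15];
  }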
|
/arch/x86/kernel/ |
D | uprobes.c |
  59 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ macro
  102 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
  103 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
  104 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
  105 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
  106 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
  107 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
  108 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
  109 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
  110 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
  [all …]
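Each W(row, ...) row packs 16 per-opcode flags into bits 0..15, and the alternating `|` / `,` between rows folds every pair of rows into one 32-bit table word. A hedged reconstruction of the idiom (the search hit does not show the macro body; this is the standard shape, not necessarily the kernel's exact text):

  #include <stdint.h>

  #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, \
             b8, b9, ba, bb, bc, bd, be, bf) \
      (((b0##U << 0x0) | (b1##U << 0x1) | (b2##U << 0x2) | (b3##U << 0x3) | \
        (b4##U << 0x4) | (b5##U << 0x5) | (b6##U << 0x6) | (b7##U << 0x7) | \
        (b8##U << 0x8) | (b9##U << 0x9) | (ba##U << 0xa) | (bb##U << 0xb) | \
        (bc##U << 0xc) | (bd##U << 0xd) | (be##U << 0xe) | (bf##U << 0xf)) \
       << ((row) % 32))

  /* rows 0x00 and 0x10 share word 0, rows 0x20 and 0x30 share word 1, ... */
  static const uint32_t good_insns[8] = {
          W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) |
          W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0),
          /* remaining rows elided */
  };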
|
/arch/x86/purgatory/ |
D | sha256.c |
  38 static inline void LOAD_OP(int I, u32 *W, const u8 *input) in LOAD_OP() argument
  40 W[I] = __be32_to_cpu(((__be32 *)(input))[I]); in LOAD_OP()
  43 static inline void BLEND_OP(int I, u32 *W) in BLEND_OP() argument
  45 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; in BLEND_OP()
  51 u32 W[64]; in sha256_transform() local
  56 LOAD_OP(i, W, input); in sha256_transform()
  60 BLEND_OP(i, W); in sha256_transform()
  67 t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0]; in sha256_transform()
  69 t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1]; in sha256_transform()
  71 t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2]; in sha256_transform()
  [all …]
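BLEND_OP leans on the s0/s1 small-sigma helpers, which the hits do not show. Per FIPS 180-4 they are (ror32 is an assumed helper):

  #include <stdint.h>

  static inline uint32_t ror32(uint32_t x, int n)
  {
          return (x >> n) | (x << (32 - n));
  }

  static inline uint32_t s0(uint32_t x)   /* sigma0 */
  {
          return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
  }

  static inline uint32_t s1(uint32_t x)   /* sigma1 */
  {
          return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
  }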
|
/arch/x86/crypto/ |
D | sha1_ssse3_asm.S |
  316 .set W, W0 define
  324 .set W_minus_32, W
  335 .set W_minus_04, W
  336 .set W, W_minus_32 define
  357 movdqa W_TMP1, W
  379 movdqa W_minus_12, W
  380 palignr $8, W_minus_16, W # w[i-14]
  383 pxor W_minus_08, W
  386 pxor W_TMP1, W
  387 movdqa W, W_TMP2
  [all …]
|
D | sha512-ssse3-asm.S |
  102 # W[t]+K[t] (stack frame)
  129 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
  133 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
  135 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
  158 # Two rounds are computed based on the values for K[t-2]+W[t-2] and
  159 # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
  166 # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
  175 movdqa W_t(idx), %xmm2 # XMM2 = W[t-2]
  178 movdqa %xmm2, %xmm0 # XMM0 = W[t-2]
  183 movdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
  [all …]
|
D | sha512-avx-asm.S |
  78 # W[t] + K[t] | W[t+1] + K[t+1]
  105 # W[t]+K[t] (stack frame)
  136 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
  140 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
  142 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
  164 # Two rounds are computed based on the values for K[t-2]+W[t-2] and
  165 # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
  172 # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
  177 vmovdqa W_t(idx), %xmm4 # XMM4 = W[t-2]
  179 vmovdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
  [all …]
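Both the SSSE3 and AVX round bodies consume a W[t]+K[t] sum that the message scheduler parked at WK_2 two rounds earlier. The scalar round they implement, as a C sketch (Ch/Maj and the big-sigma rotations follow FIPS 180-4; ror64 is an assumed helper):

  #include <stdint.h>

  static inline uint64_t ror64(uint64_t x, int n)
  {
          return (x >> n) | (x << (64 - n));
  }

  #define Ch(e, f, g)   (((e) & (f)) ^ (~(e) & (g)))
  #define Maj(a, b, c)  (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
  #define S0(a) (ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39))
  #define S1(e) (ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41))

  /* one SHA-512 round; wk is the precomputed W[t] + K[t] */
  static void sha512_round(uint64_t s[8], uint64_t wk)
  {
          uint64_t t1 = s[7] + S1(s[4]) + Ch(s[4], s[5], s[6]) + wk;
          uint64_t t2 = S0(s[0]) + Maj(s[0], s[1], s[2]);

          s[7] = s[6]; s[6] = s[5]; s[5] = s[4];
          s[4] = s[3] + t1;
          s[3] = s[2]; s[2] = s[1]; s[1] = s[0];
          s[0] = t1 + t2;
  }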
|
D | sha256-ssse3-asm.S |
  149 ## compute W[-16] + W[-7] 4 at a time
  154 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
  162 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
  167 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
  171 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
  175 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
  186 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
  191 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
  194 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
  211 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
  [all …]
|
D | sha512-avx2-asm.S |
  170 MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7]
  172 vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
  174 MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15]
  181 vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1
  183 vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7
  227 vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8
  229 vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
  234 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
  236 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
  238 vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
  [all …]
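The YTMP shuffling above computes the SHA-512 small-sigma0 of W[-15] across four lanes. The scalar function being vectorized (ror64 is an assumed helper):

  #include <stdint.h>

  static inline uint64_t ror64(uint64_t x, int n)
  {
          return (x >> n) | (x << (64 - n));
  }

  /* exactly the ror 1 / ror 8 / >> 7 combination built in YTMP1..YTMP4 */
  static inline uint64_t s0_512(uint64_t x)
  {
          return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
  }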
|
D | sha256-avx-asm.S |
  156 ## compute W[-16] + W[-7] 4 at a time
  161 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
  168 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
  173 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
  190 vpor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7
  204 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
  216 vpxor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR
  225 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
  228 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
  240 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
  [all …]
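These SSSE3/AVX sequences produce four schedule words per pass; the {BBAA}/{DC00} shuffles exist because the last two lanes' W[-2] inputs are themselves produced in the same pass, so the s1 half runs in two stages. The scalar shape, as a sketch (s0/s1 per FIPS 180-4):

  #include <stdint.h>

  static inline uint32_t ror32(uint32_t x, int n)
  {
          return (x >> n) | (x << (32 - n));
  }

  #define s0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3))
  #define s1(x) (ror32((x), 17) ^ ror32((x), 19) ^ ((x) >> 10))

  /* one vector pass = four sequential schedule steps, i = 16, 20, 24, ... */
  static void sha256_schedule_4(uint32_t W[64], int i)
  {
          for (int j = 0; j < 4; j++)
                  W[i + j] = s1(W[i + j - 2]) + W[i + j - 7]
                           + s0(W[i + j - 15]) + W[i + j - 16];
  }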
|
/arch/arm/crypto/ |
D | sha1-armv7-neon.S |
  92 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
  94 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
  98 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
  102 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
  106 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
  108 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
  112 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
  115 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
  119 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
  121 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
  [all …]
|
/arch/arm/kernel/ |
D | hyp-stub.S |
  250 __hyp_stub_reset: W(b) .
  251 __hyp_stub_und: W(b) .
  252 __hyp_stub_svc: W(b) .
  253 __hyp_stub_pabort: W(b) .
  254 __hyp_stub_dabort: W(b) .
  255 __hyp_stub_trap: W(b) __hyp_stub_do_trap
  256 __hyp_stub_irq: W(b) .
  257 __hyp_stub_fiq: W(b) .
|
/arch/blackfin/mach-common/ |
D | dpmc_modes.S |
  21 R1 = W[P0](z);
  23 W[P0] = R1.L;
  79 W[P3] = R4.L;
  108 R6 = W[P0](z);
  110 W[P0] = R0.l; /* Set Max VCO to SCLK divider */
  114 R5 = W[P0](z);
  116 W[P0] = R0.l; /* Set Min CLKIN to VCO multiplier */
  125 R7 = W[P0](z);
  132 W[P0] = R2; /* Set Min Core Voltage */
  146 R0 = W[P0](z);
  [all …]
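In Blackfin assembly W[Pn] denotes a 16-bit memory operand: `R1 = W[P0](z)` is a zero-extended halfword load, and `W[P0] = R1.L` stores the low half of a register. A rough C model of the two idioms (the pointer name is illustrative):

  #include <stdint.h>

  /* 'mmr' stands in for the P0 pointer register */
  static uint32_t load_w_z(volatile uint16_t *mmr)
  {
          return *mmr;              /* R1 = W[P0](z): zero-extended */
  }

  static void store_w_l(volatile uint16_t *mmr, uint32_t r)
  {
          *mmr = (uint16_t)r;       /* W[P0] = R1.L: low 16 bits */
  }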
|
/arch/arm/lib/ |
D | memmove.S |
  88 6: W(nop)
  89 W(ldr) r3, [r1, #-4]!
  90 W(ldr) r4, [r1, #-4]!
  91 W(ldr) r5, [r1, #-4]!
  92 W(ldr) r6, [r1, #-4]!
  93 W(ldr) r7, [r1, #-4]!
  94 W(ldr) r8, [r1, #-4]!
  95 W(ldr) lr, [r1, #-4]!
  99 W(nop)
  100 W(str) r3, [r0, #-4]!
  [all …]
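These W(ldr)/W(str) bursts are the descending-copy path of memmove: eight words are loaded with pre-decrement addressing, then stored the same way, so regions where the destination overlaps the end of the source copy safely. The same loop in C (a sketch; assumes word-aligned pointers and a whole number of words):

  #include <stddef.h>
  #include <stdint.h>

  static void copy_words_backward(uint32_t *dst, const uint32_t *src,
                                  size_t n_words)
  {
          dst += n_words;
          src += n_words;
          while (n_words--)
                  *--dst = *--src;  /* mirrors ldr/str rN, [rX, #-4]! */
  }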
|
D | bitops.h |
  16 ALT_SMP(W(pldw) [r1])
  17 ALT_UP(W(nop))
  43 ALT_SMP(W(pldw) [r1])
  44 ALT_UP(W(nop))
|
/arch/blackfin/lib/ |
D | ins.S |
  97 R0 = W[P0]; \
  98 W[P1++] = R0; \
  102 R0 = W[P0]; \
  115 W[P1++] = R0; \
  117 W[P1++] = R0; \
|
D | outs.S |
  36 .Lword_loop_s: R0 = W[P1++];
  37 .Lword_loop_e: W[P0] = R0;
  66 .Lword8_loop_e: W[P0] = R0;
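ins.S and outs.S implement 16-bit string I/O: P0 holds the fixed device register while P1 walks the memory buffer. A C sketch of the two loops (function names are illustrative):

  #include <stdint.h>

  static void insw_sketch(volatile uint16_t *port, uint16_t *buf, int count)
  {
          while (count--)
                  *buf++ = *port;   /* R0 = W[P0]; W[P1++] = R0 */
  }

  static void outsw_sketch(volatile uint16_t *port, const uint16_t *buf,
                           int count)
  {
          while (count--)
                  *port = *buf++;   /* R0 = W[P1++]; W[P0] = R0 */
  }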
|
/arch/arm/kvm/hyp/ |
D | hyp-entry.S |
  65 W(b) hyp_reset
  66 W(b) hyp_undef
  67 W(b) hyp_svc
  68 W(b) hyp_pabt
  69 W(b) hyp_dabt
  70 W(b) hyp_hvc
  71 W(b) hyp_irq
  72 W(b) hyp_fiq
|
/arch/arm/kvm/ |
D | init.S |
  51 W(b) .
  52 W(b) .
  53 W(b) .
  54 W(b) .
  55 W(b) .
  56 W(b) __do_hyp_init
  57 W(b) .
  58 W(b) .
|
/arch/m68k/fpsp040/ |
D | slogn.S |
  436 |--LET V=U*U, W=V*V, CALCULATE
  438 |--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
  443 fmulx %fp1,%fp1 | ...FP1 IS W
  448 fmulx %fp1,%fp3 | ...W*B5
  449 fmulx %fp1,%fp2 | ...W*B4
  451 faddd LOGB3,%fp3 | ...B3+W*B5
  452 faddd LOGB2,%fp2 | ...B2+W*B4
  454 fmulx %fp3,%fp1 | ...W*(B3+W*B5), FP3 RELEASED
  456 fmulx %fp0,%fp2 | ...V*(B2+W*B4)
  458 faddd LOGB1,%fp1 | ...B1+W*(B3+W*B5)
  [all …]
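The comments spell out the Horner-style grouping used for the log polynomial. A double-precision C sketch of the same evaluation order (the asm works in 68k extended precision, and the LOGBn constants live in slogn.S and are not reproduced here):

  /* v = u*u, w = v*v;
   * result = u + u*v*( (B1 + w*(B3 + w*B5)) + v*(B2 + w*B4) ) */
  static double log_poly(double u, const double B[6])
  {
          double v = u * u;
          double w = v * v;

          return u + u * v * ((B[1] + w * (B[3] + w * B[5]))
                            + (v * (B[2] + w * B[4])));
  }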
|
/arch/arm/boot/compressed/ |
D | head.S |
  905 W(b) __armv4_mmu_cache_on
  906 W(b) __armv4_mmu_cache_off
  912 W(b) __armv3_mpu_cache_on
  913 W(b) __armv3_mpu_cache_off
  914 W(b) __armv3_mpu_cache_flush
  918 W(b) __armv4_mpu_cache_on
  919 W(b) __armv4_mpu_cache_off
  920 W(b) __armv4_mpu_cache_flush
  924 W(b) __arm926ejs_mmu_cache_on
  925 W(b) __armv4_mmu_cache_off
  [all …]
|
/arch/x86/kernel/kprobes/ |
D | core.c |
  74 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ macro
  90 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
  91 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
  92 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
  93 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
  94 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
  95 W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
  96 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
  97 W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
  98 W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
  [all …]
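kprobes builds its boostable-opcode table with the same W() row macro as uprobes.c above. Looking up one opcode byte in such a table reduces to a bit test (a sketch; the helper name is illustrative and the kernel's own lookup may differ):

  #include <stdint.h>

  /* table: 256 bits as 8 words, one uint32_t per pair of W() rows */
  static int opcode_flagged(const uint32_t table[8], uint8_t opcode)
  {
          return (table[opcode >> 5] >> (opcode & 31)) & 1;
  }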
|
/arch/blackfin/kernel/ |
D | trace.c |
  403 int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask); in decode_dspLDST_0() local
  407 if (W == 0) { in decode_dspLDST_0()
  433 if (W == 1) { in decode_dspLDST_0()
  465 int W = ((opcode >> LDST_W_bits) & LDST_W_mask); in decode_LDST_0() local
  471 if (W == 0) in decode_LDST_0()
  495 if (W == 1) in decode_LDST_0()
  526 int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask); in decode_LDSTii_0() local
  528 if (W == 0) { in decode_LDSTii_0()
  560 int W = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask); in decode_LDSTidxI_0() local
  566 if (W == 0) in decode_LDSTidxI_0()
  [all …]
|
/arch/arm/include/asm/ |
D | unified.h |
  47 #define W(instr) instr.w
  60 #define W(instr) instr
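This is the definition behind every W(b), W(ldr) and W(nop) above: with CONFIG_THUMB2_KERNEL the macro appends the .w suffix, forcing the 32-bit wide Thumb-2 encoding; in ARM mode it expands to the bare instruction. The guarantee matters for the branch-vector tables, where each entry must occupy exactly 4 bytes. In context (a sketch of the surrounding conditional; the two bodies are the excerpt's lines 47 and 60):

  #ifdef CONFIG_THUMB2_KERNEL
  #define W(instr)        instr.w   /* force the wide encoding */
  #else
  #define W(instr)        instr     /* ARM mode: already 32-bit */
  #endif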
|
/arch/arm/mm/ |
D | cache-v7.S |
  277 ALT_SMP(W(dsb))
  278 ALT_UP(W(nop))
  330 ALT_SMP(W(dsb))
  331 ALT_UP(W(nop))
  358 ALT_SMP(W(dsb))
  359 ALT_UP(W(nop))
  385 ALT_SMP(W(dsb))
  386 ALT_UP(W(nop))
  407 ALT_SMP(W(dsb))
  408 ALT_UP(W(nop))
|
/arch/powerpc/kernel/ |
D | prom_init.c |
  643 #define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \ macro
  656 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
  657 W(0xffff0000), W(0x003e0000), /* POWER6 */
  658 W(0xffff0000), W(0x003f0000), /* POWER7 */
  659 W(0xffff0000), W(0x004b0000), /* POWER8E */
  660 W(0xffff0000), W(0x004c0000), /* POWER8NVL */
  661 W(0xffff0000), W(0x004d0000), /* POWER8 */
  662 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
  663 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
  664 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
  [all …]
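W(x) here serializes a 32-bit word into four big-endian bytes, so each table row becomes an 8-byte (mask, value) pair inside the ibm,architecture-vec property. The hit shows only the macro's first line; a sketch of its conventional continuation and a sample row:

  /* emit x as four big-endian bytes */
  #define W(x)    ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
                  ((x) >> 8) & 0xff, (x) & 0xff

  /* e.g. the POWER7 row expands to eight bytes: mask, then value */
  static const unsigned char power7_row[] = {
          W(0xffff0000), W(0x003f0000),
  };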
|