/arch/powerpc/crypto/ |
D | md5-asm.S |
     69 #define R_00_15(a, b, c, d, w0, w1, p, q, off, k0h, k0l, k1h, k1l) \    argument
     70     LOAD_DATA(w0, off)      /* W */ \
     76     addi    w0,w0,k0l;      /* 1: wk = w + k */ \
     78     addis   w0,w0,k0h;      /* 1: wk = w + k' */ \
     80     add     a,a,w0;         /* 1: a = a + wk */ \
     93 #define R_16_31(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \    argument
     96     addi    w0,w0,k0l;      /* 1: wk = w + k */ \
     98     addis   w0,w0,k0h;      /* 1: wk = w + k' */ \
    101     add     a,a,w0;         /* 1: a = a + wk */ \
    113 #define R_32_47(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \    argument
    [all …]
|
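The addi/addis pairs above add a 32-bit MD5 round constant in two 16-bit halves. Because PowerPC's addi sign-extends its immediate, the high half must absorb a carry whenever bit 15 of the low half is set. A minimal C sketch of that split (not kernel code; names are illustrative):

    #include <stdint.h>
    #include <assert.h>

    static void split_const(uint32_t k, uint16_t *hi, uint16_t *lo)
    {
        *lo = (uint16_t)(k & 0xffff);
        /* compensate for the sign extension addi applies to *lo */
        *hi = (uint16_t)((k >> 16) + ((k >> 15) & 1));
    }

    int main(void)
    {
        uint32_t k = 0xd76aa478;    /* first MD5 round constant */
        uint16_t hi, lo;
        uint32_t w = 0;

        split_const(k, &hi, &lo);
        w += (uint32_t)(int32_t)(int16_t)lo;    /* addi: sign-extends */
        w += (uint32_t)hi << 16;                /* addis */
        assert(w == k);
        return 0;
    }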
D | sha1-spe-asm.S |
    111 #define R_00_15(a, b, c, d, e, w0, w1, k, off) \    argument
    112     LOAD_DATA(w0, off)      /* 1: W */ \
    120     add     e,e,w0;         /* 1: E = E + W */ \
    132     evmergelo w1,w1,w0;     /* mix W[0]/W[1] */ \
    135 #define R_16_19(a, b, c, d, e, w0, w1, w4, w6, w7, k) \    argument
    139     evxor   w0,w0,rT0;      /* W = W[-16] xor W[-3] */ \
    141     evxor   w0,w0,w4;       /* W = W xor W[-8] */ \
    143     evxor   w0,w0,w1;       /* W = W xor W[-14] */ \
    145     evrlwi  w0,w0,1;        /* W = W rotl 1 */ \
    147     evaddw  rT0,w0,rK;      /* WK = W + K */ \
    [all …]
|
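The evxor/evrlwi sequence above computes the standard SHA-1 message schedule (FIPS 180-4, section 6.1.2), two W values per SPE vector register. The plain C form of the recurrence, for reference:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* expand the 16-word block w[0..15] to the full 80-word schedule */
    static void sha1_schedule(uint32_t w[80])
    {
        for (int t = 16; t < 80; t++)
            w[t] = rotl32(w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16], 1);
    }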
D | sha256-spe-asm.S |
    161 #define R_CALC_W(a, b, c, d, e, f, g, h, w0, w1, w4, w5, w7, k, off) \    argument
    163     evmergelohi rT0,w0,w1;  /* w[-15] */ \
    175     evaddw  w0,w0,rT0;      /* w = w[-16] + s0 */ \
    189     evaddw  w0,w0,rT0;      /* w = w + s1 */ \
    193     evaddw  w0,w0,rT0;      /* w = w + w[-7] */ \
    196     evaddw  rT1,rT1,w0;     /* wk = w + k */ \
|
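R_CALC_W's running sums (w[-16] + s0, + s1, + w[-7]) are the SHA-256 message schedule of FIPS 180-4, section 6.2.2. A plain C sketch of what each vectorized step contributes:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static void sha256_schedule(uint32_t w[64])
    {
        for (int t = 16; t < 64; t++) {
            uint32_t s0 = ror32(w[t - 15], 7) ^ ror32(w[t - 15], 18) ^ (w[t - 15] >> 3);
            uint32_t s1 = ror32(w[t - 2], 17) ^ ror32(w[t - 2], 19) ^ (w[t - 2] >> 10);

            w[t] = w[t - 16] + s0 + w[t - 7] + s1;
        }
    }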
/arch/arm64/lib/ |
D | bitops.S |
     30     and     w3, w0, #63             // Get bit offset
     31     eor     w0, w0, w3              // Clear low bits
     48     and     w3, w0, #63             // Get bit offset
     49     eor     w0, w0, w3              // Clear low bits
|
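The and/eor pair splits a bit number into a bit offset within a 64-bit word and a word-aligned remainder: since w3 holds exactly the low six bits, the eor clears them from w0. A sketch of the same decomposition in C (illustrative names, not the kernel code):

    #include <stdint.h>
    #include <stddef.h>

    static void split_bitnr(unsigned int nr, unsigned int *bit, size_t *byte_off)
    {
        unsigned int off  = nr & 63;    /* and w3, w0, #63 */
        unsigned int base = nr ^ off;   /* eor w0, w0, w3: clears low bits */

        *bit = off;
        *byte_off = (size_t)base >> 3;  /* word-aligned byte offset */
    }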
/arch/arm64/include/asm/ |
D | atomic_lse.h |
     32     register int w0 asm ("w0") = i;                 \
     37     : [i] "+r" (w0), [v] "+Q" (v->counter)          \
     52     register int w0 asm ("w0") = i;                 in ATOMIC_OP()
     60     : [i] "+r" (w0), [v] "+Q" (v->counter)          in ATOMIC_OP()
     64     return w0;                                      in ATOMIC_OP()
     84     register int w0 asm ("w0") = i;                 \
     94     : [i] "+r" (w0), [v] "+Q" (v->counter)          \
     98     return w0;                                      \
    110     register int w0 asm ("w0") = i;
    120     : [i] "+r" (w0), [v] "+Q" (v->counter)
    [all …]
|
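The "register int w0 asm ("w0")" declarations pin a C variable to a named machine register, so the asm body (and any out-of-line helper it may branch to) finds the argument and leaves the result in a fixed place. A minimal sketch of the idiom, assuming AArch64 with GCC/clang extended asm (not the kernel macro itself):

    static inline int add_one_in_w0(int i)
    {
        register int w0 asm("w0") = i;  /* bind the value to register w0 */

        asm volatile("add %w[i], %w[i], #1"
                     : [i] "+r" (w0));
        return w0;                      /* result comes back in w0 too */
    }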
/arch/m68k/lib/ |
D | muldi3.c |
     28 #define umul_ppmm(w1, w0, u, v) \    argument
     49     (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
     54 #define umul_ppmm(w1, w0, u, v) \    argument
     56        : "=d" ((USItype)(w0)), \
|
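umul_ppmm(w1, w0, u, v) produces the full 64-bit product of two 32-bit values, high word in w1 and low word in w0; the portable fallback (as opposed to the inline-asm variant) builds it from four 16-bit partial products. A function-style sketch of the same algorithm (the microblaze, h8300, sh, and sparc entries below use the same scheme):

    #include <stdint.h>
    #include <assert.h>

    #define LL_B        (1UL << 16)
    #define LOWPART(t)  ((uint32_t)(t) & (LL_B - 1))
    #define HIGHPART(t) ((uint32_t)(t) >> 16)

    static void umul_ppmm(uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
    {
        uint32_t ul = LOWPART(u), uh = HIGHPART(u);
        uint32_t vl = LOWPART(v), vh = HIGHPART(v);

        uint32_t x0 = ul * vl;
        uint32_t x1 = uh * vl;
        uint32_t x2 = ul * vh;
        uint32_t x3 = uh * vh;

        x1 += HIGHPART(x0);     /* cannot carry */
        x1 += x2;               /* but this can */
        if (x1 < x2)            /* propagate carry into the high word */
            x3 += LL_B;

        *w1 = x3 + HIGHPART(x1);
        *w0 = LOWPART(x1) * LL_B + LOWPART(x0);
    }

    int main(void)
    {
        uint32_t hi, lo;

        umul_ppmm(&hi, &lo, 0xdeadbeef, 0xcafebabe);
        assert(((uint64_t)hi << 32 | lo) == (uint64_t)0xdeadbeef * 0xcafebabe);
        return 0;
    }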
/arch/arm64/kernel/vdso/ |
D | gettimeofday.S |
    185     cmp     w0, #JUMPSLOT_MAX
    189     add     x_tmp, x_tmp, w0, uxtw #2
    299     cmp     w0, #CLOCK_REALTIME
    300     ccmp    w0, #CLOCK_MONOTONIC, #0x4, ne
    301     ccmp    w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
    307     cmp     w0, #CLOCK_REALTIME_COARSE
    308     ccmp    w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
    316     mov     w0, wzr
|
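The cmp/ccmp chain is a branch-free multi-way equality test: each ccmp only performs its comparison if the previous one failed (the "ne" condition), otherwise it forces the flags to "equal" (#0x4). The C equivalent of the clock-id whitelist being tested:

    #include <time.h>
    #include <stdbool.h>

    static bool is_hres_clock(clockid_t id)
    {
        return id == CLOCK_REALTIME ||
               id == CLOCK_MONOTONIC ||
               id == CLOCK_MONOTONIC_RAW;
    }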
/arch/sparc/math-emu/ |
D | sfp-util_32.h |
     27 #define umul_ppmm(w1, w0, u, v) \    argument
     69        "=r" (w0) \
|
/arch/arm64/crypto/ |
D | ghash-ce-core.S |
     45     sub     w0, w0, #1
     75     cbnz    w0, 0b
|
D | sha256-core.S |
    352     eor     w0,w22,w22,ror#14
    358     eor     w16,w16,w0,ror#11       // Sigma1(e)
    359     ror     w0,w26,#2
    366     eor     w17,w0,w17,ror#13       // Sigma0(a)
    373     ldp     w15,w0,[x1],#2*4
    422     rev     w0,w0                   // 13
    432     add     w22,w22,w0              // h+=X[i]
    626     add     w7,w7,w0
    665     str     w0,[sp,#4]
    672     ror     w0,w23,#2
    [all …]
|
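The two-eor pattern here appears to fold three rotations into two instructions: with x = e ^ ror(e, 14), we have ror(x, 11) = ror(e, 11) ^ ror(e, 25), so Sigma1(e) = ror(e, 6) ^ ror(x, 11). A C sketch of both big sigma functions (FIPS 180-4) and a check of that identity:

    #include <stdint.h>
    #include <assert.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static uint32_t Sigma1(uint32_t e)
    {
        return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
    }

    static uint32_t Sigma0(uint32_t a)
    {
        return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
    }

    int main(void)
    {
        uint32_t e = 0x510e527f;    /* SHA-256 IV word for e */
        uint32_t x = e ^ ror32(e, 14);

        /* the fused two-eor form equals the three-rotation definition */
        assert((ror32(e, 6) ^ ror32(x, 11)) == Sigma1(e));
        (void)Sigma0;
        return 0;
    }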
/arch/microblaze/lib/ |
D | muldi3.c |
     13 #define umul_ppmm(w1, w0, u, v) \    argument
     34     (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
|
/arch/h8300/lib/ |
D | muldi3.c |
      7 #define umul_ppmm(w1, w0, u, v) \    argument
     24     (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0); \
|
/arch/sh/math-emu/ |
D | sfp-util.h |
     21 #define umul_ppmm(w1, w0, u, v) \    argument
     23     : "=r" ((u32)(w1)), "=r" ((u32)(w0)) \
|
/arch/x86/crypto/ |
D | poly1305-avx2-x86_64.S |
     39 #define w0 0x14(%r8)    macro
    100     # combine r0,u0,w0,y0
    102     vmovd   w0,t1x
    219     # t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
    242     # t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
    266     # t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
    290     # t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
    314     # t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
|
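The per-lane comments suggest r, u, w, y hold successive powers of the Poly1305 key r, so that four blocks fold per pass: the serial Horner evaluation h = ((((h + m0)r + m1)r + m2)r + m3)r rearranges to h = (h + m0)r^4 + m1*r^3 + m2*r^2 + m3*r, which vectorizes across four lanes. A toy demonstration of that rearrangement with a small prime standing in for 2^130 - 5 (names m, r, P are illustrative; the lane-to-power mapping is an inference, not taken from this file):

    #include <stdint.h>
    #include <assert.h>

    #define P 65521ULL      /* toy prime, NOT the Poly1305 modulus */

    int main(void)
    {
        uint64_t m[4] = { 11, 22, 33, 44 }, r = 12345, h = 7;

        /* serial Horner evaluation, one block at a time */
        uint64_t serial = h;
        for (int i = 0; i < 4; i++)
            serial = (serial + m[i]) % P * r % P;

        /* 4-way form: per-lane multiply by r^4, r^3, r^2, r, then sum */
        uint64_t r2 = r * r % P, r3 = r2 * r % P, r4 = r3 * r % P;
        uint64_t fourway = ((h + m[0]) % P * r4 +
                            m[1] * r3 + m[2] * r2 + m[3] * r) % P;

        assert(serial == fourway);
        return 0;
    }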
/arch/nios2/lib/ |
D | memcpy.c |
     31 #define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))    argument
|
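MERGE is a funnel shift: it assembles one aligned destination word from two adjacent aligned source words when source and destination alignments differ (the xtensa src_b macros further down do the same thing with a hardware funnel-shift instruction). A sketch for a little-endian 32-bit word, assuming sh_1 + sh_2 == 32 as in the macro's callers:

    #include <stdint.h>
    #include <assert.h>

    #define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))

    int main(void)
    {
        /* memory bytes 0x00,0x11,...,0x77 read as two LE words */
        uint32_t w0 = 0x33221100, w1 = 0x77665544;

        /* a word load 1 byte past w0's base spans both words */
        assert(MERGE(w0, 8, w1, 24) == 0x44332211);
        return 0;
    }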
/arch/arm64/kernel/ |
D | bpi.S |
     62     mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
|
D | head.S |
    503     mov     w0, #BOOT_CPU_MODE_EL1          // This cpu booted in EL1
    600     mov     w0, #BOOT_CPU_MODE_EL2          // This CPU booted in EL2
    615     mov     w0, #BOOT_CPU_MODE_EL2          // This CPU booted in EL2
    625     cmp     w0, #BOOT_CPU_MODE_EL2
    628 1:  str     w0, [x1]                        // This CPU has booted in EL1
|
D | entry.S |
    102     mov     w0, w0                  // zero upper 32 bits of x0
    880     mov     w0, #-1                 // set default errno for
    887     cmp     w0, #-1                 // skip the syscall?
    889     uxtw    scno, w0                // syscall number (possibly new)
|
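The "mov w0, w0" at line 102 relies on an AArch64 architectural rule: any write to a 32-bit w register zeroes the upper half of the underlying x register, so the instruction sanitizes a 64-bit register that should only carry a 32-bit value. In C the same sanitization is a truncating cast, which compilers typically emit as exactly that mov (a sketch, not kernel code):

    #include <stdint.h>

    static uint64_t zero_upper32(uint64_t x)
    {
        return (uint32_t)x;     /* on AArch64, compiles to: mov w0, w0 */
    }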
/arch/xtensa/kernel/ |
D | align.S |
     69 .macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
     84 .macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
|
/arch/xtensa/lib/ |
D | memcopy.S |
     14 .macro src_b r, w0, w1
     16     src     \r, \w0, \w1
     18     src     \r, \w1, \w0
|
/arch/ia64/kernel/ |
D | ptrace.c |
    153     unsigned long w0, ri = ia64_psr(regs)->ri + 1;      in ia64_increment_ip()  local
    159     get_user(w0, (char __user *) regs->cr_iip + 0);     in ia64_increment_ip()
    160     if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {       in ia64_increment_ip()
    176     unsigned long w0, ri = ia64_psr(regs)->ri - 1;      in ia64_decrement_ip()  local
    181     get_user(w0, (char __user *) regs->cr_iip + 0);     in ia64_decrement_ip()
    182     if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {       in ia64_decrement_ip()
|
/arch/mips/include/asm/sn/ |
D | ioc3.h |
    184     u32 w0;     /* first word (valid,bcnt,cksum) */    member
|
/arch/arm64/mm/ |
D | proc.S |
    210 cpu     .req    w0
|
/arch/powerpc/xmon/ |
D | xmon.c |
   3055     unsigned long w0,w1,w2;                                 in dump_tlb_44x()  local
   3056     asm volatile("tlbre %0,%1,0" : "=r" (w0) : "r" (i));    in dump_tlb_44x()
   3059     printf("[%02x] %08x %08x %08x ", i, w0, w1, w2);        in dump_tlb_44x()
   3060     if (w0 & PPC44x_TLB_VALID) {                            in dump_tlb_44x()
   3062         w0 & PPC44x_TLB_EPN_MASK,                           in dump_tlb_44x()
|