/arch/arm64/lib/ |
D | crc32.S |
    33  crc32\c\()x w8, w0, x3
    35  csel w0, w0, w8, eq
    38  crc32\c\()w w8, w0, w3
    40  csel w0, w0, w8, eq
    43  crc32\c\()h w8, w0, w3
    45  csel w0, w0, w8, eq
    47  crc32\c\()b w8, w0, w3
    48  csel w0, w0, w8, eq
    50  crc32\c\()x w8, w0, x5
    52  csel w0, w0, w8, eq
    [all …]
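The crc32.S hits above show the ARMv8 CRC32 instructions (crc32x/crc32w/crc32h/crc32b, or their crc32c\* forms via the \c\() macro argument) applied to progressively narrower chunks, with csel keeping the previous value in w0 whenever a chunk is skipped. A minimal C sketch of the same widest-chunk-first idea, assuming a compiler targeting ARMv8 with the CRC32 extension enabled and the ACLE intrinsics from <arm_acle.h>; the function name crc32_le_sketch is illustrative, not taken from the file:

#include <arm_acle.h>
#include <stddef.h>
#include <stdint.h>

/* Fold len bytes into crc, widest chunks first, mirroring the
 * crc32x/crc32w/crc32h/crc32b cascade in crc32.S. */
static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len >= 8) {
		uint64_t v;

		__builtin_memcpy(&v, p, 8);	/* unaligned-safe load */
		crc = __crc32d(crc, v);
		p += 8;
		len -= 8;
	}
	if (len & 4) {
		uint32_t v;

		__builtin_memcpy(&v, p, 4);
		crc = __crc32w(crc, v);
		p += 4;
	}
	if (len & 2) {
		uint16_t v;

		__builtin_memcpy(&v, p, 2);
		crc = __crc32h(crc, v);
		p += 2;
	}
	if (len & 1)
		crc = __crc32b(crc, *p);
	return crc;
}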
|
D | tishift.S |
    46  neg w0, w3
    68  neg w0, w3
|
D | memcmp.S | 23 #define result w0
|
/arch/powerpc/crypto/ |
D | md5-asm.S |
    61   #define R_00_15(a, b, c, d, w0, w1, p, q, off, k0h, k0l, k1h, k1l) \ argument
    62   LOAD_DATA(w0, off) /* W */ \
    68   addi w0,w0,k0l; /* 1: wk = w + k */ \
    70   addis w0,w0,k0h; /* 1: wk = w + k' */ \
    72   add a,a,w0; /* 1: a = a + wk */ \
    85   #define R_16_31(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \ argument
    88   addi w0,w0,k0l; /* 1: wk = w + k */ \
    90   addis w0,w0,k0h; /* 1: wk = w + k' */ \
    93   add a,a,w0; /* 1: a = a + wk */ \
    105  #define R_32_47(a, b, c, d, w0, w1, p, q, k0h, k0l, k1h, k1l) \ argument
    [all …]
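The R_00_15/R_16_31/R_32_47 macros each implement one MD5 step: the message word in w0 and the round constant are folded into wk, which is then added into a before the rotate. For reference, one round-1 step in plain C looks like the sketch below; the names rotl32, md5_f and md5_step1 are illustrative, not taken from the file:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned int s)
{
	return (x << s) | (x >> (32 - s));
}

/* Round-1 auxiliary function F(b,c,d) = (b & c) | (~b & d). */
static inline uint32_t md5_f(uint32_t b, uint32_t c, uint32_t d)
{
	return d ^ (b & (c ^ d));
}

/* One MD5 round-1 step: a = b + rotl(a + F(b,c,d) + w + k, s),
 * i.e. "wk = w + k" followed by "a = a + wk" as in the macros above. */
static inline uint32_t md5_step1(uint32_t a, uint32_t b, uint32_t c,
				 uint32_t d, uint32_t w, uint32_t k,
				 unsigned int s)
{
	return b + rotl32(a + md5_f(b, c, d) + w + k, s);
}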
|
D | sha1-spe-asm.S |
    106  #define R_00_15(a, b, c, d, e, w0, w1, k, off) \ argument
    107  LOAD_DATA(w0, off) /* 1: W */ \
    115  add e,e,w0; /* 1: E = E + W */ \
    127  evmergelo w1,w1,w0; /* mix W[0]/W[1] */ \
    130  #define R_16_19(a, b, c, d, e, w0, w1, w4, w6, w7, k) \ argument
    134  evxor w0,w0,rT0; /* W = W[-16] xor W[-3] */ \
    136  evxor w0,w0,w4; /* W = W xor W[-8] */ \
    138  evxor w0,w0,w1; /* W = W xor W[-14] */ \
    140  evrlwi w0,w0,1; /* W = W rotl 1 */ \
    142  evaddw rT0,w0,rK; /* WK = W + K */ \
    [all …]
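The R_16_19 sequence above is the SHA-1 message expansion, W[t] = rotl1(W[t-16] ^ W[t-14] ^ W[t-8] ^ W[t-3]), followed by WK = W + K. A plain-C sketch of that schedule using a 16-word rolling window; the function and variable names are illustrative:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned int s)
{
	return (x << s) | (x >> (32 - s));
}

/* Expand message word t (t >= 16) in place and return wk = w[t] + k,
 * matching the evxor/evrlwi/evaddw sequence shown above. */
static uint32_t sha1_schedule(uint32_t w[16], unsigned int t, uint32_t k)
{
	uint32_t v = w[(t - 16) & 15] ^ w[(t - 14) & 15] ^
		     w[(t - 8) & 15] ^ w[(t - 3) & 15];

	v = rotl32(v, 1);
	w[t & 15] = v;		/* keep only the last 16 words */
	return v + k;
}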
|
D | sha256-spe-asm.S |
    156  #define R_CALC_W(a, b, c, d, e, f, g, h, w0, w1, w4, w5, w7, k, off) \ argument
    158  evmergelohi rT0,w0,w1; /* w[-15] */ \
    170  evaddw w0,w0,rT0; /* w = w[-16] + s0 */ \
    184  evaddw w0,w0,rT0; /* w = w + s1 */ \
    188  evaddw w0,w0,rT0; /* w = w + w[-7] */ \
    191  evaddw rT1,rT1,w0; /* wk = w + k */ \
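R_CALC_W builds the SHA-256 message schedule, w[t] = w[t-16] + s0(w[t-15]) + w[t-7] + s1(w[t-2]), then wk = w[t] + K[t]. A C sketch of the same recurrence; the helper names below are illustrative:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned int s)
{
	return (x >> s) | (x << (32 - s));
}

static inline uint32_t sha256_s0(uint32_t x)	/* sigma0 */
{
	return rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3);
}

static inline uint32_t sha256_s1(uint32_t x)	/* sigma1 */
{
	return rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10);
}

/* Expand word t (t >= 16) in a 16-word rolling window and return
 * wk = w[t] + k, as the evaddw sequence above does in SPE registers. */
static uint32_t sha256_schedule(uint32_t w[16], unsigned int t, uint32_t k)
{
	uint32_t v = w[(t - 16) & 15] + sha256_s0(w[(t - 15) & 15]) +
		     w[(t - 7) & 15] + sha256_s1(w[(t - 2) & 15]);

	w[t & 15] = v;
	return v + k;
}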
|
/arch/arm64/include/asm/ |
D | rwonce.h |
    43  asm volatile(__LOAD_RCPC(b, %w0, %1) \
    48  asm volatile(__LOAD_RCPC(h, %w0, %1) \
    53  asm volatile(__LOAD_RCPC(, %w0, %1) \
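In these inline-asm templates, %w0 is the AArch64 operand modifier that prints the 32-bit W view of the register allocated for operand 0, so the byte, halfword and word variants load into a w register rather than an x register. A minimal sketch of the pattern, assuming GCC/Clang inline asm on AArch64; this is not the kernel's __READ_ONCE and uses a plain ldr instead of the RCpc load selected by __LOAD_RCPC:

#include <stdint.h>

static inline uint32_t load_u32(const uint32_t *p)
{
	uint32_t val;

	/* "%w0" -> 32-bit register name (e.g. w0); "%1" -> [xN] memory
	 * operand supplied by the "Q" constraint, as in rwonce.h. */
	asm volatile("ldr %w0, %1"
		     : "=r" (val)
		     : "Q" (*p)
		     : "memory");
	return val;
}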
|
/arch/arm64/kernel/ |
D | reloc_test_syms.S |
    15  ldr w0, 0f
    22  ldrh w0, [x0]
    73  ldr w0, [x1]
    81  ldrsh w0, [x1]
|
D | hyp-stub.S |
    230  ldr w0, [x1]
    231  cmp w0, #BOOT_CPU_MODE_EL2
|
D | head.S |
    508  mov w0, #BOOT_CPU_MODE_EL1
    552  mov w0, #BOOT_CPU_MODE_EL2
    568  cmp w0, #BOOT_CPU_MODE_EL2
    571  1: str w0, [x1] // Save CPU boot mode
|
/arch/m68k/lib/ |
D | muldi3.c |
    28  #define umul_ppmm(w1, w0, u, v) \ argument
    49  (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
    54  #define umul_ppmm(w1, w0, u, v) \ argument
    56  : "=d" ((USItype)(w0)), \
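Both umul_ppmm() definitions above produce the 64-bit product of two 32-bit words as a high word w1 and a low word w0: the first is the generic C fallback built from 16-bit partial products (__ll_B, __ll_lowpart), the second uses an m68k multiply via inline asm. A portable sketch of the fallback algorithm, with illustrative names:

#include <stdint.h>

/* Split u and v into 16-bit halves, form the four partial products,
 * and recombine them into the high (*w1) and low (*w0) result words. */
static void umul_ppmm_sketch(uint32_t *w1, uint32_t *w0,
			     uint32_t u, uint32_t v)
{
	uint32_t ul = u & 0xffff, uh = u >> 16;
	uint32_t vl = v & 0xffff, vh = v >> 16;

	uint32_t x0 = ul * vl;
	uint32_t x1 = uh * vl;
	uint32_t x2 = ul * vh;
	uint32_t x3 = uh * vh;

	x1 += x0 >> 16;		/* cannot overflow */
	x1 += x2;		/* may overflow: carry into the high part */
	if (x1 < x2)
		x3 += 1u << 16;

	*w1 = x3 + (x1 >> 16);
	*w0 = (x1 << 16) + (x0 & 0xffff);
}

The microblaze and h8300 muldi3.c files and the sfp-util/sfp-machine headers listed further down carry the same construct for their soft-multiply and soft-float code.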
|
/arch/csky/lib/ |
D | usercopy.c |
    81   int w0, w1, w2, w3; in raw_copy_to_user() local
    136  : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), in raw_copy_to_user()
|
/arch/sparc/math-emu/ |
D | sfp-util_32.h |
    28  #define umul_ppmm(w1, w0, u, v) \ argument
    70  "=r" (w0) \
|
/arch/xtensa/include/asm/ |
D | asmmacro.h |
    168  .macro __src_b r, w0, w1
    170  src \r, \w0, \w1
    172  src \r, \w1, \w0
|
/arch/microblaze/lib/ |
D | muldi3.c |
    14  #define umul_ppmm(w1, w0, u, v) \ argument
    35  (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
|
/arch/h8300/lib/ |
D | muldi3.c |
    8   #define umul_ppmm(w1, w0, u, v) \ argument
    25  (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0); \
|
/arch/sh/math-emu/ |
D | sfp-util.h |
    22  #define umul_ppmm(w1, w0, u, v) \ argument
    24  : "=r" ((u32)(w1)), "=r" ((u32)(w0)) \
|
/arch/arm64/crypto/ |
D | aes-ce-core.S |
    72  dup v1.4s, w0
    75  umov w0, v0.s[0]
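The pair shown here moves a value between a general-purpose and a SIMD register: dup v1.4s, w0 replicates w0 into all four 32-bit lanes, and umov w0, v0.s[0] extracts lane 0 back into w0. For reference, the NEON intrinsic equivalents look like this (the function name is illustrative):

#include <arm_neon.h>
#include <stdint.h>

static uint32_t broadcast_and_extract(uint32_t x)
{
	/* dup v.4s, wN  ->  vdupq_n_u32() */
	uint32x4_t v = vdupq_n_u32(x);

	/* umov wN, v.s[0]  ->  vgetq_lane_u32(v, 0) */
	return vgetq_lane_u32(v, 0);
}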
|
D | ghash-ce-core.S |
    252  tbnz w0, #0, 2f // skip until #blocks is a
    253  tbnz w0, #1, 2f // round multiple of 4
    257  sub w0, w0, #4
    314  cbz w0, 5f
    319  sub w0, w0, #1
    343  cbnz w0, 0b
    561  smov w0, v0.b[0] // return b0
|
D | sha2-ce-core.S | 155 mov w0, w2
|
D | sha512-ce-core.S | 204 mov w0, w2
|
D | sha1-ce-core.S | 148 mov w0, w2
|
/arch/nios2/lib/ |
D | memcpy.c | 31 #define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2))) argument
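MERGE() is the classic unaligned-copy helper: it assembles one aligned destination word from two neighbouring aligned source words by shifting them by complementary amounts, with sh_1 + sh_2 equal to the word width in bits. A small usage sketch for a little-endian 32-bit machine; the loop and its names are illustrative, not the nios2 memcpy() itself:

#include <stdint.h>

#define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))

/* Copy n words from a source that sits 'offset' bytes past a word
 * boundary (assumes 1 <= offset <= 3), reading only aligned words
 * and merging adjacent pairs. */
static void copy_unaligned_words(uint32_t *dst, const uint32_t *src_aligned,
				 unsigned int offset, unsigned int n)
{
	unsigned int sh_1 = 8 * offset;		/* bits taken from w0 */
	unsigned int sh_2 = 32 - sh_1;		/* bits taken from w1 */
	uint32_t w0 = *src_aligned++;

	while (n--) {
		uint32_t w1 = *src_aligned++;

		*dst++ = MERGE(w0, sh_1, w1, sh_2);
		w0 = w1;
	}
}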
|
/arch/nds32/include/asm/ |
D | sfp-machine.h |
    88   #define umul_ppmm(w1, w0, u, v) \ argument
    109  (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0); \
|
/arch/arm64/kvm/hyp/ |
D | hyp-entry.S | 201 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
|