/arch/x86/crypto/ |
D | serpent-avx-x86_64-asm_64.S |
     55  #define S0_1(x0, x1, x2, x3, x4) \  argument
     64  #define S0_2(x0, x1, x2, x3, x4) \  argument
     74  #define S1_1(x0, x1, x2, x3, x4) \  argument
     83  #define S1_2(x0, x1, x2, x3, x4) \  argument
     94  #define S2_1(x0, x1, x2, x3, x4) \  argument
    103  #define S2_2(x0, x1, x2, x3, x4) \  argument
    113  #define S3_1(x0, x1, x2, x3, x4) \  argument
    123  #define S3_2(x0, x1, x2, x3, x4) \  argument
    134  #define S4_1(x0, x1, x2, x3, x4) \  argument
    143  #define S4_2(x0, x1, x2, x3, x4) \  argument
    [all …]
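The S?_1/S?_2 pairs above (and their counterparts in the other serpent-*.S files below) are the two halves of each Serpent S-box written as a bitsliced boolean network over five vector registers, so one invocation transforms every 32-bit slice in parallel; splitting each S-box in two presumably helps the assembly interleave independent work. A minimal plain-C sketch of the bitslicing idea follows; the boolean equations in it are made up for illustration and are not Serpent's actual S0, and uint32_t words stand in for the XMM registers.

    #include <stdint.h>

    /*
     * Illustrative sketch only: bitsliced evaluation of a *toy* 4-bit S-box.
     * The real S0_1/S0_2 ... networks use different boolean equations and act
     * on XMM/YMM registers; here a 32-bit word stands in for each register,
     * so 32 independent 4-bit inputs are transformed per call.
     */
    static void toy_sbox_bitsliced(uint32_t *x0, uint32_t *x1,
                                   uint32_t *x2, uint32_t *x3, uint32_t *x4)
    {
        /* hypothetical mixing network, not Serpent's */
        *x4 = *x0 ^ *x3;            /* x4 plays the scratch role it has above */
        *x1 = *x1 & *x4;
        *x2 = *x2 ^ *x1;
        *x0 = *x0 | *x2;
        *x3 = ~(*x3) ^ *x4;
        /* every &, |, ^, ~ acts on all 32 bit positions at once */
    }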
|
D | serpent-sse2-x86_64-asm_64.S |
     41  #define S0_1(x0, x1, x2, x3, x4) \  argument
     51  #define S0_2(x0, x1, x2, x3, x4) \  argument
     61  #define S1_1(x0, x1, x2, x3, x4) \  argument
     71  #define S1_2(x0, x1, x2, x3, x4) \  argument
     82  #define S2_1(x0, x1, x2, x3, x4) \  argument
     92  #define S2_2(x0, x1, x2, x3, x4) \  argument
    102  #define S3_1(x0, x1, x2, x3, x4) \  argument
    113  #define S3_2(x0, x1, x2, x3, x4) \  argument
    124  #define S4_1(x0, x1, x2, x3, x4) \  argument
    134  #define S4_2(x0, x1, x2, x3, x4) \  argument
    [all …]
|
D | serpent-avx2-asm_64.S |
     61  #define S0_1(x0, x1, x2, x3, x4) \  argument
     70  #define S0_2(x0, x1, x2, x3, x4) \  argument
     80  #define S1_1(x0, x1, x2, x3, x4) \  argument
     89  #define S1_2(x0, x1, x2, x3, x4) \  argument
    100  #define S2_1(x0, x1, x2, x3, x4) \  argument
    109  #define S2_2(x0, x1, x2, x3, x4) \  argument
    119  #define S3_1(x0, x1, x2, x3, x4) \  argument
    129  #define S3_2(x0, x1, x2, x3, x4) \  argument
    140  #define S4_1(x0, x1, x2, x3, x4) \  argument
    149  #define S4_2(x0, x1, x2, x3, x4) \  argument
    [all …]
|
D | serpent-sse2-i586-asm_32.S |
     42  #define K(x0, x1, x2, x3, x4, i) \  argument
     52  #define LK(x0, x1, x2, x3, x4, i) \  argument
     98  #define KL(x0, x1, x2, x3, x4, i) \  argument
    137  #define S0(x0, x1, x2, x3, x4) \  argument
    156  #define S1(x0, x1, x2, x3, x4) \  argument
    176  #define S2(x0, x1, x2, x3, x4) \  argument
    195  #define S3(x0, x1, x2, x3, x4) \  argument
    216  #define S4(x0, x1, x2, x3, x4) \  argument
    235  #define S5(x0, x1, x2, x3, x4) \  argument
    254  #define S6(x0, x1, x2, x3, x4) \  argument
    [all …]
|
D | glue_helper-asm-avx.S |
      8  #define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
     18  #define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
     28  #define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
     44  #define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \  argument
     72  #define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
     90  #define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, t0, \  argument
    131  #define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
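load_8way()/store_8way() move eight consecutive 16-byte blocks between memory and eight XMM registers; store_cbc_8way() additionally XORs blocks 1..7 with the preceding ciphertext block before storing, which is the chaining step of CBC decryption (the IV for block 0 is applied by the C glue code). A hedged plain-C sketch of that store path, with arrays standing in for the registers:

    #include <stdint.h>
    #include <string.h>

    #define BLOCKS 8

    struct blk { uint8_t b[16]; };       /* stands in for one xmm register */

    static void xor_blk(struct blk *d, const struct blk *s)
    {
        for (int i = 0; i < 16; i++)
            d->b[i] ^= s->b[i];
    }

    /*
     * Sketch of what store_cbc_8way() achieves after an 8-way parallel block
     * decryption: output block i is the decrypted block XORed with ciphertext
     * block i-1.  The IV for block 0 is handled by the caller in the real
     * glue code, so it is skipped here as well.
     */
    static void store_cbc_8way_sketch(const struct blk *src, struct blk *dst,
                                      struct blk x[BLOCKS])
    {
        for (int i = BLOCKS - 1; i >= 1; i--)
            xor_blk(&x[i], &src[i - 1]);
        memcpy(dst, x, sizeof(struct blk) * BLOCKS);
    }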
|
D | glue_helper-asm-avx2.S |
      8  #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
     18  #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
     28  #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \  argument
     55  #define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \  argument
     88  #define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
    118  #define load_xts_16way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, \  argument
    166  #define store_xts_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
|
D | camellia-aesni-avx-asm_64.S |
     51  #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \  argument
    213  #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    232  #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */  argument
    234  #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \  argument
    245  #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    254  #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    420  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
    483  #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    506  #define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    529  #define outunpack16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \  argument
    [all …]
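transpose_4x4() here (and its avx2, twofish and cast6 counterparts below) reorders four vector registers so that a 4x4 matrix of 32-bit words held row-by-row ends up column-by-column, which is how the inpack/outunpack macros convert between the memory layout of blocks and the sliced layout the round macros work on. A plain-C sketch of the same permutation, using a scratch matrix where the assembly uses unpack instructions and two temporary registers:

    #include <stdint.h>

    /*
     * Plain-C sketch of the 4x4 transpose that transpose_4x4() performs on
     * 32-bit lanes: row-major state becomes column-major.
     */
    static void transpose_4x4_sketch(uint32_t x[4][4])
    {
        uint32_t t[4][4];

        for (int r = 0; r < 4; r++)
            for (int c = 0; c < 4; c++)
                t[c][r] = x[r][c];

        for (int r = 0; r < 4; r++)
            for (int c = 0; c < 4; c++)
                x[r][c] = t[r][c];
    }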
|
D | camellia-aesni-avx2-asm_64.S |
     63  #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \  argument
    247  #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    266  #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */  argument
    268  #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \  argument
    279  #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    288  #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    454  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
    517  #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    540  #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
    563  #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \  argument
    [all …]
|
D | twofish-avx-x86_64-asm_64.S |
    120  #define round_head_2(a, b, x1, y1, x2, y2) \  argument
    209  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \  argument
    220  #define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \  argument
    228  #define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \  argument
|
D | cast6-avx-x86_64-asm_64.S |
    186  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \  argument
    197  #define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \  argument
    205  #define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \  argument
|
/arch/sparc/lib/ |
D | NG2memcpy.S |
     86  #define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \  argument
    101  #define FREG_MOVE_3(x0, x1, x2) \  argument
    105  #define FREG_MOVE_4(x0, x1, x2, x3) \  argument
    110  #define FREG_MOVE_5(x0, x1, x2, x3, x4) \  argument
    116  #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \  argument
    123  #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \  argument
    131  #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \  argument
    145  #define FREG_LOAD_3(base, x0, x1, x2) \  argument
    149  #define FREG_LOAD_4(base, x0, x1, x2, x3) \  argument
    154  #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \  argument
    [all …]
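The FREG_* macros stage groups of doublewords in floating-point registers for the block-copy loop; the FREG_FROB() sequence splices each output doubleword out of two neighbouring aligned input doublewords, which is how the routine copies from a source that is not 8-byte aligned. A rough C sketch of that splicing idea, assuming little-endian shifts purely for illustration (the sparc code is big-endian and uses faligndata with the hardware alignment offset instead):

    #include <stdint.h>
    #include <stddef.h>

    /*
     * Copy `words` doublewords from a possibly misaligned source by reading
     * aligned doublewords and splicing.  Like alignment-splicing copies in
     * general, it reads one aligned word ahead of the last source byte, so it
     * is only safe inside a region known to be readable.
     */
    static void unaligned_copy_sketch(uint64_t *dst, const uint8_t *src,
                                      size_t words)
    {
        size_t off = (uintptr_t)src & 7;          /* misalignment in bytes */
        const uint64_t *s = (const uint64_t *)(src - off);
        uint64_t prev = *s++;

        for (size_t i = 0; i < words; i++) {
            uint64_t next = *s++;
            dst[i] = (prev >> (8 * off)) |
                     (off ? next << (8 * (8 - off)) : 0);
            prev = next;
        }
    }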
|
/arch/sparc/include/asm/ |
D | sfp-machine_32.h |
     78  #define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \  argument
     93  #define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \  argument
    108  #define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \  argument
    133  #define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \  argument
    158  #define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)  argument
    160  #define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y0)  argument
    162  #define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \  argument
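__FP_FRAC_ADD_3()/__FP_FRAC_SUB_3() and the _4 variants implement multiword arithmetic on soft-float fractions: limb 0 is the least significant word and the carry (or borrow) out of each limb feeds the next, which the sparc macros do with carry-flag instructions. A portable sketch of the 3-limb addition, using a 64-bit accumulator instead of the hardware carry flag:

    #include <stdint.h>

    /*
     * Sketch of the multiword addition behind __FP_FRAC_ADD_3():
     * r = x + y over three 32-bit limbs, carry propagated upward.
     * Array index 0 is the least significant limb (x0 in the macro).
     */
    static void fp_frac_add_3_sketch(uint32_t r[3],
                                     const uint32_t x[3], const uint32_t y[3])
    {
        uint64_t acc = 0;

        for (int i = 0; i < 3; i++) {
            acc += (uint64_t)x[i] + y[i];
            r[i] = (uint32_t)acc;
            acc >>= 32;                 /* carry into the next limb */
        }
    }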
|
/arch/x86/math-emu/ |
D | poly.h |
     74  static inline void add_Xsig_Xsig(Xsig *dest, const Xsig *x2)  in add_Xsig_Xsig()
     88  static inline void add_two_Xsig(Xsig *dest, const Xsig *x2, long int *exp)  in add_two_Xsig()
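Xsig is the FPU emulator's 96-bit extended significand (three 32-bit words), and add_Xsig_Xsig() adds one into another with carry propagation from the low word upward; add_two_Xsig() also takes an exponent pointer so an overflowing sum can be renormalized. A hedged C sketch of the basic addition; the field names below assume the usual lsw/midw/msw layout of Xsig, while the in-tree routine chains the x86 carry flag in inline assembly:

    #include <stdint.h>

    struct xsig_sketch {
        uint32_t lsw, midw, msw;        /* least to most significant word */
    };

    /* 96-bit add: dest += *x2, carries rippled through a 64-bit accumulator */
    static void add_xsig_sketch(struct xsig_sketch *dest,
                                const struct xsig_sketch *x2)
    {
        uint64_t acc;

        acc = (uint64_t)dest->lsw + x2->lsw;
        dest->lsw = (uint32_t)acc;

        acc = (acc >> 32) + dest->midw + x2->midw;
        dest->midw = (uint32_t)acc;

        acc = (acc >> 32) + dest->msw + x2->msw;
        dest->msw = (uint32_t)acc;      /* carry out of the top word is dropped */
    }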
|
/arch/x86/kernel/ |
D | mmconf-fam10h_64.c | 35 static int cmp_range(const void *x1, const void *x2) in cmp_range()
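cmp_range() is the comparison callback handed to the kernel's sort() so the collected address ranges can be ordered by start; it receives two opaque element pointers and returns the usual negative/zero/positive ordering. A standalone sketch of such a comparator (the struct name and fields here are illustrative, not the in-tree type), shown with qsort() so it runs outside the kernel:

    #include <stdint.h>
    #include <stdlib.h>

    struct range_sketch {
        uint64_t start, end;            /* illustrative element type */
    };

    /* order elements by ascending start address */
    static int cmp_range_sketch(const void *x1, const void *x2)
    {
        const struct range_sketch *r1 = x1, *r2 = x2;

        if (r1->start < r2->start)
            return -1;
        return r1->start > r2->start;
    }

    /* usage: qsort(ranges, n, sizeof(*ranges), cmp_range_sketch); */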
|
/arch/ia64/include/asm/ |
D | kprobes.h | 40 unsigned long long x2 : 2; member
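The x2 : 2 entry is a two-bit bitfield member in the instruction-encoding struct that ia64 kprobes uses when examining instruction slots; C bitfields like this pack several narrow fields into one underlying word and silently truncate values on assignment. A tiny standalone example of that behaviour (the surrounding field names are made up, not the kernel's):

    #include <stdio.h>

    /* illustrative layout only; 6 + 2 + 56 bits fill one 64-bit word */
    struct insn_fields_sketch {
        unsigned long long qp   : 6;
        unsigned long long x2   : 2;    /* can hold only 0..3 */
        unsigned long long rest : 56;
    };

    int main(void)
    {
        struct insn_fields_sketch f = { 0 };

        f.x2 = 5;                       /* truncated: 5 & 3 == 1 */
        printf("x2 = %llu\n", (unsigned long long)f.x2);
        return 0;
    }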
|
/arch/alpha/include/asm/ |
D | bitops.h | 301 unsigned long sum, x1, x2, x4; in ffz_b() local
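ffz_b() computes the index of the first (least significant) zero bit within a byte; the sum/x1/x2/x4 locals hint at the branch-free form used on alpha, which isolates the lowest clear bit and then assembles the index from weighted masks. A portable sketch that produces the same index the slow way (returning 8 for an all-ones byte is this sketch's own convention):

    /* index of the first zero bit in a byte, 0..7, or 8 if every bit is set */
    static int ffz_b_sketch(unsigned char x)
    {
        unsigned int inv = (unsigned char)~x;   /* zero bits become one bits */
        int pos = 0;

        if (!inv)
            return 8;                           /* sketch's all-ones convention */

        while (!(inv & 1)) {
            inv >>= 1;
            pos++;
        }
        return pos;
    }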
|