/arch/x86/crypto/ |
D | serpent-avx-x86_64-asm_64.S |
       51  #define S0_1(x0, x1, x2, x3, x4) \  argument
       60  #define S0_2(x0, x1, x2, x3, x4) \  argument
       70  #define S1_1(x0, x1, x2, x3, x4) \  argument
       79  #define S1_2(x0, x1, x2, x3, x4) \  argument
       90  #define S2_1(x0, x1, x2, x3, x4) \  argument
       99  #define S2_2(x0, x1, x2, x3, x4) \  argument
      109  #define S3_1(x0, x1, x2, x3, x4) \  argument
      119  #define S3_2(x0, x1, x2, x3, x4) \  argument
      130  #define S4_1(x0, x1, x2, x3, x4) \  argument
      139  #define S4_2(x0, x1, x2, x3, x4) \  argument
      [all …]
|
D | serpent-avx2-asm_64.S |
       51  #define S0_1(x0, x1, x2, x3, x4) \  argument
       60  #define S0_2(x0, x1, x2, x3, x4) \  argument
       70  #define S1_1(x0, x1, x2, x3, x4) \  argument
       79  #define S1_2(x0, x1, x2, x3, x4) \  argument
       90  #define S2_1(x0, x1, x2, x3, x4) \  argument
       99  #define S2_2(x0, x1, x2, x3, x4) \  argument
      109  #define S3_1(x0, x1, x2, x3, x4) \  argument
      119  #define S3_2(x0, x1, x2, x3, x4) \  argument
      130  #define S4_1(x0, x1, x2, x3, x4) \  argument
      139  #define S4_2(x0, x1, x2, x3, x4) \  argument
      [all …]
|
D | serpent-sse2-x86_64-asm_64.S |
       41  #define S0_1(x0, x1, x2, x3, x4) \  argument
       51  #define S0_2(x0, x1, x2, x3, x4) \  argument
       61  #define S1_1(x0, x1, x2, x3, x4) \  argument
       71  #define S1_2(x0, x1, x2, x3, x4) \  argument
       82  #define S2_1(x0, x1, x2, x3, x4) \  argument
       92  #define S2_2(x0, x1, x2, x3, x4) \  argument
      102  #define S3_1(x0, x1, x2, x3, x4) \  argument
      113  #define S3_2(x0, x1, x2, x3, x4) \  argument
      124  #define S4_1(x0, x1, x2, x3, x4) \  argument
      134  #define S4_2(x0, x1, x2, x3, x4) \  argument
      [all …]
|
D | serpent-sse2-i586-asm_32.S |
       42  #define K(x0, x1, x2, x3, x4, i) \  argument
       52  #define LK(x0, x1, x2, x3, x4, i) \  argument
       98  #define KL(x0, x1, x2, x3, x4, i) \  argument
      137  #define S0(x0, x1, x2, x3, x4) \  argument
      156  #define S1(x0, x1, x2, x3, x4) \  argument
      176  #define S2(x0, x1, x2, x3, x4) \  argument
      195  #define S3(x0, x1, x2, x3, x4) \  argument
      216  #define S4(x0, x1, x2, x3, x4) \  argument
      235  #define S5(x0, x1, x2, x3, x4) \  argument
      254  #define S6(x0, x1, x2, x3, x4) \  argument
      [all …]
|
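The x0..x4 arguments to these S-box macros are vector registers: in the usual bitsliced Serpent formulation, bit j of x0..x3 together forms the j-th 4-bit S-box input, so a short sequence of boolean operations on whole registers evaluates the S-box for every bit position at once, with x4 used as scratch. A minimal scalar sketch of the idea follows; the substitution and the name toy_sbox_bitsliced are invented for illustration (it is linear, unlike a real S-box) and are not taken from these files.

    #include <stdint.h>

    /*
     * Toy bitsliced substitution: x[0..3] each hold one bit-plane of 32
     * independent 4-bit inputs, so four XORs evaluate the mapping for all
     * 32 inputs at once.  The mapping (y_i = x_i ^ x_(i+1 mod 4)) is
     * invented for illustration; real Serpent S-boxes are nonlinear and
     * also need the x4 scratch register.
     */
    static void toy_sbox_bitsliced(const uint32_t x[4], uint32_t y[4])
    {
        y[0] = x[0] ^ x[1];
        y[1] = x[1] ^ x[2];
        y[2] = x[2] ^ x[3];
        y[3] = x[3] ^ x[0];
    }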
D | glue_helper-asm-avx.S |
        8  #define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
       18  #define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
       28  #define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
|
D | glue_helper-asm-avx2.S |
        8  #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
       18  #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  argument
       28  #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \  argument
|
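The load_*way/store_*way helpers move a batch of blocks between memory and registers, and store_cbc_*way appears to fold CBC chaining into the final store on the decryption path: every output block except the first is XORed with the preceding ciphertext block before being written. A plain-C sketch of that chaining, with a hypothetical store_cbc_nway standing in for the asm macros:

    #include <stdint.h>
    #include <string.h>

    #define BLK 16  /* 128-bit cipher block */

    /*
     * Hypothetical helper: XOR decrypted block i with ciphertext block i-1
     * (block 0 is left for the caller to XOR with the IV), then store the
     * whole batch.  Sketch only; the asm macros operate on whole vector
     * registers instead of byte loops.
     */
    static void store_cbc_nway(const uint8_t *src, uint8_t *dst,
                               uint8_t blocks[][BLK], int n)
    {
        for (int i = 1; i < n; i++)
            for (int j = 0; j < BLK; j++)
                blocks[i][j] ^= src[(size_t)(i - 1) * BLK + j];
        memcpy(dst, blocks, (size_t)n * BLK);
    }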
D | aria-aesni-avx-asm_64.S |
       53  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
      171  #define inpack16_pre(x0, x1, x2, x3, \  argument
      194  #define inpack16_post(x0, x1, x2, x3, \  argument
      222  #define write_output(x0, x1, x2, x3, \  argument
      244  #define aria_store_state_8way(x0, x1, x2, x3, \  argument
      256  #define aria_load_state_8way(x0, x1, x2, x3, \  argument
      268  #define aria_ark_8way(x0, x1, x2, x3, \  argument
      299  #define aria_sbox_8way_gfni(x0, x1, x2, x3, \  argument
      323  #define aria_sbox_8way(x0, x1, x2, x3, \  argument
      364  #define aria_diff_m(x0, x1, x2, x3, \  argument
      [all …]
|
D | aria-aesni-avx2-asm_64.S |
       69  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
      187  #define inpack16_pre(x0, x1, x2, x3, \  argument
      210  #define inpack16_post(x0, x1, x2, x3, \  argument
      238  #define write_output(x0, x1, x2, x3, \  argument
      260  #define aria_store_state_8way(x0, x1, x2, x3, \  argument
      272  #define aria_load_state_8way(x0, x1, x2, x3, \  argument
      284  #define aria_ark_8way(x0, x1, x2, x3, \  argument
      306  #define aria_sbox_8way_gfni(x0, x1, x2, x3, \  argument
      329  #define aria_sbox_8way(x0, x1, x2, x3, \  argument
      405  #define aria_diff_m(x0, x1, x2, x3, \  argument
      [all …]
|
D | camellia-aesni-avx-asm_64.S |
       50  #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \  argument
      212  #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      231  #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */  argument
      233  #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \  argument
      244  #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      253  #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      419  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
      482  #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      505  #define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      528  #define outunpack16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \  argument
      [all …]
|
D | aria-gfni-avx512-asm_64.S |
       53  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
      171  #define inpack16_pre(x0, x1, x2, x3, \  argument
      194  #define inpack16_post(x0, x1, x2, x3, \  argument
      222  #define write_output(x0, x1, x2, x3, \  argument
      244  #define aria_store_state_8way(x0, x1, x2, x3, \  argument
      256  #define aria_load_state_8way(x0, x1, x2, x3, \  argument
      268  #define aria_ark_16way(x0, x1, x2, x3, \  argument
      307  #define aria_sbox_8way_gfni(x0, x1, x2, x3, \  argument
      329  #define aria_sbox_16way_gfni(x0, x1, x2, x3, \  argument
      366  #define aria_diff_m(x0, x1, x2, x3, \  argument
      [all …]
|
D | camellia-aesni-avx2-asm_64.S |
       62  #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \  argument
      244  #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      263  #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */  argument
      265  #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \  argument
      276  #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      285  #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      451  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
      514  #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      537  #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \  argument
      560  #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \  argument
      [all …]
|
D | cast6-avx-x86_64-asm_64.S |
      190  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \  argument
      201  #define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \  argument
      209  #define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \  argument
|
D | twofish-avx-x86_64-asm_64.S |
      204  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \  argument
      215  #define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \  argument
      223  #define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \  argument
|
D | sm4-aesni-avx2-asm_64.S |
       62  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
|
D | sm4-aesni-avx-asm_64.S |
       46  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \  argument
|
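The transpose_4x4 helpers that recur across these files reorder a 4x4 matrix of 32-bit words spread over four vector registers, converting between per-block and per-lane layouts so that several blocks can be processed in parallel. A scalar sketch of the data movement, using a hypothetical transpose_4x4_words over a plain array in place of the registers:

    #include <stdint.h>

    /*
     * Hypothetical scalar stand-in for transpose_4x4: m[i][j] plays the
     * role of 32-bit lane j of vector register x_i; the t1/t2 arguments in
     * the asm are scratch registers for the unpack steps and have no
     * counterpart here.
     */
    static void transpose_4x4_words(uint32_t m[4][4])
    {
        for (int i = 0; i < 4; i++)
            for (int j = i + 1; j < 4; j++) {
                uint32_t tmp = m[i][j];

                m[i][j] = m[j][i];
                m[j][i] = tmp;
            }
    }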
D | curve25519-x86_64.c |
      988  u64 *x3;  in point_add_and_double()  local
|
/arch/sparc/lib/ |
D | NG2memcpy.S |
       86  #define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \  argument
      105  #define FREG_MOVE_4(x0, x1, x2, x3) \  argument
      110  #define FREG_MOVE_5(x0, x1, x2, x3, x4) \  argument
      116  #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \  argument
      123  #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \  argument
      131  #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \  argument
      149  #define FREG_LOAD_4(base, x0, x1, x2, x3) \  argument
      154  #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \  argument
      160  #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \  argument
      167  #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \  argument
|
/arch/sparc/include/asm/ |
D | sfp-machine_32.h |
      108  #define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \  argument
      133  #define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \  argument
      160  #define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y…  argument
      162  #define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \  argument
|
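__FP_FRAC_ADD_4 and its siblings implement multi-word arithmetic on a 4-limb soft-float fraction: r = x + y (or x - y) with the carry/borrow propagated across the limbs. A portable sketch of the addition follows, assuming limb 0 is least significant; the hypothetical fp_frac_add_4 only illustrates the carry propagation, while the real macro targets 32-bit SPARC and relies on carry-flag instructions:

    #include <stdint.h>

    /*
     * Hypothetical portable version of a 4-limb fraction add, r = x + y,
     * with limb 0 least significant.  Each step can produce a carry both
     * from x[i] + y[i] and from adding the incoming carry.
     */
    static void fp_frac_add_4(uint32_t r[4], const uint32_t x[4],
                              const uint32_t y[4])
    {
        uint32_t carry = 0;

        for (int i = 0; i < 4; i++) {
            uint32_t sum = x[i] + y[i];
            uint32_t c   = sum < x[i];      /* carry out of x + y */

            r[i]  = sum + carry;
            carry = c | (r[i] < sum);       /* carry out of adding carry */
        }
    }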