/arch/arm/include/asm/
xor.h
    26   : "=r" (src), "=r" (b1), "=r" (b2) \
    28   __XOR(a1, b1); __XOR(a2, b2);
    32   : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
    34   __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
    54   register unsigned int b1 __asm__("r8");  (in xor_arm4regs_2())
    75   register unsigned int b1 __asm__("r8");  (in xor_arm4regs_3())
    95   register unsigned int b1 __asm__("ip");  (in xor_arm4regs_4())
    114  register unsigned int b1 __asm__("ip");  (in xor_arm4regs_5())
/arch/xtensa/platforms/iss/include/platform/ |
simcall-iss.h
    61  register int b1 asm("a3") = b;  (in __simc())
    66  : "+r"(a1), "+r"(b1)  (in __simc())
    69  errno = b1;  (in __simc())
simcall-gdbio.h
    22  register int b1 asm("a6") = b;  (in __simc())
    28  : "r"(b1), "r"(d1)  (in __simc())
/arch/s390/net/ |
bpf_jit_comp.c
    111  static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)  (in reg_set_seen(), argument)
    113  u32 r1 = reg2hex[b1];  (in reg_set_seen())
    119  #define REG_SET_SEEN(b1) \  (argument)
    121  reg_set_seen(jit, b1); \
    124  #define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]  (argument)
    137  #define EMIT2(op, b1, b2) \  (argument)
    139  _EMIT2((op) | reg(b1, b2)); \
    140  REG_SET_SEEN(b1); \
    151  #define EMIT4(op, b1, b2) \  (argument)
    153  _EMIT4((op) | reg(b1, b2)); \
    [all …]
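REG_SET_SEEN()/REG_SEEN() record which mapped machine registers the emitted code has touched, so (judging by the seen_reg name) later stages can restrict themselves to registers that were actually used, for example when building the prologue/epilogue. A minimal sketch of the bookkeeping, with an invented reg2hex mapping; the kernel's real table and its reg() encoding helper are not reproduced here:

    #include <stdbool.h>

    struct jit_state {
        bool seen_reg[16];              /* one flag per machine register */
    };

    /* Invented BPF-register-to-machine-register mapping; the kernel's
     * reg2hex[] differs. */
    static const unsigned int reg2hex_demo[] = { 14, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13 };

    /* Mirror of the reg_set_seen() above: mark the mapped register used. */
    static inline void reg_set_seen(struct jit_state *jit, unsigned int b1)
    {
        jit->seen_reg[reg2hex_demo[b1]] = true;
    }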
/arch/arm/nwfpe/ |
softfloat-macros
    339  value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so
    346  bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
    350  z1 = a1 + b1;
    359  192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
    371  bits64 b1,
    383  z1 = a1 + b1;
    397  Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
    406  bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
    409  *z1Ptr = a1 - b1;
    410  *z0Ptr = a0 - b0 - ( a1 < b1 );
    [all …]
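The visible lines give away the whole multi-word arithmetic trick: in the SoftFloat convention a0/b0 are the high words and a1/b1 the low words, carry out of the low sum is detected by unsigned wraparound, and a subtraction borrows when the low minuend is smaller (a1 < b1). A self-contained C restatement of the same logic, with bits64 spelled uint64_t:

    #include <stdint.h>

    /* 128-bit add, modulo 2^128: if the low sum wrapped, carry into the
     * high word, exactly as in add128() above. */
    static void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
        uint64_t lo = a1 + b1;
        *z1 = lo;
        *z0 = a0 + b0 + (lo < a1);   /* wraparound => carry */
    }

    /* 128-bit subtract: borrow from the high word when a1 < b1. */
    static void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
        *z1 = a1 - b1;
        *z0 = a0 - b0 - (a1 < b1);
    }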
/arch/arm/crypto/ |
aes-neonbs-core.S
    80   .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    81   veor \b2, \b2, \b1
    92   veor \b3, \b3, \b1
    93   veor \b1, \b1, \b5
    96   .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    98   veor \b1, \b1, \b4
    101  veor \b6, \b6, \b1
    102  veor \b1, \b1, \b5
    110  .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
    111  veor \b1, \b1, \b7
    [all …]
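In these bitsliced AES implementations, b0–b7 each hold one bit plane of several AES states processed in parallel; as far as the visible lines show, in_bs_ch/out_bs_ch and their inverses are the GF(2)-linear change of basis applied before and after the shared S-box circuit, which is why they consist purely of veor (XOR) chains. The arm64 aes-neonbs-core.S hits further down are the same transforms spelled with eor.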
blake2s-core.S
    68   .macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3
    75   add \a1, \a1, \b1, ror #brot
    89   eor \b1, \c1, \b1, ror #brot
    96   add \a1, \a1, \b1, ror #12
    110  eor \b1, \c1, \b1, ror#12
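The macro runs two BLAKE2s G functions side by side (the a0/b0 and a1/b1 sets) and keeps values in a rotated representation, applying the pending rotation at the point of use via the shifted second operand (ror #brot, ror #12). For reference, one G function on 32-bit words as specified in RFC 7693, in plain C:

    #include <stdint.h>

    #define ROR32(v, n) (((v) >> (n)) | ((v) << (32 - (n))))

    /* One BLAKE2s G function: mixes two message words x, y into one
     * column or diagonal (a, b, c, d) of the 4x4 state. */
    static void blake2s_g(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
                          uint32_t x, uint32_t y)
    {
        *a += *b + x;  *d = ROR32(*d ^ *a, 16);
        *c += *d;      *b = ROR32(*b ^ *c, 12);
        *a += *b + y;  *d = ROR32(*d ^ *a, 8);
        *c += *d;      *b = ROR32(*b ^ *c, 7);
    }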
chacha-scalar-core.S
    71  .macro _halfround a1, b1, c1, d1, a2, b2, c2, d2
    74  add \a1, \a1, \b1, ror #brot
    83  eor \b1, \c1, \b1, ror #brot
    88  add \a1, \a1, \b1, ror #20
    97  eor \b1, \c1, \b1, ror #20
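Same deferred-rotation ARX pattern, different constants: ChaCha rotates left by 16, 12, 8 and 7, and a left-rotate by 12 appears here as ror #20 on the second operand. The plain C quarter round, for comparison:

    #include <stdint.h>

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    /* ChaCha quarter round; the assembly's ror #20 is ROTL32(..., 12). */
    static void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
        *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }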
ghash-ce-core.S
    95   .macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
    109  .macro __pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l  (argument)
    111  .ifc \b1, t4l
    116  vmull.p8 t4q, \ad, \b1 @ E = A*B1
    282  __pmull_\pn XH, XL_H, SHASH_H, s1h, s2h, s3h, s4h @ a1 * b1
    285  __pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0)
/arch/x86/crypto/ |
cast6-avx-x86_64-asm_64.S
    125  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  (argument)
    126  F_head(b1, RX, RGI1, RGI2, op0); \
    129  F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
    135  #define F1_2(a1, b1, a2, b2) \  (argument)
    136  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
    137  #define F2_2(a1, b1, a2, b2) \  (argument)
    138  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
    139  #define F3_2(a1, b1, a2, b2) \  (argument)
    140  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
cast5-avx-x86_64-asm_64.S
    125  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  (argument)
    126  F_head(b1, RX, RGI1, RGI2, op0); \
    129  F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
    135  #define F1_2(a1, b1, a2, b2) \  (argument)
    136  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
    137  #define F2_2(a1, b1, a2, b2) \  (argument)
    138  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
    139  #define F3_2(a1, b1, a2, b2) \  (argument)
    140  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
    142  #define subround(a1, b1, a2, b2, f) \  (argument)
    [all …]
ghash-clmulni-intel_asm.S
    54  pclmulqdq $0x11, SHASH, T1  # T1 = a1 * b1
    55  pclmulqdq $0x00, T3, T2     # T2 = (a1 + a0) * (b1 + b0)
    57  pxor T1, T2                 # T2 = a0 * b1 + a1 * b0
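These three comments spell out one level of Karatsuba multiplication in GF(2)[x], where addition is XOR:

    (a1 + a0)(b1 + b0) = a1*b1 + a1*b0 + a0*b1 + a0*b0

so once the separately computed a1*b1 and a0*b0 products are XORed away, only the middle term a1*b0 + a0*b1 remains. A 128x128-bit carry-less multiply therefore needs three pclmulqdq instructions instead of four, at the cost of a few pxor fix-ups. The ghash-ce-core.S hits above and the aesni-intel_asm.S hits below rely on the same identity.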
camellia-aesni-avx-asm_64.S
    432  #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \  (argument)
    437  transpose_4x4(b0, b1, b2, b3, d2, d3); \
    451  vpshufb a0, b1, b1; \
    469  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    474  vmovdqu b1, st1; \
    475  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    476  transpose_4x4(a3, b3, c3, d3, b0, b1); \
    478  vmovdqu st1, b1; \
curve25519-x86_64.c
    782  u64 *b1;  (in point_add_and_double(), local)
    799  b1 = tmp1 + (u32)4U;  (in point_add_and_double())
    811  fmul_scalar(b1, c, (u64)121665U);  (in point_add_and_double())
    812  fadd(b1, b1, d);  (in point_add_and_double())
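The 121665 passed to fmul_scalar() is Curve25519's ladder constant a24 = (486662 - 2)/4, and the fmul_scalar()/fadd() pair matches the AA + a24*E term of the RFC 7748 Montgomery ladder step z2 = E * (AA + a24 * E); b1 here is just the scratch limb vector that holds that intermediate.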
aesni-intel_asm.S
    515   pxor \HK, \TMP3                # TMP3 = b1+b0
    516   pclmulqdq $0x11, \HK, \TMP1    # TMP1 = a1*b1
    518   pclmulqdq $0x00, \TMP3, \TMP2  # TMP2 = (a0+a1)*(b1+b0)
    992   pclmulqdq $0x11, \TMP5, \TMP4  # TMP4 = a1*b1
    1011  pclmulqdq $0x00, \TMP5, \TMP6  # TMP6 = (a1+a0)*(b1+b0)
    1026  pclmulqdq $0x11, \TMP5, \TMP1  # TMP1 = a1 * b1
    1039  pclmulqdq $0x00, \TMP5, \TMP2  # TMP2 = (a1+a0)*(b1+b0)
    1056  pclmulqdq $0x11, \TMP5, \TMP1  # TMP1 = a1*b1
    1069  pclmulqdq $0x00, \TMP5, \TMP2  # TMP2 = (a1+a0)*(b1+b0)
    1087  pclmulqdq $0x11, \TMP5, \TMP1  # TMP1 = a1*b1
    [all …]
/arch/arm64/crypto/ |
aes-neonbs-core.S
    25  .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    26  eor \b2, \b2, \b1
    37  eor \b3, \b3, \b1
    38  eor \b1, \b1, \b5
    41  .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    43  eor \b1, \b1, \b4
    46  eor \b6, \b6, \b1
    47  eor \b1, \b1, \b5
    55  .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
    56  eor \b1, \b1, \b7
    [all …]
/arch/ia64/kernel/ |
module.c
    247  uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];  (in plt_target(), local)
    250  b0 = b[0]; b1 = b[1];  (in plt_target())
    251  off = ( ((b1 & 0x00fffff000000000UL) >> 36)             /* imm20b -> bit 0 */
    252        | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)  /* imm39 -> bit 20 */
    253        | ((b1 & 0x0800000000000000UL) << 0));            /* i -> bit 59 */
    296  uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];  (in plt_target(), local)
    298  b0 = b[0]; b1 = b[1];  (in plt_target())
    299  return ( ((b1 & 0x000007f000000000) >> 36)   /* imm7b -> bit 0 */
    300         | ((b1 & 0x07fc000000000000) >> 43)   /* imm9d -> bit 7 */
    301         | ((b1 & 0x0003e00000000000) >> 29)   /* imm5c -> bit 16 */
    [all …]
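plt_target() reassembles a branch target that the IA-64 instruction format scatters across bundle fields (imm20b, imm39 and the sign bit i in the first variant; imm7b/imm9d/imm5c in the second). Every term is the same move-a-bitfield step; a generic helper makes the pattern explicit (move_field() is hypothetical, written here only to name the idiom):

    #include <stdint.h>

    /* Take the bits selected by mask, shift them down to bit 0, then
     * place them at bit position `to` of the result. */
    static inline uint64_t move_field(uint64_t word, uint64_t mask,
                                      unsigned int from, unsigned int to)
    {
        return ((word & mask) >> from) << to;
    }

    /* The imm20b term on line 251 is move_field(b1, 0x00fffff000000000UL, 36, 0). */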
patch.c
    47  u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);  (in ia64_patch(), local)
    51  b0 = b[0]; b1 = b[1];  (in ia64_patch())
    61  b[1] = (b1 & ~m1) | (v1 & m1);  (in ia64_patch())
/arch/arm64/boot/dts/amd/ |
Makefile
    3  amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
/arch/alpha/include/asm/ |
bitops.h
    445  unsigned long b0, b1, ofs, tmp;  (in sched_find_first_bit(), local)
    448  b1 = b[1];  (in sched_find_first_bit())
    450  tmp = (b0 ? b0 : b1);  (in sched_find_first_bit())
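The visible lines give away the shape of a two-word first-set-bit scan: pick the first nonzero 64-bit word and offset the bit index by 64 if it was the second one. A hedged sketch of that pattern (the ofs handling is inferred from the declaration on line 445; __builtin_ctzl stands in for the kernel's __ffs, and as with the real helper the result is undefined when no bit is set):

    /* Find the first set bit in a bitmap spanning two 64-bit words. */
    static unsigned long first_bit_of_two(const unsigned long b[2])
    {
        unsigned long b0 = b[0], b1 = b[1];
        unsigned long ofs = b0 ? 0 : 64;   /* second word starts at bit 64 */
        unsigned long tmp = b0 ? b0 : b1;
        return __builtin_ctzl(tmp) + ofs;
    }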
/arch/sh/kernel/cpu/sh4/ |
softfloat.c
    90   void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
    92   void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
    638  void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,  (in add128(), argument)
    643  z1 = a1 + b1;  (in add128())
    649  sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,  (in sub128(), argument)
    652  *z1Ptr = a1 - b1;  (in sub128())
    653  *z0Ptr = a0 - b0 - (a1 < b1);  (in sub128())
    658  bits64 b0, b1;  (in estimateDiv128To64(), local)
    672  b1 = b << 32;  (in estimateDiv128To64())
    673  add128(rem0, rem1, b0, b1, &rem0, &rem1);  (in estimateDiv128To64())
/arch/x86/net/ |
bpf_jit_comp.c
    35  #define EMIT1(b1) EMIT(b1, 1)  (argument)
    36  #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)  (argument)
    37  #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  (argument)
    38  #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)  (argument)
    40  #define EMIT1_off32(b1, off) \  (argument)
    41  do { EMIT1(b1); EMIT(off, 4); } while (0)
    42  #define EMIT2_off32(b1, b2, off) \  (argument)
    43  do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
    44  #define EMIT3_off32(b1, b2, b3, off) \  (argument)
    45  do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
    [all …]
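EMIT2/EMIT3/EMIT4 pack up to four opcode bytes little-endian into one integer so a multi-byte emit is a single call. A self-contained sketch of the packing idea (the buffer handling here is illustrative, not the kernel's); bpf_jit_comp32.c below reuses the same macros:

    #include <stdint.h>
    #include <string.h>

    static uint8_t image[4096];
    static unsigned int prog_len;

    /* Append the low `len` bytes of `bytes` to the image; the byte order
     * comes out right on a little-endian host, which x86 is. */
    static void emit(uint32_t bytes, unsigned int len)
    {
        memcpy(image + prog_len, &bytes, len);
        prog_len += len;
    }

    #define EMIT1(b1)         emit((b1), 1)
    #define EMIT2(b1, b2)     emit((b1) | ((b2) << 8), 2)
    #define EMIT3(b1, b2, b3) emit((b1) | ((b2) << 8) | ((b3) << 16), 3)

    /* Example: EMIT2(0x31, 0xc0) appends the bytes of "xor eax, eax". */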
bpf_jit_comp32.c
    67  #define EMIT1(b1) EMIT(b1, 1)  (argument)
    68  #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)  (argument)
    69  #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  (argument)
    70  #define EMIT4(b1, b2, b3, b4) \  (argument)
    71  EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
    73  #define EMIT1_off32(b1, off) \  (argument)
    74  do { EMIT1(b1); EMIT(off, 4); } while (0)
    75  #define EMIT2_off32(b1, b2, off) \  (argument)
    76  do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
    77  #define EMIT3_off32(b1, b2, b3, off) \  (argument)
    [all …]
/arch/powerpc/crypto/ |
aes-tab-4k.S
    36   .long R(de, 6f, 6f, b1), R(91, c5, c5, 54)
    76   .long R(79, b1, b1, c8), R(b6, 5b, 5b, ed)
    123  .long R(01, 8d, 8d, 8c), R(b1, d5, d5, 64)
    172  .long R(de, b1, 5a, 49), R(25, ba, 1b, 67)
    186  .long R(b1, 64, 77, e0), R(bb, 6b, ae, 84)
    220  .long R(0c, 0a, 67, b1), R(93, 57, e7, 0f)
    279  .long R(e1, 1c, e5, ed), R(7a, 47, b1, 3c)
/arch/ia64/include/uapi/asm/ |
ptrace.h
    193  unsigned long b1;  (member)