/arch/c6x/lib/ |
D | divremu.S |
    36: [b1] lmbd .l2 1, B4, B1
    37: ||[!b1] b .s2 B3 ; RETURN A
    38: ||[!b1] mvk .d2 1, B4
    40: ||[!b1] zero .s1 A5
    52: || [b1] subc .l1x A4,B4,A4
    53: || [b1] add .s2 -1, B1, B1
    54: [b1] subc .l1x A4,B4,A4
    55: || [b1] add .s2 -1, B1, B1
    60: || [b1] subc .l1x A4,B4,A4
    61: || [b1] add .s2 -1, B1, B1
    [all …]
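The divremu.S loop above is built from SUBC, the C6x conditional-subtract-and-shift step for restoring division: LMBD sizes the divisor, and each predicated SUBC/ADD pair then produces one quotient bit. A hedged C model of that step and of a 32-bit divide built from it (a 64-bit working value keeps the sketch simple; the assembly instead normalises the divisor and handles the top bit separately to stay within 32 bits, and the helper names here are illustrative):

#include <stdint.h>

/*
 * Model of one SUBC step: if the working value is >= the divisor,
 * subtract it and shift a 1 into the quotient, else just shift in a 0.
 */
static uint64_t subc_step(uint64_t aq, uint64_t d)
{
        if (aq >= d)
                return ((aq - d) << 1) | 1;
        return aq << 1;
}

/* Illustrative 32/32 unsigned divide from 32 SUBC steps (d must be nonzero). */
static uint32_t udiv32_sketch(uint32_t n, uint32_t d)
{
        uint64_t aq = n;
        uint64_t ds = (uint64_t)d << 31;        /* divisor scaled up by 2^31 */
        int i;

        for (i = 0; i < 32; i++)
                aq = subc_step(aq, ds);

        /* Low 32 bits hold the quotient, bits 63:32 the remainder. */
        return (uint32_t)aq;
}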
|
/arch/s390/net/ |
D | bpf_jit_comp.c |
    113: static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)  in reg_set_seen() argument
    115: u32 r1 = reg2hex[b1];  in reg_set_seen()
    121: #define REG_SET_SEEN(b1) \  argument
    123: reg_set_seen(jit, b1); \
    126: #define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]  argument
    139: #define EMIT2(op, b1, b2) \  argument
    141: _EMIT2(op | reg(b1, b2)); \
    142: REG_SET_SEEN(b1); \
    153: #define EMIT4(op, b1, b2) \  argument
    155: _EMIT4(op | reg(b1, b2)); \
    [all …]
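The s390 JIT's REG_SET_SEEN()/REG_SEEN() helpers record which host registers the translated program actually uses, so the generated prologue and epilogue only save and restore those. A minimal sketch of that bookkeeping, with made-up names and an invented register mapping rather than the kernel's struct bpf_jit and reg2hex[]:

#include <stdbool.h>

#define NR_HOST_REGS    16

/* Hypothetical JIT state: one "seen" flag per host register. */
struct jit_state {
        bool seen_reg[NR_HOST_REGS];
};

/* Invented BPF-to-host register mapping (stand-in for reg2hex[]). */
static const int bpf_to_host[11] = { 14, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15 };

/* Mark the host register backing a BPF register as used. */
static void mark_reg_seen(struct jit_state *jit, int bpf_reg)
{
        jit->seen_reg[bpf_to_host[bpf_reg]] = true;
}

/* The prologue/epilogue generator can then skip untouched registers. */
static bool host_reg_seen(const struct jit_state *jit, int host_reg)
{
        return jit->seen_reg[host_reg];
}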
|
/arch/arm/include/asm/ |
D | xor.h |
    29: : "=r" (src), "=r" (b1), "=r" (b2) \
    31: __XOR(a1, b1); __XOR(a2, b2);
    35: : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
    37: __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
    57: register unsigned int b1 __asm__("r8");  in xor_arm4regs_2()
    78: register unsigned int b1 __asm__("r8");  in xor_arm4regs_3()
    98: register unsigned int b1 __asm__("ip");  in xor_arm4regs_4()
    117: register unsigned int b1 __asm__("ip");  in xor_arm4regs_5()
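These ARM xor.h helpers pin their temporaries to fixed registers and XOR the source buffer into the destination several words per loop iteration, backing the generic RAID xor_blocks() machinery. A plain C sketch of the two-source variant, ignoring the register pinning and load/store-multiple scheduling (names are illustrative):

/* XOR 'n' words of p2 into p1, four words per iteration (n % 4 == 0). */
static void xor_words_2_sketch(unsigned long n,
                               unsigned long *p1, const unsigned long *p2)
{
        while (n) {
                p1[0] ^= p2[0];
                p1[1] ^= p2[1];
                p1[2] ^= p2[2];
                p1[3] ^= p2[3];
                p1 += 4;
                p2 += 4;
                n -= 4;
        }
}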
|
/arch/arm/nwfpe/ |
D | softfloat-macros |
    339: value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so
    346: bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
    350: z1 = a1 + b1;
    359: 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
    371: bits64 b1,
    383: z1 = a1 + b1;
    397: Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
    406: bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
    409: *z1Ptr = a1 - b1;
    410: *z0Ptr = a0 - b0 - ( a1 < b1 );
    [all …]
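The softfloat macros shown here do multi-word arithmetic in portable C: a 128-bit add is two 64-bit adds plus a carry recovered by comparing the low-word sum against one of its inputs. A condensed sketch of add128() as the comments describe it, using stdint types in place of nwfpe's bits64 (a0/b0/z0 are the high words, as in the original):

#include <stdint.h>

/*
 * z0:z1 = a0:a1 + b0:b1 modulo 2^128, where the *0 halves are the high
 * 64 bits.  The carry out of the low halves is recovered from the
 * wrap-around test (lo < a1).
 */
static void add128_sketch(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                          uint64_t *z0, uint64_t *z1)
{
        uint64_t lo = a1 + b1;

        *z1 = lo;
        *z0 = a0 + b0 + (lo < a1);      /* carry from the low half */
}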
|
/arch/x86/net/ |
D | bpf_jit_comp.c |
    45: #define EMIT1(b1) EMIT(b1, 1)  argument
    46: #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)  argument
    47: #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  argument
    48: #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)  argument
    49: #define EMIT1_off32(b1, off) \  argument
    50: do {EMIT1(b1); EMIT(off, 4); } while (0)
    51: #define EMIT2_off32(b1, b2, off) \  argument
    52: do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
    53: #define EMIT3_off32(b1, b2, b3, off) \  argument
    54: do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
    [all …]
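The x86 JIT's EMIT*() macros pack up to four instruction bytes into one integer, least-significant byte first, so one store appends them to the image in memory order on a little-endian machine. A hedged sketch of the same packing idea; the buffer handling is simplified and not the kernel's prog/cnt scheme:

#include <stdint.h>

/* Hypothetical emit buffer; the real JIT tracks a prog pointer and count. */
struct emit_buf {
        uint8_t *image;
        int      len;
};

/* Append 'n' bytes that were packed little-endian into 'bytes'. */
static void emit(struct emit_buf *b, uint32_t bytes, int n)
{
        int i;

        for (i = 0; i < n; i++)
                b->image[b->len++] = (bytes >> (8 * i)) & 0xff;
}

#define EMIT1(b, b1)          emit(b, (b1), 1)
#define EMIT2(b, b1, b2)      emit(b, (b1) + ((b2) << 8), 2)
#define EMIT3(b, b1, b2, b3)  emit(b, (b1) + ((b2) << 8) + ((b3) << 16), 3)

/* Example: EMIT2(buf, 0x31, 0xc0) appends 31 c0, i.e. "xor %eax,%eax". */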
|
/arch/x86/crypto/ |
D | cast5-avx-x86_64-asm_64.S |
    140: #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  argument
    141: F_head(b1, RX, RGI1, RGI2, op0); \
    144: F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
    150: #define F1_2(a1, b1, a2, b2) \  argument
    151: F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
    152: #define F2_2(a1, b1, a2, b2) \  argument
    153: F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
    154: #define F3_2(a1, b1, a2, b2) \  argument
    155: F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
    157: #define subround(a1, b1, a2, b2, f) \  argument
    [all …]
|
D | ghash-clmulni-intel_asm.S |
    59: PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1
    60: PCLMULQDQ 0x00 T3 T2 # T2 = (a1 + a0) * (b1 + b0)
    62: pxor T1, T2 # T2 = a0 * b1 + a1 * b0
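These three lines are one Karatsuba step over GF(2)[x]: with a = a1·x^64 + a0 and b = b1·x^64 + b0, the middle term a1·b0 + a0·b1 equals (a1 + a0)·(b1 + b0) + a1·b1 + a0·b0, where '+' is XOR, so three carry-less multiplies replace four. A small sketch of that identity on 16-bit polynomials, with an illustrative clmul8() helper (not a kernel API):

#include <stdint.h>

/* Carry-less (GF(2)[x]) multiply of two 8-bit polynomials. */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
        uint16_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
                if (b & (1u << i))
                        r ^= (uint16_t)a << i;
        return r;
}

/*
 * Karatsuba over GF(2): split into 8-bit halves and recover the middle
 * term as (a1 ^ a0)*(b1 ^ b0) ^ hi ^ lo, mirroring the pxor above.
 */
static uint32_t clmul16_karatsuba(uint16_t a, uint16_t b)
{
        uint8_t  a0 = a, a1 = a >> 8, b0 = b, b1 = b >> 8;
        uint16_t lo  = clmul8(a0, b0);                    /* a0 * b0 */
        uint16_t hi  = clmul8(a1, b1);                    /* a1 * b1 */
        uint16_t mid = clmul8(a0 ^ a1, b0 ^ b1) ^ hi ^ lo;

        return ((uint32_t)hi << 16) ^ ((uint32_t)mid << 8) ^ lo;
}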
|
D | cast6-avx-x86_64-asm_64.S |
    140: #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  argument
    141: F_head(b1, RX, RGI1, RGI2, op0); \
    144: F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
    150: #define F1_2(a1, b1, a2, b2) \  argument
    151: F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
    152: #define F2_2(a1, b1, a2, b2) \  argument
    153: F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
    154: #define F3_2(a1, b1, a2, b2) \  argument
    155: F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
|
D | aesni-intel_asm.S |
    170: pxor \HK, \TMP3 # TMP3 = b1+b0
    171: PCLMULQDQ 0x11, \HK, \TMP1 # TMP1 = a1*b1
    173: PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
    680: PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
    699: PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
    714: PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
    727: PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
    744: PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
    757: PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
    775: PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
    [all …]
|
D | camellia-aesni-avx-asm_64.S |
    433: #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \  argument
    438: transpose_4x4(b0, b1, b2, b3, d2, d3); \
    452: vpshufb a0, b1, b1; \
    470: transpose_4x4(a1, b1, c1, d1, d2, d3); \
    475: vmovdqu b1, st1; \
    476: transpose_4x4(a2, b2, c2, d2, b0, b1); \
    477: transpose_4x4(a3, b3, c3, d3, b0, b1); \
    479: vmovdqu st1, b1; \
|
/arch/xtensa/platforms/iss/include/platform/ |
D | simcall.h |
    66: register int b1 asm("a3") = b;  in __simc()
    73: : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)  in __simc()
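simcall.h binds its C arguments to fixed core registers with GCC's explicit-register-variable extension before issuing the SIMCALL instruction, since the ISS expects the call number and arguments in particular registers. A rough sketch of the pattern; the register layout and signature here are assumptions, not the exact __simc():

/*
 * Sketch only: pins values to a2/a3/a4 and issues SIMCALL.  The real
 * __simc() passes more arguments and also hands back errno.
 */
static inline int simcall_sketch(int nr, int arg0, int arg1)
{
        register int a2 asm("a2") = nr;         /* simcall number  */
        register int a3 asm("a3") = arg0;       /* first argument  */
        register int a4 asm("a4") = arg1;       /* second argument */

        asm volatile ("simcall"
                      : "+r" (a2), "+r" (a3), "+r" (a4)
                      :
                      : "memory");

        return a2;                              /* result in a2 (assumed) */
}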
|
/arch/ia64/kernel/ |
D | module.c |
    245: uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];  in plt_target() local
    248: b0 = b[0]; b1 = b[1];  in plt_target()
    249: off = ( ((b1 & 0x00fffff000000000UL) >> 36) /* imm20b -> bit 0 */  in plt_target()
    250: | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36) /* imm39 -> bit 20 */  in plt_target()
    251: | ((b1 & 0x0800000000000000UL) << 0)); /* i -> bit 59 */  in plt_target()
    294: uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];  in plt_target() local
    296: b0 = b[0]; b1 = b[1];  in plt_target()
    297: return ( ((b1 & 0x000007f000000000) >> 36) /* imm7b -> bit 0 */  in plt_target()
    298: | ((b1 & 0x07fc000000000000) >> 43) /* imm9d -> bit 7 */  in plt_target()
    299: | ((b1 & 0x0003e00000000000) >> 29) /* imm5c -> bit 16 */  in plt_target()
    [all …]
|
D | patch.c |
    46: u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);  in ia64_patch() local
    50: b0 = b[0]; b1 = b[1];  in ia64_patch()
    60: b[1] = (b1 & ~m1) | (v1 & m1);  in ia64_patch()
|
/arch/arm64/boot/dts/amd/ |
D | Makefile |
    2: amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
|
/arch/blackfin/include/asm/ |
D | context.S |
    43: [--sp] = b1;
    115: [--sp] = b1;
    174: [--sp] = b1;
    253: b1 = [sp++];  define
    323: b1 = [sp++];  define
|
/arch/alpha/include/asm/ |
D | bitops.h |
    444: unsigned long b0, b1, ofs, tmp;  in sched_find_first_bit() local
    447: b1 = b[1];  in sched_find_first_bit()
    449: tmp = (b0 ? b0 : b1);  in sched_find_first_bit()
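Alpha's sched_find_first_bit() scans a small two-word priority bitmap: pick the first nonzero word, remember that word's bit offset, and add the index of its lowest set bit. A generic C sketch of the same selection, using __builtin_ctzl instead of the alpha cmov/ffs sequence (behaviour is undefined for an empty bitmap, as for the real helper):

/*
 * Index of the first set bit in a two-word bitmap (64-bit longs), as
 * the helper's b0/b1/ofs/tmp variables suggest.
 */
static unsigned long sched_find_first_bit_sketch(const unsigned long b[2])
{
        unsigned long b0 = b[0], b1 = b[1];
        unsigned long ofs = 64;                 /* bit offset of word 1 */
        unsigned long tmp = b0 ? b0 : b1;

        if (b0)
                ofs = 0;
        return ofs + __builtin_ctzl(tmp);
}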
|
/arch/arm/crypto/ |
D | ghash-ce-core.S |
    70: vmull.p64 XH, SHASH_H, XL_H @ a1 * b1
    73: vmull.p64 XM, SHASH2_L, T1_L @ (a1 + a0)(b1 + b0)
|
/arch/sh/kernel/cpu/sh4/ |
D | softfloat.c |
    90: void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
    92: void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
    638: void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,  in add128() argument
    643: z1 = a1 + b1;  in add128()
    649: sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,  in sub128() argument
    652: *z1Ptr = a1 - b1;  in sub128()
    653: *z0Ptr = a0 - b0 - (a1 < b1);  in sub128()
    658: bits64 b0, b1;  in estimateDiv128To64() local
    672: b1 = b << 32;  in estimateDiv128To64()
    673: add128(rem0, rem1, b0, b1, &rem0, &rem1);  in estimateDiv128To64()
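The SH-4 softfloat copy spells the borrow handling out directly: the high-word subtraction borrows exactly when a1 < b1 holds for the low words. Complementing the add128() sketch above, a condensed sub128() with stdint types (a0/b0/z0 again being the high words):

#include <stdint.h>

/* z0:z1 = a0:a1 - b0:b1 modulo 2^128; the *0 halves are the high words. */
static void sub128_sketch(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                          uint64_t *z0, uint64_t *z1)
{
        *z1 = a1 - b1;
        *z0 = a0 - b0 - (a1 < b1);      /* borrow from the low half */
}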
|
/arch/blackfin/include/uapi/asm/ |
D | ptrace.h |
    55: long b1;  member
|
/arch/powerpc/crypto/ |
D | aes-tab-4k.S |
    41: .long R(de, 6f, 6f, b1), R(91, c5, c5, 54)
    81: .long R(79, b1, b1, c8), R(b6, 5b, 5b, ed)
    128: .long R(01, 8d, 8d, 8c), R(b1, d5, d5, 64)
    177: .long R(de, b1, 5a, 49), R(25, ba, 1b, 67)
    191: .long R(b1, 64, 77, e0), R(bb, 6b, ae, 84)
    225: .long R(0c, 0a, 67, b1), R(93, 57, e7, 0f)
    284: .long R(e1, 1c, e5, ed), R(7a, 47, b1, 3c)
|
/arch/ia64/include/uapi/asm/ |
D | ptrace.h |
    192: unsigned long b1;  member
|
/arch/blackfin/kernel/ |
D | signal.c |
    68: RESTORE(b0); RESTORE(b1); RESTORE(b2); RESTORE(b3);  in rt_restore_sigcontext()
    129: SETUP(b0); SETUP(b1); SETUP(b2); SETUP(b3);  in rt_setup_sigcontext()
|
/arch/c6x/kernel/ |
D | signal.c |
    47: COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);  in restore_sigcontext()
    112: COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);  in setup_sigcontext()
|
/arch/x86/crypto/sha1-mb/ |
D | sha1_x8_avx2.S |
    73: # r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
    83: # r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
    94: vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
    98: vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
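The comments in sha1_x8_avx2.S document an 8-lane transpose: registers that each hold one message's words are rearranged so each register holds the same word from all eight messages, letting one AVX2 operation advance eight SHA-1 states at once. A scalar C sketch of the data movement being described, not of the vshufps sequence itself:

#include <stdint.h>

#define LANES 8

/*
 * in[i][j] = word j of message i ({a7 .. a0}, {b7 .. b0}, ...);
 * out[j][i] = the same data regrouped as {h0 g0 .. a0}, {h1 g1 .. a1}, ...
 * i.e. a plain 8x8 transpose of 32-bit words.
 */
static void transpose_8x8_sketch(const uint32_t in[LANES][LANES],
                                 uint32_t out[LANES][LANES])
{
        int i, j;

        for (i = 0; i < LANES; i++)
                for (j = 0; j < LANES; j++)
                        out[j][i] = in[i][j];
}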
|
/arch/c6x/include/uapi/asm/ |
D | ptrace.h |
    133: REG_PAIR(b1, b0);
|