/arch/arm/include/asm/ |
D | xor.h |
      29  : "=r" (src), "=r" (b1), "=r" (b2) \
      31  __XOR(a1, b1); __XOR(a2, b2);
      35  : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
      37  __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
      58  register unsigned int b2 __asm__("r9");  in xor_arm4regs_2()
      79  register unsigned int b2 __asm__("r9");  in xor_arm4regs_3()
      99  register unsigned int b2 __asm__("lr");  in xor_arm4regs_4()
     118  register unsigned int b2 __asm__("lr");  in xor_arm4regs_5()
|
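The xor.h hits are ARM's RAID XOR helpers: operands are pinned to fixed registers (r9, lr here) so the inline assembly can use multi-register ldm/stm transfers. A portable C sketch of the unrolled two-source XOR these routines implement (illustration only; the kernel versions are hand-written inline assembly):

    #include <stddef.h>

    /* Two-source XOR, four words per iteration -- the data flow that
     * xor_arm4regs_2() expresses with ldmia/stmia and __XOR() pairs. */
    static void xor_4regs_2_sketch(size_t bytes, unsigned long *dst,
                                   const unsigned long *src)
    {
            size_t lines = bytes / (4 * sizeof(unsigned long));

            while (lines--) {
                    dst[0] ^= src[0];
                    dst[1] ^= src[1];
                    dst[2] ^= src[2];
                    dst[3] ^= src[3];
                    dst += 4;
                    src += 4;
            }
    }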
/arch/x86/net/ |
D | bpf_jit_comp.c |
      46  #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)  argument
      47  #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  argument
      48  #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)  argument
      51  #define EMIT2_off32(b1, b2, off) \  argument
      52  do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
      53  #define EMIT3_off32(b1, b2, b3, off) \  argument
      54  do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
      55  #define EMIT4_off32(b1, b2, b3, b4, off) \  argument
      56  do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
     372  u8 b1 = 0, b2 = 0, b3 = 0;  in do_jit()  local
     [all …]
|
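The x86 JIT's EMIT macros pack up to four instruction bytes little-endian into one integer and append them to the image; the EMITn_off32 variants tack a 32-bit immediate on the end. A standalone sketch of that packing (image[] and prog_len are simplified stand-ins for the JIT's output state):

    #include <stdint.h>
    #include <stddef.h>

    static uint8_t image[4096];
    static size_t prog_len;

    /* Append the `len` low-order bytes of `bytes`, least significant
     * first -- the effect of the kernel's EMIT() helper. */
    static void emit(uint32_t bytes, unsigned int len)
    {
            while (len--) {
                    image[prog_len++] = bytes & 0xff;
                    bytes >>= 8;
            }
    }

    /* EMIT2(0x0f, 0x1f) thus emits the byte sequence 0x0f 0x1f. */
    #define EMIT2(b1, b2) emit((b1) + ((uint32_t)(b2) << 8), 2)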
/arch/x86/lib/ |
D | insn.c |
     153  insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);  in insn_get_prefixes()  local
     160  if (X86_MODRM_MOD(b2) != 3)  in insn_get_prefixes()
     164  insn->vex_prefix.bytes[1] = b2;  in insn_get_prefixes()
     166  b2 = peek_nbyte_next(insn_byte_t, insn, 2);  in insn_get_prefixes()
     167  insn->vex_prefix.bytes[2] = b2;  in insn_get_prefixes()
     168  b2 = peek_nbyte_next(insn_byte_t, insn, 3);  in insn_get_prefixes()
     169  insn->vex_prefix.bytes[3] = b2;  in insn_get_prefixes()
     172  if (insn->x86_64 && X86_VEX_W(b2))  in insn_get_prefixes()
     176  b2 = peek_nbyte_next(insn_byte_t, insn, 2);  in insn_get_prefixes()
     177  insn->vex_prefix.bytes[2] = b2;  in insn_get_prefixes()
     [all …]
|
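These insn.c lines are the VEX-prefix scan: b2 peeks at the byte after a 0xC4/0xC5 candidate. In 32-bit mode those opcodes double as LES/LDS, so they only start a VEX prefix when the would-be ModRM byte has mod == 3 (invalid for LES/LDS) -- the X86_MODRM_MOD(b2) != 3 test above. A sketch of just that disambiguation rule (function name hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)

    /* b1 is the candidate prefix byte, b2 the byte after it. */
    static bool starts_vex_prefix(bool x86_64, uint8_t b1, uint8_t b2)
    {
            if (b1 != 0xc4 && b1 != 0xc5)
                    return false;
            /* 32-bit mode: mod != 3 means this is really LES/LDS. */
            if (!x86_64 && X86_MODRM_MOD(b2) != 3)
                    return false;
            return true;
    }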
/arch/s390/net/ |
D | bpf_jit_comp.c |
     139  #define EMIT2(op, b1, b2) \  argument
     141  _EMIT2(op | reg(b1, b2)); \
     143  REG_SET_SEEN(b2); \
     153  #define EMIT4(op, b1, b2) \  argument
     155  _EMIT4(op | reg(b1, b2)); \
     157  REG_SET_SEEN(b2); \
     160  #define EMIT4_RRF(op, b1, b2, b3) \  argument
     162  _EMIT4(op | reg_high(b3) << 8 | reg(b1, b2)); \
     164  REG_SET_SEEN(b2); \
     174  #define EMIT4_DISP(op, b1, b2, disp) \  argument
     [all …]
|
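Here EMIT2(op, b1, b2) ORs two 4-bit register fields, packed by reg(b1, b2), into a 16-bit RR-format instruction template. A sketch of the field packing (the real JIT first maps BPF register numbers to s390 registers through a reg2hex table, omitted here):

    #include <stdint.h>

    /* RR format: 8-bit opcode, then R1 and R2 as two 4-bit fields. */
    static uint16_t emit2_rr(uint16_t op_template, unsigned int r1,
                             unsigned int r2)
    {
            return op_template | ((r1 & 0xf) << 4) | (r2 & 0xf);
    }

    /* Example: LR %r2,%r3 -- template 0x1800 becomes 0x1823. */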
/arch/s390/include/asm/ |
D | vx-insn.h |
     262  GR_NUM b2, "%r0"
     265  .word (b2 << 12) | (\disp)
     294  GR_NUM b2, \base
     296  .word (b2 << 12) | (\disp)
     304  GR_NUM b2, \base
     306  .word (b2 << 12) | (\disp)
     345  GR_NUM b2, \base
     348  .word (b2 << 12) | (\disp)
     368  GR_NUM b2, \base  /* Base register */
     370  .word (b2 << 12) | (\disp)
     [all …]
|
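The vx-insn.h macros hand-assemble vector instructions for toolchains without VX support; the recurring .word (b2 << 12) | (\disp) emits a base-plus-displacement operand halfword. The same field layout in C (helper name hypothetical):

    #include <stdint.h>

    /* 4-bit base register in bits 12-15, 12-bit displacement below. */
    static uint16_t bd_operand(unsigned int base, unsigned int disp)
    {
            return (uint16_t)(((base & 0xf) << 12) | (disp & 0xfff));
    }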
/arch/x86/crypto/ |
D | cast5-avx-x86_64-asm_64.S |
     140  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  argument
     142  F_head(b2, RX, RGI3, RGI4, op0); \
     145  F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
     150  #define F1_2(a1, b1, a2, b2) \  argument
     151  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
     152  #define F2_2(a1, b1, a2, b2) \  argument
     153  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
     154  #define F3_2(a1, b1, a2, b2) \  argument
     155  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
     157  #define subround(a1, b1, a2, b2, f) \  argument
     [all …]
|
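F1_2/F2_2/F3_2 instantiate one skeleton, F_2, with the three CAST round-function operation sets (the key-mix op plus xor/sub/add in rotated order), and F_2 interleaves two blocks to hide instruction latency. For reference, CAST5's type-1 round function from RFC 2144, which the vpaddd/xorl/subl/addl slots of F1_2 correspond to (s1..s4 stand for the real s-boxes, not reproduced here):

    #include <stdint.h>

    extern const uint32_t s1[256], s2[256], s3[256], s4[256];

    static uint32_t rol32(uint32_t v, unsigned int n)
    {
            return (v << (n & 31)) | (v >> ((32 - n) & 31));
    }

    /* Type 1: I = ((Km + D) <<< Kr); f = ((S1[Ia]^S2[Ib]) - S3[Ic]) + S4[Id] */
    static uint32_t cast5_f1(uint32_t d, uint32_t km, uint8_t kr)
    {
            uint32_t i = rol32(km + d, kr);

            return ((s1[i >> 24] ^ s2[(i >> 16) & 0xff]) -
                    s3[(i >> 8) & 0xff]) + s4[i & 0xff];
    }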
D | cast6-avx-x86_64-asm_64.S |
     140  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  argument
     142  F_head(b2, RX, RGI3, RGI4, op0); \
     145  F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
     150  #define F1_2(a1, b1, a2, b2) \  argument
     151  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
     152  #define F2_2(a1, b1, a2, b2) \  argument
     153  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
     154  #define F3_2(a1, b1, a2, b2) \  argument
     155  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
|
D | camellia-aesni-avx-asm_64.S |
     433  #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \  argument
     438  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     453  vpshufb a0, b2, b2; \
     476  transpose_4x4(a2, b2, c2, d2, b0, b1); \
|
/arch/blackfin/include/asm/ |
D | context.S |
      44  [--sp] = b2;
     116  [--sp] = b2;
     175  [--sp] = b2;
     252  b2 = [sp++];  define
     322  b2 = [sp++];  define
|
/arch/blackfin/include/uapi/asm/ |
D | ptrace.h |
      54  long b2;  member
|
/arch/powerpc/crypto/ |
D | aes-tab-4k.S |
      48  .long R(ef, fa, fa, 15), R(b2, 59, 59, eb)
      69  .long R(7f, b2, b2, cd), R(ea, 75, 75, 9f)
      72  .long R(36, 1b, 1b, 2d), R(dc, 6e, 6e, b2)
     197  .long R(b2, eb, 28, 07), R(2f, b5, c2, 03)
     199  .long R(30, 28, 87, f2), R(23, bf, a5, b2)
     243  .long R(1d, 9e, 2f, 4b), R(dc, b2, 30, f3)
     267  .long R(31, a4, b2, af), R(2a, 3f, 23, 31)
|
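These .long lines are packed AES lookup-table entries; R() (its definition is not among the hits) merges four byte values into one 32-bit word. As background, the standard T-table round such tables serve, one table lookup per state byte in place of SubBytes+ShiftRows+MixColumns (te0..te3 stand for the real tables):

    #include <stdint.h>

    extern const uint32_t te0[256], te1[256], te2[256], te3[256];

    /* One output column of an AES round: four lookups plus round key. */
    static uint32_t aes_round_column(uint32_t s0, uint32_t s1,
                                     uint32_t s2, uint32_t s3, uint32_t rk)
    {
            return te0[s0 >> 24] ^ te1[(s1 >> 16) & 0xff] ^
                   te2[(s2 >> 8) & 0xff] ^ te3[s3 & 0xff] ^ rk;
    }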
/arch/ia64/include/uapi/asm/ |
D | ptrace.h |
     193  unsigned long b2;  member
|
/arch/blackfin/kernel/ |
D | signal.c |
      68  RESTORE(b0); RESTORE(b1); RESTORE(b2); RESTORE(b3);  in rt_restore_sigcontext()
     129  SETUP(b0); SETUP(b1); SETUP(b2); SETUP(b3);  in rt_setup_sigcontext()
|
D | kgdb.c |
      42  gdb_regs[BFIN_B2] = regs->b2;  in pt_regs_to_gdb_regs()
     118  regs->b2 = gdb_regs[BFIN_B2];  in gdb_regs_to_pt_regs()
|
/arch/c6x/kernel/ |
D | signal.c |
      47  COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);  in restore_sigcontext()
     112  COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);  in setup_sigcontext()
|
D | asm-offsets.c |
      68  OFFSET(REGS_B2, pt_regs, b2);  in foo()
|
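asm-offsets.c files are compiled but never assembled: inline asm leaks integer constants such as offsetof(struct pt_regs, b2) into the generated .s output, which the build then scrapes into a header that assembly code can include. A simplified sketch of the trick with a toy struct (the kernel's DEFINE lives in include/linux/kbuild.h and differs in detail):

    #include <stddef.h>

    struct toy_regs {
            long a0, a1;
            long b0, b1, b2;
    };

    /* The "->NAME value" lines surface as text in the .s file. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

    void foo(void)
    {
            OFFSET(REGS_B2, toy_regs, b2);
    }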
/arch/arm/nwfpe/ |
D | softfloat-macros |
     359  192-bit value formed by concatenating `b0', `b1', and `b2'.  Addition is
     372  bits64 b2,
     381  z2 = a2 + b2;
     416  Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
     430  bits64 b2,
     439  z2 = a2 - b2;
     440  borrow1 = ( a2 < b2 );
|
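add192()/sub192() chain three 64-bit limb operations and propagate carries/borrows by unsigned-wraparound tests (z2 < a2 after an add, a2 < b2 before a subtract, as in the hits above). A self-contained C rendering of the addition; note that softfloat numbers its limbs most-significant first, so z0 is the top limb:

    #include <stdint.h>

    static void add192(uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t b0, uint64_t b1, uint64_t b2,
                       uint64_t *z0p, uint64_t *z1p, uint64_t *z2p)
    {
            uint64_t z2 = a2 + b2;
            unsigned int carry1 = (z2 < a2);   /* wrapped => carry out */
            uint64_t z1 = a1 + b1;
            unsigned int carry0 = (z1 < a1);
            uint64_t z0 = a0 + b0;

            z1 += carry1;
            z0 += (z1 < carry1);               /* carry from the += above */
            z0 += carry0;

            *z0p = z0; *z1p = z1; *z2p = z2;
    }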
/arch/x86/crypto/sha1-mb/ |
D | sha1_x8_avx2.S |
      73  # r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
      84  # r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
      95  vshufps $0xEE, \r1, \r0, \r0    # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
      99  vshufps $0x88, \r2, \r0, \r1    # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
|
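The vshufps lines are steps of an 8x8 dword transpose: eight SHA-1 messages arrive one per register ({b7..b0} and so on) and leave lane-sliced ({h2 g2 f2 e2 d2 c2 b2 a2}), so all eight hashes advance in lockstep. An intrinsics sketch of the $0xEE step, which keeps the high dword pair of each 128-bit lane from both inputs ($0x88 keeps the low pairs):

    #include <immintrin.h>

    /* With r0 = {a7..a0}, r1 = {b7..b0} this returns
     * {b7 b6 a7 a6 b3 b2 a3 a2}, matching the assembly comment. */
    static __m256i shuffle_high_pairs(__m256i r0, __m256i r1)
    {
            __m256 t = _mm256_shuffle_ps(_mm256_castsi256_ps(r0),
                                         _mm256_castsi256_ps(r1), 0xEE);
            return _mm256_castps_si256(t);
    }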
/arch/c6x/include/uapi/asm/ |
D | ptrace.h |
     134  REG_PAIR(b3, b2);
|
/arch/ia64/kernel/ |
D | entry.h |
      57  .spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off); \
|
D | head.S |
     110  SAVE_FROM_REG(b2,_reg1,_reg2);; \
    1068  SET_REG(b2);
    1091  mov b2=r18        // doing tlb_flush work
    1104  br.sptk.many b2;; // jump to tlb purge code
    1110  RESTORE_REG(b2, r25, r17);;
|
/arch/x86/crypto/sha512-mb/ |
D | sha512_x4_avx2.S |
     117  # r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
     123  # r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
     129  vshufps $0xEE, \r1, \r0, \r0    # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
|
/arch/x86/crypto/sha256-mb/ |
D | sha256_x8_avx2.S |
     129  # r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
     140  # r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
     151  vshufps $0xEE, \r1, \r0, \r0    # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
     155  vshufps $0x88, \r2, \r0, \r1    # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
|
/arch/x86/kernel/ |
D | uprobes.c |
      59  #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\  argument
      60  (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
|
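W() packs sixteen one-bit flags into one row of uprobes' 256-entry opcode attribute bitmap: flag bN lands at bit N via the b0##UL << 0x0 ... pattern. The same packing as a standalone macro (W16 is a hypothetical name; the kernel version also folds in the row position):

    #include <stdint.h>

    #define W16(b0, b1, b2, b3, b4, b5, b6, b7, \
                b8, b9, ba, bb, bc, bd, be, bf) \
            ((uint16_t)(((b0) << 0)  | ((b1) << 1)  | ((b2) << 2)  | \
                        ((b3) << 3)  | ((b4) << 4)  | ((b5) << 5)  | \
                        ((b6) << 6)  | ((b7) << 7)  | ((b8) << 8)  | \
                        ((b9) << 9)  | ((ba) << 10) | ((bb) << 11) | \
                        ((bc) << 12) | ((bd) << 13) | ((be) << 14) | \
                        ((bf) << 15)))

    /* Bits 1, 2, 8 and 15 set: */
    static const uint16_t row = W16(0,1,1,0, 0,0,0,0, 1,0,0,0, 0,0,0,1);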
/arch/blackfin/mach-common/ |
D | interrupt.S |
      58  [--sp] = b2;
|