/arch/arm/include/asm/ |
D | xor.h
      26  : "=r" (src), "=r" (b1), "=r" (b2) \
      28  __XOR(a1, b1); __XOR(a2, b2);
      32  : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
      34  __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
      55  register unsigned int b2 __asm__("r9");  in xor_arm4regs_2()
      76  register unsigned int b2 __asm__("r9");  in xor_arm4regs_3()
      96  register unsigned int b2 __asm__("lr");  in xor_arm4regs_4()
     115  register unsigned int b2 __asm__("lr");  in xor_arm4regs_5()
|
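A minimal plain-C sketch of the unrolling idea behind these hits (not the kernel routines, which pin b1/b2 to fixed ARM registers such as r9 and lr so the load/store-multiple inline asm can use them): two source words are in flight per iteration and are XORed into the destination in pairs.

    #include <stddef.h>

    /* Sketch only: the real xor_arm4regs_* helpers keep b1/b2 in
     * fixed registers and do the loads/stores with inline asm.
     */
    static void xor_pairs(size_t words, unsigned int *dst,
                          const unsigned int *src)
    {
            size_t i;

            for (i = 0; i + 1 < words; i += 2) {
                    unsigned int b1 = src[i];
                    unsigned int b2 = src[i + 1];

                    dst[i]     ^= b1;
                    dst[i + 1] ^= b2;
            }
    }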
/arch/x86/net/ |
D | bpf_jit_comp.c
      33  #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)  argument
      34  #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  argument
      35  #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)  argument
      39  #define EMIT2_off32(b1, b2, off) \  argument
      40  do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
      41  #define EMIT3_off32(b1, b2, b3, off) \  argument
      42  do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
      43  #define EMIT4_off32(b1, b2, b3, b4, off) \  argument
      44  do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
     301  u8 b1, b2, b3;  in emit_mov_imm32()  local
    [all …]
|
D | bpf_jit_comp32.c
      67  #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)  argument
      68  #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  argument
      69  #define EMIT4(b1, b2, b3, b4) \  argument
      70  EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
      74  #define EMIT2_off32(b1, b2, off) \  argument
      75  do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
      76  #define EMIT3_off32(b1, b2, b3, off) \  argument
      77  do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
      78  #define EMIT4_off32(b1, b2, b3, b4, off) \  argument
      79  do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
    [all …]
|
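Both JIT entries above share the same packing scheme: opcode bytes are accumulated LSB-first into one integer, so storing that integer into the instruction stream on a little-endian x86 host lays the bytes out in encoding order. A runnable sketch under that assumption (image and len are hypothetical stand-ins for the JIT's output state, not the kernel's):

    #include <stdint.h>
    #include <string.h>

    static uint8_t image[64];   /* hypothetical output buffer */
    static int len;

    static void emit(uint32_t word, int nbytes)
    {
            /* little-endian host: byte 0 of 'word' lands first */
            memcpy(image + len, &word, nbytes);
            len += nbytes;
    }

    #define EMIT2(b1, b2)  emit((b1) + ((b2) << 8), 2)

    int main(void)
    {
            EMIT2(0x31, 0xc0);      /* xor eax, eax */
            return !(image[0] == 0x31 && image[1] == 0xc0);
    }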
/arch/x86/lib/ |
D | insn.c
     140  insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);  in insn_get_prefixes()  local
     147  if (X86_MODRM_MOD(b2) != 3)  in insn_get_prefixes()
     151  insn->vex_prefix.bytes[1] = b2;  in insn_get_prefixes()
     153  b2 = peek_nbyte_next(insn_byte_t, insn, 2);  in insn_get_prefixes()
     154  insn->vex_prefix.bytes[2] = b2;  in insn_get_prefixes()
     155  b2 = peek_nbyte_next(insn_byte_t, insn, 3);  in insn_get_prefixes()
     156  insn->vex_prefix.bytes[3] = b2;  in insn_get_prefixes()
     159  if (insn->x86_64 && X86_VEX_W(b2))  in insn_get_prefixes()
     163  b2 = peek_nbyte_next(insn_byte_t, insn, 2);  in insn_get_prefixes()
     164  insn->vex_prefix.bytes[2] = b2;  in insn_get_prefixes()
    [all …]
|
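Context for the hits above: b2 is the byte peeked after a possible VEX/EVEX escape, and the number of payload bytes that follow depends on which escape it was, which is why b2 is re-peeked at offsets 1, 2 and 3. (The X86_MODRM_MOD(b2) != 3 check handles 32-bit mode, where 0xc4/0xc5 are only VEX if the next byte's mod field is 3; otherwise they decode as LES/LDS.) A sketch of the length rule, with a hypothetical helper name:

    #include <stdint.h>

    /* Assumed layout, not the kernel API: a 2-byte VEX prefix (0xc5)
     * carries one payload byte, a 3-byte VEX (0xc4) two, and EVEX
     * (0x62) three.
     */
    static int vex_payload_bytes(const uint8_t *p)
    {
            switch (p[0]) {
            case 0xc5: return 1;    /* 2-byte VEX  */
            case 0xc4: return 2;    /* 3-byte VEX  */
            case 0x62: return 3;    /* EVEX        */
            default:   return 0;    /* not a VEX-class prefix */
            }
    }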
/arch/s390/include/asm/ |
D | vx-insn.h
     263  GR_NUM b2, "%r0"
     266  .word (b2 << 12) | (\disp)
     295  GR_NUM b2, \base
     297  .word (b2 << 12) | (\disp)
     305  GR_NUM b2, \base
     307  .word (b2 << 12) | (\disp)
     346  GR_NUM b2, \base
     349  .word (b2 << 12) | (\disp)
     369  GR_NUM b2, \base  /* Base register */
     371  .word (b2 << 12) | (\disp)
    [all …]
|
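The .word directives above all emit the same operand shape: an s390 base-plus-displacement operand packs the 4-bit base register number above a 12-bit displacement in one halfword. A C rendering of that packing (helper name is illustrative only):

    #include <stdint.h>

    /* (b2 << 12) | disp, as in the vx-insn.h macros above */
    static uint16_t bd_operand(unsigned int base, unsigned int disp)
    {
            return (uint16_t)(((base & 0xf) << 12) | (disp & 0xfff));
    }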
/arch/s390/net/ |
D | bpf_jit_comp.c
     139  #define EMIT2(op, b1, b2) \  argument
     141  _EMIT2(op | reg(b1, b2)); \
     143  REG_SET_SEEN(b2); \
     153  #define EMIT4(op, b1, b2) \  argument
     155  _EMIT4(op | reg(b1, b2)); \
     157  REG_SET_SEEN(b2); \
     160  #define EMIT4_RRF(op, b1, b2, b3) \  argument
     162  _EMIT4(op | reg_high(b3) << 8 | reg(b1, b2)); \
     164  REG_SET_SEEN(b2); \
     174  #define EMIT4_DISP(op, b1, b2, disp) \  argument
    [all …]
|
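A hypothetical stand-in for the reg() helper used in the macros above: in the s390 RR-type instruction formats, two 4-bit register numbers share the byte below the opcode. The real JIT also maps BPF registers to s390 registers first; that mapping is omitted here.

    /* sketch: pack two 4-bit register numbers into one byte */
    static unsigned int reg(unsigned int r1, unsigned int r2)
    {
            return ((r1 & 0xf) << 4) | (r2 & 0xf);
    }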
/arch/arm/crypto/ |
D | aes-neonbs-core.S
      85  .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
      86  veor \b2, \b2, \b1
      89  veor \b6, \b6, \b2
      96  veor \b2, \b2, \b7
     101  .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
     105  veor \b2, \b2, \b0
     111  veor \b2, \b2, \b5
     115  .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
     120  veor \b2, \b2, \b5
     123  veor \b2, \b2, \b0
    [all …]
|
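The b2 steps visible in the in_bs_ch hits above, rendered as plain C over eight bitsliced words (partial: the full macro is an XOR network over all eight NEON registers, not scalars; the arm64 copy later in this listing is the same network written with eor):

    #include <stdint.h>

    static void in_bs_ch_b2_steps(uint64_t b[8])
    {
            b[2] ^= b[1];   /* veor \b2, \b2, \b1 */
            b[6] ^= b[2];   /* veor \b6, \b6, \b2 */
            b[2] ^= b[7];   /* veor \b2, \b2, \b7 */
    }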
D | ghash-ce-core.S
      93  .macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
     107  .macro __pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l
     115  .ifc \b2, t3l
     120  vmull.p8 t3q, \ad, \b2  @ G = A*B2
|
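What one lane of the vmull.p8 instruction above computes is an 8x8-to-16-bit carry-less multiply, i.e. multiplication of polynomials over GF(2); G = A*B2 is one such partial product in the __pmull_p8 scheme. A reference sketch:

    #include <stdint.h>

    /* carry-less multiply: XOR shifted copies instead of adding them */
    static uint16_t clmul8(uint8_t a, uint8_t b)
    {
            uint16_t r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if (b & (1u << i))
                            r ^= (uint16_t)a << i;
            return r;
    }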
/arch/x86/crypto/ |
D | cast5-avx-x86_64-asm_64.S
     125  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  argument
     127  F_head(b2, RX, RGI3, RGI4, op0); \
     130  F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
     135  #define F1_2(a1, b1, a2, b2) \  argument
     136  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
     137  #define F2_2(a1, b1, a2, b2) \  argument
     138  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
     139  #define F3_2(a1, b1, a2, b2) \  argument
     140  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
     142  #define subround(a1, b1, a2, b2, f) \  argument
    [all …]
|
D | cast6-avx-x86_64-asm_64.S
     125  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \  argument
     127  F_head(b2, RX, RGI3, RGI4, op0); \
     130  F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
     135  #define F1_2(a1, b1, a2, b2) \  argument
     136  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
     137  #define F2_2(a1, b1, a2, b2) \  argument
     138  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
     139  #define F3_2(a1, b1, a2, b2) \  argument
     140  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
|
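A hedged sketch of the round-function skeleton behind F1_2/F2_2/F3_2 in both CAST files above: the three F variants permute one add/xor/sub skeleton, which is exactly what the op0..op3 macro parameters select. The S-boxes here are zeroed dummies so the sketch compiles; real CAST uses fixed tables.

    #include <stdint.h>

    static const uint32_t s1[256], s2[256], s3[256], s4[256]; /* dummies */

    static uint32_t cast_f1(uint32_t x, uint32_t km, unsigned int kr)
    {
            uint32_t i = km + x;                    /* op0: vpaddd */

            kr &= 31;
            i = i << kr | i >> ((32 - kr) & 31);    /* key-dependent rotate */
            return ((s1[i >> 24] ^ s2[(i >> 16) & 0xff])    /* op1: xorl */
                    - s3[(i >> 8) & 0xff])                  /* op2: subl */
                    + s4[i & 0xff];                         /* op3: addl */
    }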
D | camellia-aesni-avx-asm_64.S
     433  #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \  argument
     438  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     453  vpshufb a0, b2, b2; \
     476  transpose_4x4(a2, b2, c2, d2, b0, b1); \
|
D | camellia-aesni-avx2-asm_64.S
     467  #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \  argument
     472  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     487  vpshufb a0, b2, b2; \
     510  transpose_4x4(a2, b2, c2, d2, b0, b1); \
|
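A plain-C picture of the transpose_4x4 building block used by both camellia byteslicing macros above: the asm does it with vector unpack instructions, shown here as a scalar in-place transpose of a 4x4 matrix of 32-bit lanes.

    #include <stdint.h>

    static void transpose_4x4(uint32_t m[4][4])
    {
            int i, j;

            for (i = 0; i < 4; i++)
                    for (j = i + 1; j < 4; j++) {
                            uint32_t t = m[i][j];

                            m[i][j] = m[j][i];
                            m[j][i] = t;
                    }
    }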
/arch/arm64/crypto/ |
D | aes-neonbs-core.S
      25  .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
      26  eor \b2, \b2, \b1
      29  eor \b6, \b6, \b2
      36  eor \b2, \b2, \b7
      41  .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
      45  eor \b2, \b2, \b0
      51  eor \b2, \b2, \b5
      55  .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
      60  eor \b2, \b2, \b5
      63  eor \b2, \b2, \b0
    [all …]
|
/arch/powerpc/crypto/ |
D | aes-tab-4k.S
      43  .long R(ef, fa, fa, 15), R(b2, 59, 59, eb)
      64  .long R(7f, b2, b2, cd), R(ea, 75, 75, 9f)
      67  .long R(36, 1b, 1b, 2d), R(dc, 6e, 6e, b2)
     192  .long R(b2, eb, 28, 07), R(2f, b5, c2, 03)
     194  .long R(30, 28, 87, f2), R(23, bf, a5, b2)
     238  .long R(1d, 9e, 2f, 4b), R(dc, b2, 30, f3)
     262  .long R(31, a4, b2, af), R(2a, 3f, 23, 31)
|
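One plausible reading of the R() entries above (the byte order is an assumption; the real file picks the packing to match the table's load endianness): four hex byte columns pasted into one 32-bit AES T-table word.

    /* sketch: R(ef, fa, fa, 15) -> 0xeffafa15u under this assumption */
    #define R(a, b, c, d) \
            (0x##a##u << 24 | 0x##b##u << 16 | 0x##c##u << 8 | 0x##d##u)

    static const unsigned int t_entry = R(ef, fa, fa, 15);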
/arch/ia64/include/uapi/asm/ |
D | ptrace.h
     194  unsigned long b2;  member
|
/arch/c6x/kernel/ |
D | signal.c
      44  COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);  in restore_sigcontext()
     109  COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);  in setup_sigcontext()
|
D | asm-offsets.c
      69  OFFSET(REGS_B2, pt_regs, b2);  in foo()
|
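The OFFSET() hit above is the standard kernel asm-offsets idiom: the compiler evaluates offsetof() and prints it into the generated assembly via a magic marker, which the build then turns into a header of #define constants usable from .S files. A sketch of the two macros (this mirrors the usual kbuild pattern, not c6x-specific code):

    #include <stddef.h>

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " : : "i" (val))
    #define OFFSET(sym, str, mem)  DEFINE(sym, offsetof(struct str, mem))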
/arch/arm/nwfpe/ |
D | softfloat-macros
     359  192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
     372  bits64 b2,
     381  z2 = a2 + b2;
     416  Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
     430  bits64 b2,
     439  z2 = a2 - b2;
     440  borrow1 = ( a2 < b2 );
|
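The hits above show the multi-limb carry/borrow technique: an unsigned sum wraps exactly when the result is smaller than an addend (and a difference borrows when a2 < b2), which yields the carry into the next limb. A runnable sketch of the addition side in uint64_t terms (the original uses the bits64 typedef; b0 is the most significant limb):

    #include <stdint.h>

    static void add192(uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t b0, uint64_t b1, uint64_t b2,
                       uint64_t *z0, uint64_t *z1, uint64_t *z2)
    {
            uint64_t t2 = a2 + b2;          /* least significant limb */
            uint64_t carry1 = t2 < a2;      /* wraparound => carry out */
            uint64_t t1 = a1 + b1;
            uint64_t carry0 = t1 < a1;

            t1 += carry1;
            carry0 += t1 < carry1;          /* carry from the increment */
            *z2 = t2;
            *z1 = t1;
            *z0 = a0 + b0 + carry0;
    }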
/arch/c6x/include/uapi/asm/ |
D | ptrace.h
     135  REG_PAIR(b3, b2);
|
/arch/ia64/kernel/ |
D | entry.h
      58  .spillsp b2,SW(B2)+16+(off); .spillsp b3,SW(B3)+16+(off); \
|
D | head.S
     111  SAVE_FROM_REG(b2,_reg1,_reg2);; \
    1069  SET_REG(b2);
    1092  mov b2=r18  // doing tlb_flush work
    1105  br.sptk.many b2;;  // jump to tlb purge code
    1111  RESTORE_REG(b2, r25, r17);;
|
D | relocate_kernel.S
     219  mov r4=b2
|
/arch/x86/kernel/ |
D | uprobes.c
      46  #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\  argument
      47  (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
|
/arch/x86/kernel/kprobes/ |
D | core.c
      61  #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\  argument
      62  (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
|
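The W() macro in both files above is a table-building idiom: sixteen 0/1 flags per row are token-pasted into UL constants and packed into one bitmask word, so a 256-entry opcode attribute table reads as rows of flags. A compiling sketch, shortened to four columns (W4 is a made-up name; the shift-by-row placement is also simplified):

    #define W4(row, b0, b1, b2, b3) \
            (((b0##UL << 0x0) | (b1##UL << 0x1) | \
              (b2##UL << 0x2) | (b3##UL << 0x3)) << ((row) * 4))

    /* two rows packed into one word: 0b1101 and 0b1000 */
    static const unsigned long nibble_table = W4(0, 1, 0, 1, 1)
                                            | W4(1, 0, 0, 0, 1);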
/arch/s390/kvm/ |
D | vsie.c
     289  unsigned long *b1, *b2;  in shadow_crycb()  local
     340  b2 = (unsigned long *)  in shadow_crycb()
     343  bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);  in shadow_crycb()
|
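What the bitmap_xor() call above computes, as a reference sketch (not the kernel implementation): dst = src1 ^ src2 over nbits; in shadow_crycb() the length is BITS_PER_BYTE * 56, i.e. the 56-byte mask area of the shadowed crypto control block.

    /* reference semantics of bitmap_xor(), word at a time */
    static void bitmap_xor_ref(unsigned long *dst,
                               const unsigned long *src1,
                               const unsigned long *src2,
                               unsigned int nbits)
    {
            unsigned int bpl = 8 * sizeof(unsigned long);
            unsigned int i, n = (nbits + bpl - 1) / bpl;

            for (i = 0; i < n; i++)
                    dst[i] = src1[i] ^ src2[i];
    }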