/arch/arm/include/asm/ |
D | xor.h |
      26   : "=r" (src), "=r" (b1), "=r" (b2) \
      28   __XOR(a1, b1); __XOR(a2, b2);
      32   : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
      34   __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
      54   register unsigned int b1 __asm__("r8");   in xor_arm4regs_2()
      75   register unsigned int b1 __asm__("r8");   in xor_arm4regs_3()
      95   register unsigned int b1 __asm__("ip");   in xor_arm4regs_4()
     114   register unsigned int b1 __asm__("ip");   in xor_arm4regs_5()
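A minimal, portable sketch of what these helpers compute, ignoring the register pinning the matches show (b1..b4 fixed in r8/ip) and the four-word unrolling; the function name is illustrative only:

/*
 * The 2-source RAID/XOR helper simply folds p2 into p1 word by word.
 * The kernel versions do the same thing four words at a time with the
 * operands held in fixed ARM registers; only the data flow is shown.
 */
static void xor_2src_sketch(unsigned long bytes, unsigned long *p1,
			    const unsigned long *p2)
{
	unsigned long words = bytes / sizeof(unsigned long);

	while (words--)
		*p1++ ^= *p2++;
}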
|
/arch/arm/nwfpe/ |
D | softfloat-macros |
     339   value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so
     346   bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
     350   z1 = a1 + b1;
     359   192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
     371   bits64 b1,
     383   z1 = a1 + b1;
     397   Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
     406   bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
     409   *z1Ptr = a1 - b1;
     410   *z0Ptr = a0 - b0 - ( a1 < b1 );
     [all …]
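The matches show the low-half operations and the sub128 borrow. A self-contained sketch of both 128-bit helpers, with uint64_t standing in for the nwfpe `bits64' typedef; the add128 carry line is not in the matches and is filled in the standard softfloat way:

#include <stdint.h>

/*
 * (a0,a1) is a 128-bit value with a0 as the high 64 bits.  Addition is
 * modulo 2^128, so any carry out of the high half is simply lost, as
 * the original comments note.
 */
static void add128_sketch(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
			  uint64_t *z0Ptr, uint64_t *z1Ptr)
{
	uint64_t z1 = a1 + b1;

	*z1Ptr = z1;
	*z0Ptr = a0 + b0 + (z1 < a1);	/* carry out of the low half */
}

static void sub128_sketch(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
			  uint64_t *z0Ptr, uint64_t *z1Ptr)
{
	*z1Ptr = a1 - b1;
	*z0Ptr = a0 - b0 - (a1 < b1);	/* borrow from the high half */
}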
|
/arch/alpha/include/asm/ |
D | bitops.h |
     446   unsigned long b0 = b[0], b1 = b[1], b2 = b[2];   in sched_find_first_bit() local
     449   ofs = (b1 ? 64 : 128);   in sched_find_first_bit()
     450   b1 = (b1 ? b1 : b2);   in sched_find_first_bit()
     452   b0 = (b0 ? b0 : b1);   in sched_find_first_bit()
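These matches are most of the Alpha sched_find_first_bit(): pick the first non-zero 64-bit word of a three-word priority bitmap without branching on each word, then find its first set bit. A sketch with the uncaptured lines filled in by analogy and __builtin_ctzll() standing in for the Alpha __ffs():

static unsigned long sched_find_first_bit_sketch(const unsigned long b[3])
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long ofs;

	ofs = (b1 ? 64 : 128);		/* offset to use if word 0 is empty */
	b1  = (b1 ? b1 : b2);		/* fall through to word 2 */
	ofs = (b0 ? 0 : ofs);		/* word 0 wins if it has any bit set */
	b0  = (b0 ? b0 : b1);

	return __builtin_ctzll(b0) + ofs;
}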
|
/arch/arm/common/ |
D | uengine.c |
     307   u8 b1;   in generate_ucode() local
     312   b1 = (gpr_a[i] >> 8) & 0xff;   in generate_ucode()
     318   ucode[offset++] = (b1 >> 4);   in generate_ucode()
     319   ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6);   in generate_ucode()
     335   u8 b1;   in generate_ucode() local
     340   b1 = (gpr_b[i] >> 8) & 0xff;   in generate_ucode()
     346   ucode[offset++] = (b1 >> 4);   in generate_ucode()
     347   ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6);   in generate_ucode()
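A hedged sketch of the byte-splitting pattern those lines use: the microword fields are not byte aligned, so each output byte mixes the low nibble of one field with opcode bits and the top bits of the next. The 0x0c bits and shifts come from the matches above; the rest of the microword layout is not reproduced and the function name is illustrative only:

#include <stdint.h>

static int emit_gpr_a_sketch(uint8_t *ucode, int offset, uint16_t gpr)
{
	uint8_t b0 = gpr & 0xff;
	uint8_t b1 = (gpr >> 8) & 0xff;

	ucode[offset++] = b1 >> 4;			/* top 4 bits of b1 */
	ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6);	/* low 4 of b1, top 2 of b0 */
	/* ...the remaining bits of b0 continue in the following byte... */

	return offset;
}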
|
/arch/blackfin/kernel/ |
D | mcount.S |
      30   [--sp] = b1;
      55   b1 = [sp++];   define
|
D | signal.c |
      94   RESTORE(b0); RESTORE(b1); RESTORE(b2); RESTORE(b3);   in rt_restore_sigcontext()
     159   SETUP(b0); SETUP(b1); SETUP(b2); SETUP(b3);   in rt_setup_sigcontext()
|
D | asm-offsets.c |
     100   DEFINE(PT_B1, offsetof(struct pt_regs, b1));   in main()
|
D | kgdb.c |
      73   gdb_regs[BFIN_B1] = regs->b1;   in pt_regs_to_gdb_regs()
     149   regs->b1 = gdb_regs[BFIN_B1];   in gdb_regs_to_pt_regs()
|
/arch/ia64/kernel/ |
D | module.c |
     242   uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];   in plt_target() local
     245   b0 = b[0]; b1 = b[1];   in plt_target()
     246   off = ( ((b1 & 0x00fffff000000000UL) >> 36) /* imm20b -> bit 0 */   in plt_target()
     247   | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36) /* imm39 -> bit 20 */   in plt_target()
     248   | ((b1 & 0x0800000000000000UL) << 0)); /* i -> bit 59 */   in plt_target()
     291   uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];   in plt_target() local
     293   b0 = b[0]; b1 = b[1];   in plt_target()
     294   return ( ((b1 & 0x000007f000000000) >> 36) /* imm7b -> bit 0 */   in plt_target()
     295   | ((b1 & 0x07fc000000000000) >> 43) /* imm9d -> bit 7 */   in plt_target()
     296   | ((b1 & 0x0003e00000000000) >> 29) /* imm5c -> bit 16 */   in plt_target()
     [all …]
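A sketch of the displacement reassembly in the 64-bit plt_target() above: the brl target offset is scattered across the two 64-bit bundle words and is stitched back together with exactly the shifts shown. The real function then turns this offset into an absolute address; that step is not in the search output and is omitted here:

#include <stdint.h>

static uint64_t plt_brl_offset_sketch(const uint64_t *b)
{
	uint64_t b0 = b[0], b1 = b[1];

	return ((b1 & 0x00fffff000000000UL) >> 36)	/* imm20b -> bit 0  */
	     | ((b0 >> 48) << 20)			/* imm39  -> bit 20 */
	     | ((b1 & 0x7fffffUL) << 36)
	     | ((b1 & 0x0800000000000000UL) << 0);	/* i      -> bit 59 */
}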
|
D | patch.c |
      47   u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);   in ia64_patch() local
      51   b0 = b[0]; b1 = b[1];   in ia64_patch()
      61   b[1] = (b1 & ~m1) | (v1 & m1);   in ia64_patch()
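A sketch of the masked read-modify-write ia64_patch() applies to a 16-byte-aligned instruction bundle: only the bits selected by the mask are replaced, everything else in the two 64-bit words is preserved. How the masks and values are derived from the slot number is not shown by the search, and the b[0] line here is filled in by analogy with the b[1] line above:

#include <stdint.h>

static void patch_bundle_sketch(uint64_t b[2],
				uint64_t m0, uint64_t v0,
				uint64_t m1, uint64_t v1)
{
	uint64_t b0 = b[0], b1 = b[1];

	b[0] = (b0 & ~m0) | (v0 & m0);	/* assumed, by analogy */
	b[1] = (b1 & ~m1) | (v1 & m1);
}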
|
D | entry.h |
      56   .spillsp b0,SW(B0)+16+(off); .spillsp b1,SW(B1)+16+(off); \
|
D | head.S |
     110   SAVE_FROM_REG(b1,_reg1,_reg2);; \
     411   (p7) mov b1=r9
     414   (p7) br.call.sptk.many rp=b1
    1118   SET_REG(b1);
    1228   mov b1=r18 // Return location
    1249   RESTORE_REG(b1, r25, r17);;
|
D | relocate_kernel.S |
     217   mov r5=b1;
|
D | mca_asm.S |
     132   br.sptk.many b1
     161   mov b1=r18;;
|
D | asm-offsets.c |
     165   DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));   in foo()
|
/arch/blackfin/include/asm/ |
D | context.S |
      66   [--sp] = b1;
     133   [--sp] = b1;
     192   [--sp] = b1;
     270   b1 = [sp++];   define
     333   b1 = [sp++];   define
|
D | user.h |
      47   long b0, b1, b2, b3;   member
|
D | ptrace.h |
      47   long b1;   member
|
/arch/sh/kernel/cpu/sh4/ |
D | softfloat.c |
      90   void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
      92   void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
     638   void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,   in add128() argument
     643   z1 = a1 + b1;   in add128()
     649   sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,   in sub128()
     652   *z1Ptr = a1 - b1;   in sub128()
     653   *z0Ptr = a0 - b0 - (a1 < b1);   in sub128()
     658   bits64 b0, b1;   in estimateDiv128To64() local
     672   b1 = b << 32;   in estimateDiv128To64()
     673   add128(rem0, rem1, b0, b1, &rem0, &rem1);   in estimateDiv128To64()
|
/arch/mips/pmc-sierra/yosemite/ |
D | ht.c |
     118   unsigned char b1, b2, b3, b4;   in longswap() local
     120   b1 = l&255;   in longswap()
     125   return ((b1<<24) + (b2<<16) + (b3<<8) + b4);   in longswap()
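A sketch of longswap(): a plain byte-wise 32-bit endian swap. The extractions of b2, b3 and b4 were not captured by the search and are filled in the obvious way; the casts avoid shifting a promoted signed int into its sign bit:

#include <stdint.h>

static uint32_t longswap_sketch(uint32_t l)
{
	uint8_t b1 = l & 255;		/* lowest byte */
	uint8_t b2 = (l >> 8) & 255;
	uint8_t b3 = (l >> 16) & 255;
	uint8_t b4 = (l >> 24) & 255;	/* highest byte */

	return ((uint32_t)b1 << 24) + ((uint32_t)b2 << 16) + (b3 << 8) + b4;
}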
|
/arch/ia64/include/asm/ |
D | ptrace.h |
     217   unsigned long b1;   member
|
/arch/blackfin/mach-common/ |
D | interrupt.S |
      77   [--sp] = b1;
|
D | dpmc_modes.S |
     526   [--sp] = b1;
     596   b1 = [sp++];   define
|
/arch/x86/kernel/ |
D | kprobes.c |
      77   #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\   argument
      78   (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
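A sketch of the token-pasting trick behind the W() macro above: each argument is a literal 0 or 1, ##UL glues the UL suffix onto it, and the shift moves the flag to its column in the row's bitmask. Only four columns are shown here; the kernel macro does the same for all sixteen opcode columns when building its opcode attribute tables:

#define W4(b0, b1, b2, b3) \
	((b0##UL << 0x0) | (b1##UL << 0x1) | (b2##UL << 0x2) | (b3##UL << 0x3))

static const unsigned long w4_example = W4(1, 0, 1, 1);	/* == 0x0dUL */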
|
/arch/ia64/kvm/ |
D | vcpu.c |
    1727   unsigned long *b1 = &VCPU(vcpu, vgr[0]);   in vcpu_bsw0() local
    1735   *b1++ = *r;   in vcpu_bsw0()
    1763   unsigned long *b1 = &VCPU(vcpu, vgr[0]);   in vcpu_bsw1() local
    1771   *r++ = *b1++;   in vcpu_bsw1()
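A hedged sketch of the bank-switch copy in vcpu_bsw0()/vcpu_bsw1(): the live banked register values and the saved bank array are exchanged one word at a time, in opposite directions for the two routines. The array names and the count of 16 banked registers are assumptions for this illustration; the real code walks the VCPU's vgr[]/bgr[] arrays and the guest register file:

static void bank_switch_sketch(unsigned long *regs,		/* live r16-r31 (assumed) */
			       unsigned long *save_bank,	/* bank being switched out */
			       const unsigned long *load_bank)	/* bank being switched in */
{
	unsigned long *b1 = save_bank;
	int i;

	for (i = 0; i < 16; i++) {
		*b1++ = regs[i];	/* save the outgoing bank's value */
		regs[i] = load_bank[i];	/* load the incoming bank's value */
	}
}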
|