
Searched refs:b0 (Results 1 – 25 of 72) sorted by relevance


/arch/arm64/crypto/
sm4-ce-asm.h
11 #define SM4_CRYPT_BLK_BE(b0) \
12 sm4e b0.4s, v24.4s; \
13 sm4e b0.4s, v25.4s; \
14 sm4e b0.4s, v26.4s; \
15 sm4e b0.4s, v27.4s; \
16 sm4e b0.4s, v28.4s; \
17 sm4e b0.4s, v29.4s; \
18 sm4e b0.4s, v30.4s; \
19 sm4e b0.4s, v31.4s; \
20 rev64 b0.4s, b0.4s; \
[all …]
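Each sm4e invocation above runs four SM4 rounds against one round-key vector, so the eight invocations with v24 through v31 cover all 32 rounds while the keys stay resident in registers. A minimal C sketch of the same structure, assuming a toolchain that exposes the ACLE SM4 intrinsic vsm4eq_u32, and mirroring the _BE variant (input words already big-endian):

    #include <arm_neon.h>   /* assumes __ARM_FEATURE_SM4 */

    /* rk[0..7] hold the 32 round keys, four per vector (v24-v31 in the
     * assembly); each vsm4eq_u32 call performs four SM4 rounds. */
    static uint32x4_t sm4_crypt_blk_be(uint32x4_t b0, const uint32x4_t rk[8])
    {
        for (int i = 0; i < 8; i++)
            b0 = vsm4eq_u32(b0, rk[i]);
        /* SM4 outputs its state words in reverse order: rev64 swaps
         * within 64-bit halves, ext then swaps the halves. */
        b0 = vrev64q_u32(b0);
        return vextq_u32(b0, b0, 2);
    }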
sm4-ce-gcm-core.S
109 #define SM4_CRYPT_PMUL_128x128_BLK(b0, r0, r1, m0, m1, T0, T1) \
110 rev32 b0.16b, b0.16b; \
112 sm4e b0.4s, v24.4s; \
114 sm4e b0.4s, v25.4s; \
116 sm4e b0.4s, v26.4s; \
118 sm4e b0.4s, v27.4s; \
120 sm4e b0.4s, v28.4s; \
122 sm4e b0.4s, v29.4s; \
124 sm4e b0.4s, v30.4s; \
126 sm4e b0.4s, v31.4s; \
[all …]
sm4-neon-core.S
131 #define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3) \
137 ROUND4(0, b0, b1, b2, b3); \
138 ROUND4(1, b1, b2, b3, b0); \
139 ROUND4(2, b2, b3, b0, b1); \
140 ROUND4(3, b3, b0, b1, b2); \
144 rev32 b0.16b, b0.16b; \
149 rotate_clockwise_4x4(b0, b1, b2, b3); \
154 #define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
155 rev32 b0.16b, b0.16b; \
159 SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
[all …]
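The ROUND4 sequence never moves data between registers; instead the argument order rotates so a different register receives each round's update. The same scheduling in scalar C, with sm4_t() standing in for SM4's tau/L transform (a hypothetical helper, not part of the hits above):

    #include <stdint.h>

    extern uint32_t sm4_t(uint32_t x);  /* S-box layer + rotate-XOR mix, omitted here */

    static void sm4_rounds(uint32_t s[4], const uint32_t rk[32])
    {
        for (int r = 0; r < 32; r += 4) {
            /* each round replaces one word, so the destination role
             * rotates exactly as in ROUND4(0..3) */
            s[0] ^= sm4_t(s[1] ^ s[2] ^ s[3] ^ rk[r + 0]);
            s[1] ^= sm4_t(s[2] ^ s[3] ^ s[0] ^ rk[r + 1]);
            s[2] ^= sm4_t(s[3] ^ s[0] ^ s[1] ^ rk[r + 2]);
            s[3] ^= sm4_t(s[0] ^ s[1] ^ s[2] ^ rk[r + 3]);
        }
    }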
aes-neonbs-core.S
26 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
29 eor \b3, \b3, \b0
31 eor \b5, \b5, \b0
42 .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
43 eor \b0, \b0, \b6
46 eor \b2, \b2, \b0
56 .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
64 eor \b2, \b2, \b0
67 eor \b0, \b0, \b6
71 .macro inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2
[all …]
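in_bs_ch/out_bs_ch convert into and out of bitsliced form, where register i holds bit i of every byte being processed, so a linear layer is exactly the eor chains shown. A sketch of the representation itself, assuming 64 bytes per batch:

    #include <stdint.h>

    /* Gather bit `bit` of all 64 input bytes into one 64-bit word. */
    static void bitslice64(uint64_t out[8], const uint8_t in[64])
    {
        for (int bit = 0; bit < 8; bit++) {
            uint64_t acc = 0;
            for (int i = 0; i < 64; i++)
                acc |= (uint64_t)((in[i] >> bit) & 1) << i;
            out[bit] = acc;
        }
    }

In this form, "eor \b3, \b3, \b0" becomes out[3] ^= out[0] and applies to all 64 bytes at once; the change-of-basis macros are fixed patterns of such XORs.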
/arch/ia64/lib/
ip_fast_csum.S
77 br.ret.sptk.many b0
85 mov r34=b0
90 br.call.sptk.many b0=do_csum
94 mov b0=r34
95 br.ret.sptk.many b0
146 br.ret.sptk.many b0
/arch/arm/nwfpe/
softfloat-macros
339 value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so
346 bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
352 *z0Ptr = a0 + b0 + ( z1 < a1 );
359 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
370 bits64 b0,
385 z0 = a0 + b0;
397 Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
406 bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
410 *z0Ptr = a0 - b0 - ( a1 < b1 );
416 Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
[all …]
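The carry and borrow tricks in these macros read directly back into C: after z1 = a1 + b1, a carry out of the low halves occurred exactly when z1 wrapped below a1, and a subtraction of the low halves borrows exactly when a1 < b1. A self-contained sketch matching the snippets:

    #include <stdint.h>

    typedef uint64_t bits64;

    static void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
                       bits64 *z0Ptr, bits64 *z1Ptr)
    {
        bits64 z1 = a1 + b1;
        *z1Ptr = z1;
        *z0Ptr = a0 + b0 + (z1 < a1);   /* (z1 < a1) is the carry */
    }

    static void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
                       bits64 *z0Ptr, bits64 *z1Ptr)
    {
        *z1Ptr = a1 - b1;
        *z0Ptr = a0 - b0 - (a1 < b1);   /* (a1 < b1) is the borrow */
    }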
/arch/riscv/crypto/
chacha-riscv64-zvkb.S
76 .macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \
79 vadd.vv \a0, \a0, \b0
97 vxor.vv \b0, \b0, \c0
101 vror.vi \b0, \b0, 32 - 12
107 vadd.vv \a0, \a0, \b0
125 vxor.vv \b0, \b0, \c0
129 vror.vi \b0, \b0, 32 - 7
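The vadd/vxor/vror sequence is the ChaCha quarter round; RISC-V's vror.vi rotates right, so rotating right by 32 - 12 and 32 - 7 realizes ChaCha's left-rotations by 12 and 7. The scalar round, for reference:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    static void chacha_quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);
    }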
/arch/alpha/include/asm/
bitops.h
440 unsigned long b0, b1, ofs, tmp; in sched_find_first_bit()
442 b0 = b[0]; in sched_find_first_bit()
444 ofs = (b0 ? 0 : 64); in sched_find_first_bit()
445 tmp = (b0 ? b0 : b1); in sched_find_first_bit()
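The Alpha version scans a two-word priority bitmap without a loop: select the first non-zero word, remember whether it was word 0 or word 1, then take that word's lowest set bit. A portable sketch, assuming the scheduler's guarantee that at least one bit is set, with GCC's __builtin_ctzll in place of the hardware count-trailing-zeros:

    #include <stdint.h>

    static unsigned long sched_find_first_bit128(const uint64_t b[2])
    {
        uint64_t b0 = b[0], b1 = b[1];
        unsigned long ofs = b0 ? 0 : 64;   /* which word held the first bit */
        uint64_t tmp = b0 ? b0 : b1;       /* the word to scan */
        return ofs + (unsigned long)__builtin_ctzll(tmp);
    }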
/arch/arm/crypto/
blake2s-core.S
68 .macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3
74 add \a0, \a0, \b0, ror #brot
88 eor \b0, \c0, \b0, ror #brot
95 add \a0, \a0, \b0, ror #12
109 eor \b0, \c0, \b0, ror#12
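_blake2s_quarterround is BLAKE2s's G function; the ror #brot operands show the ARM version deferring rotations, tracking how far the b words are already rotated and folding the rotate into the next add/eor instead of rotating eagerly. The eager scalar form for comparison, where m0/m1 are the two message words selected by the s0..s3 indices:

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    static void blake2s_g(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
                          uint32_t m0, uint32_t m1)
    {
        *a += *b + m0; *d = ror32(*d ^ *a, 16);
        *c += *d;      *b = ror32(*b ^ *c, 12);
        *a += *b + m1; *d = ror32(*d ^ *a, 8);
        *c += *d;      *b = ror32(*b ^ *c, 7);
    }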
aes-neonbs-core.S
80 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
83 veor \b3, \b3, \b0
85 veor \b5, \b5, \b0
96 .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
97 veor \b0, \b0, \b6
100 veor \b2, \b2, \b0
110 .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
118 veor \b2, \b2, \b0
121 veor \b0, \b0, \b6
125 .macro inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2
[all …]
/arch/ia64/kernel/
mca_asm.S
275 br.call.sptk.many b0=ia64_mca_handler
295 mov b0=r12 // SAL_CHECK return address
297 br b0
319 mov b0=r12 // SAL_CHECK return address
320 br b0
384 br.call.sptk.many b0=ia64_init_handler
404 mov b0=r12 // SAL_CHECK return address
405 br b0
459 mov b0=r2 // save return address
694 br.sptk b0
[all …]
patch.c
47 u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16); in ia64_patch()
51 b0 = b[0]; b1 = b[1]; in ia64_patch()
59 b[0] = (b0 & ~m0) | (v0 & m0); in ia64_patch()
entry.S
282 mov r21=b0
408 mov b0=r21
1045 (pRecurse) br.call.sptk.many b0=rse_clear_invalid
1054 (pReturn) br.ret.sptk.many b0
1069 (pRecurse) br.call.dptk.few b0=rse_clear_invalid
1076 (pReturn) br.ret.dptk.many b0
1099 mov b0=r21 // I0
1330 br.ret.sptk.many b0
1336 mov out2 = b0
1339 br.call.sptk.many b0 = ftrace_patch_gp
[all …]
ivt.S
260 mov r29=b0 // save b0
268 mov b0=r29
304 mov r29=b0 // save b0
312 mov b0=r29
355 (p8) mov r29=b0 // save b0
395 (p8) mov r29=b0 // save b0
506 mov b0=r30
507 br.sptk.many b0 // return to continuation point
544 mov r29=b0 // save b0 in case of nested fault
573 mov b0=r29 // restore b0
[all …]
pal.S
90 br.ret.sptk.many b0
126 br.ret.sptk.many b0
198 br.ret.sptk.many b0
256 br.ret.sptk.many b0
kprobes.c
413 ri->ret_addr = (kprobe_opcode_t *)regs->b0; in arch_prepare_kretprobe()
417 regs->b0 = (unsigned long)dereference_function_descriptor(__kretprobe_trampoline); in arch_prepare_kretprobe()
624 if ((regs->b0 == bundle_addr) || in resume_execution()
625 (regs->b0 == bundle_addr + 0x10)) { in resume_execution()
626 regs->b0 = (regs->b0 - bundle_addr) + in resume_execution()
/arch/x86/crypto/
ghash-clmulni-intel_asm.S
53 pclmulqdq $0x00, SHASH, DATA # DATA = a0 * b0
55 pclmulqdq $0x00, T3, T2 # T2 = (a1 + a0) * (b1 + b0)
57 pxor T1, T2 # T2 = a0 * b1 + a1 * b0
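The three pclmulqdq results implement Karatsuba over GF(2): a full 128-bit carry-less product needs a0*b0, a1*b1, and the cross terms, but in characteristic 2 the middle term falls out of (a1 + a0)*(b1 + b0) by XORing away the outer products, as the comment on T2 says. A portable sketch with a bit-loop standing in for pclmulqdq:

    #include <stdint.h>

    /* Carry-less 64x64 -> 128 multiply (software pclmulqdq). */
    static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
    {
        uint64_t h = 0, l = 0;
        for (int i = 0; i < 64; i++)
            if ((b >> i) & 1) {
                l ^= a << i;
                if (i)
                    h ^= a >> (64 - i);
            }
        *hi = h;
        *lo = l;
    }

    /* 128x128 -> 256 via Karatsuba: three multiplies instead of four.
     * z[0] is the least significant 64-bit limb. */
    static void clmul128(uint64_t a1, uint64_t a0, uint64_t b1, uint64_t b0,
                         uint64_t z[4])
    {
        uint64_t lo_h, lo_l, hi_h, hi_l, mid_h, mid_l;

        clmul64(a0, b0, &lo_h, &lo_l);               /* a0 * b0 */
        clmul64(a1, b1, &hi_h, &hi_l);               /* a1 * b1 */
        clmul64(a0 ^ a1, b0 ^ b1, &mid_h, &mid_l);   /* (a1+a0)*(b1+b0) */
        mid_l ^= lo_l ^ hi_l;                        /* now a0*b1 + a1*b0 */
        mid_h ^= lo_h ^ hi_h;

        z[0] = lo_l;
        z[1] = lo_h ^ mid_l;
        z[2] = hi_l ^ mid_h;
        z[3] = hi_h;
    }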
aria-gfni-avx512-asm_64.S
66 #define byteslice_16x16b(a0, b0, c0, d0, \
74 transpose_4x4(b0, b1, b2, b3, d2, d3); \
87 vpshufb a0, b0, b0; \
105 transpose_4x4(a0, b0, c0, d0, d2, d3); \
110 vmovdqu64 b0, st0; \
112 transpose_4x4(a2, b2, c2, d2, b0, b1); \
113 transpose_4x4(a3, b3, c3, d3, b0, b1); \
114 vmovdqu64 st0, b0; \
118 #define debyteslice_16x16b(a0, b0, c0, d0, \
126 transpose_4x4(b0, b1, b2, b3, d2, d3); \
[all …]
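byteslice_16x16b regroups sixteen 16-byte states so that each register holds the same byte position from every state, letting the S-box and diffusion layers operate on whole registers; its building block is the repeated transpose_4x4. The idea in scalar form, as a plain in-place 4x4 transpose:

    #include <stdint.h>

    static void transpose_4x4(uint32_t m[4][4])
    {
        for (int i = 0; i < 4; i++)
            for (int j = i + 1; j < 4; j++) {
                uint32_t t = m[i][j];
                m[i][j] = m[j][i];
                m[j][i] = t;
            }
    }

The vector versions below do the same with unpack/shuffle instructions, spill through st0 when they run short of registers (the vmovdqu64/vmovdqu lines), and debyteslice_16x16b undoes the permutation.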
aria-aesni-avx-asm_64.S
66 #define byteslice_16x16b(a0, b0, c0, d0, \
74 transpose_4x4(b0, b1, b2, b3, d2, d3); \
87 vpshufb a0, b0, b0; \
105 transpose_4x4(a0, b0, c0, d0, d2, d3); \
110 vmovdqu b0, st0; \
112 transpose_4x4(a2, b2, c2, d2, b0, b1); \
113 transpose_4x4(a3, b3, c3, d3, b0, b1); \
114 vmovdqu st0, b0; \
118 #define debyteslice_16x16b(a0, b0, c0, d0, \
126 transpose_4x4(b0, b1, b2, b3, d2, d3); \
[all …]
aria-aesni-avx2-asm_64.S
82 #define byteslice_16x16b(a0, b0, c0, d0, \
90 transpose_4x4(b0, b1, b2, b3, d2, d3); \
103 vpshufb a0, b0, b0; \
121 transpose_4x4(a0, b0, c0, d0, d2, d3); \
126 vmovdqu b0, st0; \
128 transpose_4x4(a2, b2, c2, d2, b0, b1); \
129 transpose_4x4(a3, b3, c3, d3, b0, b1); \
130 vmovdqu st0, b0; \
134 #define debyteslice_16x16b(a0, b0, c0, d0, \
142 transpose_4x4(b0, b1, b2, b3, d2, d3); \
[all …]
/arch/sh/kernel/cpu/sh4/
softfloat.c
90 void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
92 void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
638 void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, in add128()
645 *z0Ptr = a0 + b0 + (z1 < a1); in add128()
649 sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, in sub128()
653 *z0Ptr = a0 - b0 - (a1 < b1); in sub128()
658 bits64 b0, b1; in estimateDiv128To64()
663 b0 = b >> 32; in estimateDiv128To64()
665 do_div(tmp, b0); in estimateDiv128To64()
667 z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32; in estimateDiv128To64()
[all …]
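estimateDiv128To64 approximates a 128-by-64-bit quotient using only the divisor's top 32 bits, as the snippet shows: if even b's top half would push the quotient past 2^64, it clamps to the maximum estimate; otherwise a0 / (b >> 32), shifted up, seeds the estimate that the rest of the routine refines. The first step in isolation:

    #include <stdint.h>

    /* Initial high-half estimate of (a0:a1) / b, per the snippet. */
    static uint64_t div_estimate_hi(uint64_t a0, uint64_t b)
    {
        uint64_t b0 = b >> 32;            /* divisor's top 32 bits */
        if (b0 << 32 <= a0)               /* quotient would overflow 64 bits */
            return 0xFFFFFFFF00000000ULL;
        return (a0 / b0) << 32;
    }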
/arch/ia64/include/uapi/asm/
ptrace.h
105 unsigned long b0; /* return pointer (bp) */
192 unsigned long b0; /* so we can force a direct return in copy_thread */
/arch/arm64/boot/dts/amd/
Makefile
2 dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb
/arch/ia64/include/asm/
ftrace.h
9 extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
/arch/arm64/tools/
sysreg
1075 0b0 NI
1080 0b0 NI
1095 0b0 NI
1103 0b0 NI
1107 0b0 NI
1111 0b0 NI
1115 0b0 NI
1123 0b0 NI
1127 0b0 NI
1131 0b0 NI
[all …]
