/arch/x86/crypto/ |
D | serpent-sse2-i586-asm_32.S |
    28   #define RB %xmm1   (macro)
    513  read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
    515  K(RA, RB, RC, RD, RE, 0);
    516  S0(RA, RB, RC, RD, RE);  LK(RC, RB, RD, RA, RE, 1);
    517  S1(RC, RB, RD, RA, RE);  LK(RE, RD, RA, RC, RB, 2);
    518  S2(RE, RD, RA, RC, RB);  LK(RB, RD, RE, RC, RA, 3);
    519  S3(RB, RD, RE, RC, RA);  LK(RC, RA, RD, RB, RE, 4);
    520  S4(RC, RA, RD, RB, RE);  LK(RA, RD, RB, RE, RC, 5);
    521  S5(RA, RD, RB, RE, RC);  LK(RC, RA, RD, RE, RB, 6);
    522  S6(RC, RA, RD, RE, RB);  LK(RD, RB, RA, RE, RC, 7);
    [all …]
|
D | serpent-sse2-x86_64-asm_64.S |
    636  K2(RA, RB, RC, RD, RE, 0);
    637  S(S0, RA, RB, RC, RD, RE);  LK2(RC, RB, RD, RA, RE, 1);
    638  S(S1, RC, RB, RD, RA, RE);  LK2(RE, RD, RA, RC, RB, 2);
    639  S(S2, RE, RD, RA, RC, RB);  LK2(RB, RD, RE, RC, RA, 3);
    640  S(S3, RB, RD, RE, RC, RA);  LK2(RC, RA, RD, RB, RE, 4);
    641  S(S4, RC, RA, RD, RB, RE);  LK2(RA, RD, RB, RE, RC, 5);
    642  S(S5, RA, RD, RB, RE, RC);  LK2(RC, RA, RD, RE, RB, 6);
    643  S(S6, RC, RA, RD, RE, RB);  LK2(RD, RB, RA, RE, RC, 7);
    644  S(S7, RD, RB, RA, RE, RC);  LK2(RC, RA, RE, RD, RB, 8);
    645  S(S0, RC, RA, RE, RD, RB);  LK2(RE, RA, RD, RC, RB, 9);
    [all …]
|
D | serpent-avx2-asm_64.S |
    567  K2(RA, RB, RC, RD, RE, 0);
    568  S(S0, RA, RB, RC, RD, RE);  LK2(RC, RB, RD, RA, RE, 1);
    569  S(S1, RC, RB, RD, RA, RE);  LK2(RE, RD, RA, RC, RB, 2);
    570  S(S2, RE, RD, RA, RC, RB);  LK2(RB, RD, RE, RC, RA, 3);
    571  S(S3, RB, RD, RE, RC, RA);  LK2(RC, RA, RD, RB, RE, 4);
    572  S(S4, RC, RA, RD, RB, RE);  LK2(RA, RD, RB, RE, RC, 5);
    573  S(S5, RA, RD, RB, RE, RC);  LK2(RC, RA, RD, RE, RB, 6);
    574  S(S6, RC, RA, RD, RE, RB);  LK2(RD, RB, RA, RE, RC, 7);
    575  S(S7, RD, RB, RA, RE, RC);  LK2(RC, RA, RE, RD, RB, 8);
    576  S(S0, RC, RA, RE, RD, RB);  LK2(RE, RA, RD, RC, RB, 9);
    [all …]
|
D | serpent-avx-x86_64-asm_64.S |
    567  K2(RA, RB, RC, RD, RE, 0);
    568  S(S0, RA, RB, RC, RD, RE);  LK2(RC, RB, RD, RA, RE, 1);
    569  S(S1, RC, RB, RD, RA, RE);  LK2(RE, RD, RA, RC, RB, 2);
    570  S(S2, RE, RD, RA, RC, RB);  LK2(RB, RD, RE, RC, RA, 3);
    571  S(S3, RB, RD, RE, RC, RA);  LK2(RC, RA, RD, RB, RE, 4);
    572  S(S4, RC, RA, RD, RB, RE);  LK2(RA, RD, RB, RE, RC, 5);
    573  S(S5, RA, RD, RB, RE, RC);  LK2(RC, RA, RD, RE, RB, 6);
    574  S(S6, RC, RA, RD, RE, RB);  LK2(RD, RB, RA, RE, RC, 7);
    575  S(S7, RD, RB, RA, RE, RC);  LK2(RC, RA, RE, RD, RB, 8);
    576  S(S0, RC, RA, RE, RD, RB);  LK2(RE, RA, RD, RC, RB, 9);
    [all …]
|
D | twofish-avx-x86_64-asm_64.S |
    189  encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
    190  encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l);
    193  encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
    194  encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy);
    197  decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
    198  decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l);
    201  decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
    202  decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy);
|
D | cast6-avx-x86_64-asm_64.S |
    156  qop(RC, RB, 2); \
    159  qop(RB, RA, 3); \
    169  qop(RB, RA, 3); \
    172  qop(RC, RB, 2); \
|
D | sha1_avx2_x86_64_asm.S |
    109  .set RB, REG_RB   (define)
    334  .set RC, RB
    335  .set RB, RTB   (define)
|
/arch/powerpc/crypto/ |
D | sha1-powerpc-asm.S |
    28   #define RB(t) ((((t)+3)%6)+7)   (macro)
    40   andc r0,RD(t),RB(t); \
    41   and r6,RB(t),RC(t); \
    48   rotlwi RB(t),RB(t),30; \
    52   and r6,RB(t),RC(t); \
    53   andc r0,RD(t),RB(t); \
    55   rotlwi RB(t),RB(t),30; \
    67   xor r6,RB(t),RC(t); \
    69   rotlwi RB(t),RB(t),30; \
    77   xor r6,RB(t),RC(t); \
    [all …]
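The RB(t) macro above is a rotating register map: the SHA-1 working variables live in the six GPRs r7–r12, and which of them currently holds "b" depends on the round number t, so the round macros never have to shuffle values between registers. A minimal C sketch of that index arithmetic (the companion RA/RC/RD/RE macros are assumed to follow the same (((t)+k)%6)+7 pattern with other offsets; that part is not shown in the excerpt):

    #include <stdio.h>

    /* Same arithmetic as RB(t) in sha1-powerpc-asm.S: map the round
     * number t onto one of the six GPRs r7..r12. */
    static int rb_reg(int t)
    {
        return ((t + 3) % 6) + 7;
    }

    int main(void)
    {
        /* The register acting as SHA-1's "b" rotates every round. */
        for (int t = 0; t < 8; t++)
            printf("round %d: b is in r%d\n", t, rb_reg(t));
        return 0;
    }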
|
/arch/powerpc/xmon/ |
D | ppc-opc.c |
    539   #define RB RAOPT + 1   (macro)
    546   #define RBS RB + 1
    3097  {"psq_lx", XW (4, 6,0), XW_MASK, PPCPS, 0, {FRT,RA,RB,PSWM,PSQM}},
    3099  {"psq_stx", XW (4, 7,0), XW_MASK, PPCPS, 0, {FRS,RA,RB,PSWM,PSQM}},
    3101  {"mulhhwu", XRC(4, 8,0), X_MASK, MULHW, 0, {RT, RA, RB}},
    3102  {"mulhhwu.", XRC(4, 8,1), X_MASK, MULHW, 0, {RT, RA, RB}},
    3108  {"machhwu", XO (4, 12,0,0), XO_MASK, MULHW, 0, {RT, RA, RB}},
    3110  {"machhwu.", XO (4, 12,0,1), XO_MASK, MULHW, 0, {RT, RA, RB}},
    3142  {"maddhd", VXA(4, 48), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}},
    3144  {"maddhdu", VXA(4, 49), VXA_MASK, POWER9, 0, {RT, RA, RB, RC}},
    [all …]
|
/arch/powerpc/kvm/ |
D | book3s_32_sr.S |
    92   #define KVM_LOAD_BAT(n, reg, RA, RB) \   (argument)
    94   lwz RB,(n*16)+4(reg); \
    96   mtspr SPRN_IBAT##n##L,RB; \
    98   lwz RB,(n*16)+12(reg); \
    100  mtspr SPRN_DBAT##n##L,RB; \
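KVM_LOAD_BAT(n, reg, RA, RB) reloads one guest BAT set from a save area addressed by reg; the excerpt only shows the RB half, which picks up the two lower-half values at offsets +4 and +12 and writes them to SPRN_IBATnL and SPRN_DBATnL. A rough C model of the 16-bytes-per-BAT layout those offsets imply (field names are illustrative, and the upper-half loads through RA are inferred rather than shown in the excerpt):

    #include <stdint.h>

    /* Illustrative layout implied by the (n*16)+4 and (n*16)+12 offsets
     * in KVM_LOAD_BAT: each BAT occupies 16 bytes in the save area. */
    struct bat_save {
        uint32_t ibat_u;   /* +0:  -> SPRN_IBATnU (loaded via RA, not in excerpt) */
        uint32_t ibat_l;   /* +4:  -> SPRN_IBATnL (loaded via RB) */
        uint32_t dbat_u;   /* +8:  -> SPRN_DBATnU (loaded via RA, not in excerpt) */
        uint32_t dbat_l;   /* +12: -> SPRN_DBATnL (loaded via RB) */
    };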
|
/arch/sh/kernel/cpu/sh3/ |
D | swsusp.S |
    85   3: .long 0x20000000 ! RB=1
    138  2: .long 0x20000000 ! RB=1
    139  3: .long 0xdfffffff ! RB=0
|
D | entry.S | 503 1: .long 0xcfffffff ! RB=0, BL=0
|
/arch/sh/ |
D | Kconfig.cpu |
    84   This will enable the use of SR.RB register bank usage. Processors
    89   information on SR.RB and register banking in the kernel in general.
|
/arch/arm/mm/ |
D | proc-sa1100.S |
    147  mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
    187  mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
    188  mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB
|
/arch/sh/kernel/ |
D | head_32.S |
    59   mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF
    340  1: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
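The SuperH entries here all revolve around bits in the status register (SR): 0x20000000 is the RB (register bank select) bit, 0xdfffffff and 0xcfffffff are masks that clear it, and 0x500080F0 decodes to the MD=1, RB=0, BL=1, FD=1, IMASK=0xF noted in the comment. A short C sketch of the SR bit positions those constants imply (bit names and positions follow the usual SH-3/SH-4 SR layout, not the excerpts themselves):

    #include <stdio.h>
    #include <stdint.h>

    #define SR_MD    (1u << 30)   /* privileged mode */
    #define SR_RB    (1u << 29)   /* register bank select */
    #define SR_BL    (1u << 28)   /* block exceptions/interrupts */
    #define SR_FD    (1u << 15)   /* FPU disable */
    #define SR_IMASK (0xFu << 4)  /* interrupt mask level */

    int main(void)
    {
        uint32_t sr = 0x500080F0;   /* the constant from head_32.S line 340 */
        printf("MD=%d RB=%d BL=%d FD=%d IMASK=0x%X\n",
               !!(sr & SR_MD), !!(sr & SR_RB), !!(sr & SR_BL),
               !!(sr & SR_FD), (sr & SR_IMASK) >> 4);
        return 0;
    }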
|
D | relocate_kernel.S | 224 .long 0x20000000 ! RB=1
|
/arch/m68k/include/asm/ |
D | traps.h | 110 #define RB (0x1000) macro
|
/arch/nds32/mm/ |
D | alignment.c |
    20   #define RB(inst) (((inst) >> 10) & 0x1FUL)   (macro)
    475  shift = *idx_to_addr(regs, RB(inst)) << SV(inst);   (in do_32())
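RB(inst) pulls the 5-bit index-register number out of bits 14..10 of the faulting instruction word; line 475 then reads that register's value from the saved pt_regs and shifts it left by SV(inst) to form the index part of the address. A tiny C sketch of just the field extraction (the example instruction word below is arbitrary, not a real NDS32 encoding):

    #include <stdio.h>
    #include <stdint.h>

    /* Same extraction as the RB(inst) macro in alignment.c:
     * the register index lives in bits [14:10] of the instruction. */
    #define RB(inst) (((inst) >> 10) & 0x1FUL)

    int main(void)
    {
        uint32_t inst = 0x00003C00;              /* arbitrary demo word */
        printf("RB field = r%lu\n", RB(inst));   /* bits 14..10 -> 15 */
        return 0;
    }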
|
/arch/sh/kernel/cpu/shmobile/ |
D | sleep.S |
    238  rb_bit: .long 0x20000000 ! RB=1
    400  _rb_bit: .long 0x20000000 ! RB=1
|
/arch/powerpc/lib/ |
D | memcpy_power7.S |
    16   #define LVS(VRT,RA,RB) lvsl VRT,RA,RB   (argument)
    19   #define LVS(VRT,RA,RB) lvsr VRT,RA,RB   (argument)
|
D | copyuser_power7.S |
    16   #define LVS(VRT,RA,RB) lvsl VRT,RA,RB   (argument)
    19   #define LVS(VRT,RA,RB) lvsr VRT,RA,RB   (argument)
|
/arch/powerpc/kernel/ |
D | head_book3s_32.S |
    38   #define LOAD_BAT(n, reg, RA, RB) \   (argument)
    44   lwz RB,(n*16)+4(reg); \
    46   mtspr SPRN_IBAT##n##L,RB; \
    48   lwz RB,(n*16)+12(reg); \
    50   mtspr SPRN_DBAT##n##L,RB
|
/arch/m68k/kernel/ |
D | traps.c | 988 if (ssw & RB) in bad_super_trap()
|
/arch/x86/lib/ |
D | x86-opcode-map.txt | 114 45: INC eBP (i64) | REX.RB (o64)
|