Home
last modified time | relevance | path

Searched refs: IMM (Results 1 – 11 of 11) sorted by relevance

/arch/arc/lib/
Dmemcpy-archs.S:9 # define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; << argument
10 # define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >> argument
11 # define MERGE_1(RX,RY,IMM) asl RX, RY, IMM argument
12 # define MERGE_2(RX,RY,IMM) argument
13 # define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF argument
14 # define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM argument
16 # define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >> argument
17 # define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; << argument
18 # define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; << argument
19 # define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; << argument
[all …]
/arch/m68k/lib/
Dudivsi3.S:65 #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x) macro
94 cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
108 L4: lsrl IMM (1), d1 /* shift divisor */
109 lsrl IMM (1), d0 /* shift dividend */
110 cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
113 andl IMM (0xffff), d0 /* mask out divisor, ignore remainder */
129 L5: subql IMM (1), d0 /* adjust quotient */
138 link a6,IMM (-12)
143 moveq IMM (31),d4
149 bset IMM (0),d0 | set the low order bit of a to 1,
[all …]
Dumodsi3.S:65 #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x) macro
94 addql IMM (8), sp
100 addql IMM (8), sp
Dmodsi3.S:67 #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x) macro
96 addql IMM (8), sp
102 addql IMM (8), sp
Ddivsi3.S:67 #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x) macro
93 moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
114 addql IMM (8), sp
Dmulsi3.S:65 #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x) macro
/arch/powerpc/math-emu/
Dmtfsfi.c:10 mtfsfi(unsigned int crfD, unsigned int IMM) in mtfsfi() argument
18 __FPU_FPSCR |= (IMM & 0xf) << ((7 - crfD) << 2); in mtfsfi()
21 printk("%s: %d %x: %08lx\n", __func__, crfD, IMM, __FPU_FPSCR); in mtfsfi()
/arch/sparc/net/
Dbpf_jit_comp_32.c:264 #define emit_cmpi(R1, IMM) \ argument
265 *prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
270 #define emit_btsti(R1, IMM) \ argument
271 *prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
276 #define emit_subi(R1, IMM, R3) \ argument
277 *prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))
282 #define emit_addi(R1, IMM, R3) \ argument
283 *prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))
288 #define emit_andi(R1, IMM, R3) \ argument
289 *prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3))
Dbpf_jit_comp_64.c:649 #define emit_cmpi(R1, IMM, CTX) \ argument
650 emit(SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)
655 #define emit_btsti(R1, IMM, CTX) \ argument
656 emit(ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)
/arch/microblaze/kernel/
Dhw_exception_handler.S:170 .macro bsrli, rD, rA, IMM argument
171 .if (\IMM) == 2
173 .elseif (\IMM) == 10
175 .elseif (\IMM) == 12
178 .elseif (\IMM) == 14
181 .elseif (\IMM) == 20
183 .elseif (\IMM) == 24
186 .elseif (\IMM) == 28
/arch/nds32/mm/
Dalignment.c:22 #define IMM(inst) (((inst) >> 0) & 0x7FFFUL) macro
473 shift = GET_IMMSVAL(IMM(inst)) * len; in do_32()