/arch/nds32/kernel/ |
D | ex-entry.S |
      22  smw.adm $p0, [$sp], $p0, #0x1
      29  sethi $p0, hi20(has_fpu)
      30  lbsi $p0, [$p0+lo12(has_fpu)]
      31  beqz $p0, skip_fucop_ctl
      32  mfsr $p0, $FUCOP_CTL
      33  smw.adm $p0, [$sp], $p0, #0x1
      34  bclr $p0, $p0, #FUCOP_CTL_offCP0EN
      35  mtsr $p0, $FUCOP_CTL
      67  andi $p0, $r20, #PSW_mskPOM
      69  cmovz $fp, $p1, $p0
      [all …]
|
D | ex-exit.S |
      29  sethi $p0, hi20(has_fpu)
      30  lbsi $p0, [$p0+lo12(has_fpu)]
      31  beqz $p0, 2f
      53  pop $p0
      54  cmovn $sp, $p0, $p0
      94  lwi $p0, [$sp+(#IPSW_OFFSET)]    ! Check if in nested interrupt
      95  andi $p0, $p0, #PSW_mskINTL
      96  bnez $p0, resume_kernel          ! done with iret
     139  lwi $p0, [$sp+(#IPSW_OFFSET)]    ! Check if in nested interrupt
     140  andi $p0, $p0, #PSW_mskINTL
      [all …]
|
D | ex-scall.S |
      20  la $p0, __entry_task
      21  sw $r1, [$p0]
      54  lwi $p0, [tsk+#TSK_TI_FLAGS]                 ! check for syscall tracing
      56  andi $p1, $p0, #_TIF_WORK_SYSCALL_ENTRY      ! are we tracing syscalls?
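The ex-scall.S hits above load the thread-info flag word and mask it with _TIF_WORK_SYSCALL_ENTRY to decide whether the syscall must take the tracing slow path. A minimal C sketch of that test; the bit positions below are placeholders, not the real nds32 definitions:

    #include <stdint.h>

    #define _TIF_SYSCALL_TRACE      (1u << 8)   /* placeholder bit positions */
    #define _TIF_SYSCALL_AUDIT      (1u << 9)
    #define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)

    /* Take the slow (traced) syscall path only when a tracing flag is set. */
    static int syscall_trace_needed(uint32_t ti_flags)
    {
            return (ti_flags & _TIF_WORK_SYSCALL_ENTRY) != 0;
    }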
|
/arch/x86/include/asm/ |
D | xor_avx.h |
      29  static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)    in xor_avx_2() argument
      41  "m" (p0[i / sizeof(*p0)])); \                                                       in xor_avx_2()
      43  "=m" (p0[i / sizeof(*p0)])); \                                                      in xor_avx_2()
      48  p0 = (unsigned long *)((uintptr_t)p0 + 512);                                        in xor_avx_2()
      55  static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,    in xor_avx_3() argument
      70  "m" (p0[i / sizeof(*p0)])); \                                                       in xor_avx_3()
      72  "=m" (p0[i / sizeof(*p0)])); \                                                      in xor_avx_3()
      77  p0 = (unsigned long *)((uintptr_t)p0 + 512);                                        in xor_avx_3()
      85  static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,    in xor_avx_4() argument
     102  "m" (p0[i / sizeof(*p0)])); \                                                       in xor_avx_4()
      [all …]
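xor_avx_2/3/4 XOR one, two or three source blocks into p0, 512 bytes per iteration, with AVX loads/stores; the pointer bump to p0 + 512 advances to the next chunk. A plain-C sketch of the same operation without the AVX kernel (chunk size kept at 512 bytes for comparison; this is not the kernel's implementation and assumes bytes is a multiple of 512):

    #include <stddef.h>

    /* XOR p1 into p0, 512 bytes at a time, using ordinary word operations. */
    static void xor_scalar_2(unsigned long bytes, unsigned long *p0,
                             const unsigned long *p1)
    {
            size_t words_per_chunk = 512 / sizeof(*p0);
            unsigned long chunks = bytes / 512;

            while (chunks--) {
                    for (size_t i = 0; i < words_per_chunk; i++)
                            p0[i] ^= p1[i];
                    p0 += words_per_chunk;
                    p1 += words_per_chunk;
            }
    }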
|
/arch/hexagon/lib/ |
D | memcpy_likely_aligned.S |
      10  p0 = bitsclr(r1,#7)    define
      11  p0 = bitsclr(r0,#7)    define
      12  if (p0.new) r5:4 = memd(r1)
      13  if (p0.new) r7:6 = memd(r1+#8)
      16  if (!p0) jump:nt .Lmemcpy_call
      17  if (p0) r9:8 = memd(r1+#16)
      18  if (p0) r11:10 = memd(r1+#24)
      19  p0 = cmp.gtu(r2,#64)    define
      22  if (p0) jump:nt .Lmemcpy_call
      23  if (!p0) memd(r0) = r5:4
      [all …]
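memcpy_likely_aligned checks with bitsclr(...,#7) that both pointers are 8-byte aligned (and bails out to the generic routine via .Lmemcpy_call otherwise), then moves data with wide 64-bit memd loads and stores. The idea in portable C, heavily simplified and not the Hexagon code; the fallback here is just the libc memcpy:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static void *memcpy_aligned8(void *dst, const void *src, size_t len)
    {
            /* Fall back when either pointer is not 8-byte aligned. */
            if (((uintptr_t)dst | (uintptr_t)src) & 7)
                    return memcpy(dst, src, len);

            uint64_t *d = dst;
            const uint64_t *s = src;
            size_t n = len / 8;

            while (n--)
                    *d++ = *s++;

            /* Copy any remaining tail bytes. */
            memcpy(d, s, len & 7);
            return dst;
    }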
|
D | memset.S |
      29  p0 = cmp.eq(r2, #0)    define
      36  if p0 jumpr r31        /* count == 0, so return */
      41  p0 = tstbit(r9, #0)    define
      58  p0 = tstbit(r9, #1)    define
      60  if !p0 jump 3f         /* skip initial byte store */
      71  p0 = tstbit(r9, #2)    define
      73  if !p0 jump 4f         /* skip initial half store */
      84  p0 = cmp.gtu(r2, #7)   define
      86  if !p0 jump 5f         /* skip initial word store */
      91  p0 = cmp.gtu(r2, #11)  define
      [all …]
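These predicates implement memset's alignment prologue: return immediately on a zero count, then peel off a byte if address bit 0 is set, a halfword if bit 1 is set, and a word if bit 2 is set, after which the buffer is aligned for wide stores. A hedged C rendering of that shape (illustrative, not the Hexagon source):

    #include <stdint.h>
    #include <stddef.h>

    static void *memset_sketch(void *dst, int c, size_t n)
    {
            uint8_t *p = dst;
            uint8_t  b = (uint8_t)c;
            uint16_t h = b | ((uint16_t)b << 8);
            uint32_t w = h | ((uint32_t)h << 16);
            uint64_t d = w | ((uint64_t)w << 32);

            /* Prologue: peel off a byte/half/word until p is 8-byte aligned. */
            if (n >= 1 && ((uintptr_t)p & 1)) { *p = b;              p += 1; n -= 1; }
            if (n >= 2 && ((uintptr_t)p & 2)) { *(uint16_t *)p = h;  p += 2; n -= 2; }
            if (n >= 4 && ((uintptr_t)p & 4)) { *(uint32_t *)p = w;  p += 4; n -= 4; }

            /* Main loop: aligned 8-byte stores. */
            while (n >= 8) { *(uint64_t *)p = d; p += 8; n -= 8; }

            /* Epilogue: remaining tail bytes. */
            while (n--) *p++ = b;
            return dst;
    }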
|
D | divsi3.S |
      10  p0 = cmp.gt(r0,#-1)    define
      15  p3 = xor(p0,p1)
      18  p0 = cmp.gtu(r3,r2)    define
      26  r0 = mux(p0,#0,r0)
      27  p0 = or(p0,p1)         define
      28  if (p0.new) jumpr:nt r31
      35  p0 = cmp.gtu(r6,#4)    define
      39  if (!p0) r6 = #3
      50  if (!p0.new) r0 = add(r0,r5)
      51  if (!p0.new) r2 = sub(r2,r4)
      [all …]
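divsi3 decides the quotient's sign up front (p3 = xor(p0,p1) combines the two operands' sign tests), divides magnitudes, and negates the result when the signs differ; mux(p0,#0,r0) covers the divisor-greater-than-dividend early exit. A simplified C equivalent of that sign handling, using the C divide operator as a stand-in for the unsigned shift/subtract core (a sketch only; the INT32_MIN corner case is glossed over):

    #include <stdint.h>

    /* Stand-in for the unsigned divide core, e.g. a loop like the one
     * sketched after the udivsi3.S entry below. */
    static uint32_t udiv32(uint32_t n, uint32_t d)
    {
            return d ? n / d : 0;
    }

    /* Signed divide built on an unsigned divide plus a sign fixup:
     * the quotient is negated exactly when the operand signs differ. */
    static int32_t sdiv32(int32_t a, int32_t b)
    {
            uint32_t ua = (a < 0) ? 0u - (uint32_t)a : (uint32_t)a;
            uint32_t ub = (b < 0) ? 0u - (uint32_t)b : (uint32_t)b;
            uint32_t uq = udiv32(ua, ub);

            return ((a < 0) != (b < 0)) ? -(int32_t)uq : (int32_t)uq;
    }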
|
D | udivsi3.S |
      13  p0 = cmp.gtu(r1,r0)    define
      19  if (p0) jumpr r31
      28  p0 = cmp.gtu(r2,r1)    define
      29  if (!p0.new) r1 = sub(r1,r2)
      30  if (!p0.new) r0 = add(r0,r3)
      34  p0 = cmp.gtu(r2,r1)    define
      35  if (!p0.new) r0 = add(r0,r3)
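udivsi3 returns immediately when the divisor exceeds the dividend (quotient zero), otherwise it runs a shift-and-subtract loop: line up the divisor with the dividend, then at each step subtract where it fits and set the matching quotient bit. A compact C version of that classic restoring-division loop (a sketch under the same assumptions, not the Hexagon code):

    #include <stdint.h>

    static uint32_t udivsi3_sketch(uint32_t n, uint32_t d)
    {
            uint32_t q = 0;
            int shift = 0;

            if (d == 0 || d > n)        /* early out: quotient is zero */
                    return 0;

            /* Align the divisor with the dividend without overflowing. */
            while (!((d << shift) & 0x80000000u) && (d << (shift + 1)) <= n)
                    shift++;

            /* Restoring division: subtract where the shifted divisor fits. */
            for (; shift >= 0; shift--) {
                    if ((d << shift) <= n) {
                            n -= d << shift;
                            q |= 1u << shift;
                    }
            }
            return q;
    }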
|
D | umodsi3.S |
      12  p0 = cmp.gtu(r1,r0)    define
      16  if (p0) jumpr r31
      26  p0 = cmp.gtu(r2,r0)    define
      27  if (!p0.new) r0 = sub(r0,r2)
      32  p0 = cmp.gtu(r2,r0)    define
      33  if (!p0.new) r0 = sub(r0,r1)
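umodsi3 has the same shift-and-subtract shape as udivsi3 but keeps only the remainder: when the divisor already exceeds the dividend, the dividend is returned unchanged. The same loop in C, dropping the quotient bookkeeping (an illustrative sketch):

    #include <stdint.h>

    static uint32_t umodsi3_sketch(uint32_t n, uint32_t d)
    {
            int shift = 0;

            if (d == 0 || d > n)
                    return n;           /* early out: remainder is n itself */

            while (!((d << shift) & 0x80000000u) && (d << (shift + 1)) <= n)
                    shift++;

            for (; shift >= 0; shift--)
                    if ((d << shift) <= n)
                            n -= d << shift;

            return n;
    }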
|
D | modsi3.S |
      17  p0 = cmp.gtu(r1,r2)    define
      21  if (p0) jumpr r31
      32  p0 = cmp.gtu(r2,r0)    define
      33  if (!p0.new) r0 = sub(r0,r2)
      38  p0 = cmp.gtu(r2,r0)    define
      39  if (!p0.new) r0 = sub(r0,r1)
|
D | memcpy.S |
     148  #define ifbyte p0      /* if transfer has bytes in epilog/prolog */
     149  #define ifhword p0     /* if transfer has shorts in epilog/prolog */
     150  #define ifword p0      /* if transfer has words in epilog/prolog */
     151  #define noprolog p0    /* no prolog, xfer starts at 32byte */
     153  #define noepilog p0    /* no epilog, xfer ends on 32byte boundary */
     155  #define kernel1 p0     /* kernel count == 1 */
     187  p0 = cmp.gtu(len, #23);        /* %1, <24 */    define
     207  if(!p0) jump .Lbytes23orless;  /* %1, <24 */
     322  p0 = cmp.gt(over, #0);         define
     324  if(p0) rest = add(rest, #16);
      [all …]
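The #defines alias p0 to several roles (ifbyte, ifhword, noprolog, noepilog, kernel1) because memcpy.S is structured as a prolog that copies up to a 32-byte boundary, a kernel that moves whole 32-byte lines, and an epilog for the tail, with cmp.gtu(len,#23) routing short copies to a simple path. The overall shape in C; this is purely structural, nothing here is Hexagon-specific:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static void memcpy_shape(uint8_t *dst, const uint8_t *src, size_t len)
    {
            if (len < 24) {                     /* short copies: simple byte loop */
                    while (len--)
                            *dst++ = *src++;
                    return;
            }

            /* Prolog: bytes needed to bring dst up to a 32-byte boundary. */
            size_t prolog = (size_t)(-(uintptr_t)dst) & 31;
            if (prolog > len)
                    prolog = len;
            memcpy(dst, src, prolog);
            dst += prolog; src += prolog; len -= prolog;

            /* Kernel: whole 32-byte blocks. */
            while (len >= 32) {
                    memcpy(dst, src, 32);
                    dst += 32; src += 32; len -= 32;
            }

            /* Epilog: whatever tail remains. */
            memcpy(dst, src, len);
    }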
|
/arch/hexagon/mm/ |
D | copy_user_template.S |
      19  p0 = cmp.gtu(bytes,#0)    define
      20  if (!p0.new) jump:nt .Ldone
      26  p0 = bitsclr(r3,#7)       define
      27  if (!p0.new) jump:nt .Loop_not_aligned_8
      52  p0 = bitsclr(r4,#7)       define
      53  if (p0.new) jump:nt .Lalign
      56  p0 = bitsclr(r3,#3)       define
      57  if (!p0.new) jump:nt .Loop_not_aligned_4
      82  p0 = bitsclr(r3,#1)       define
      83  if (!p0.new) jump:nt .Loop_not_aligned
      [all …]
|
/arch/nds32/lib/ |
D | memmove.S |
      25  slt $p0, $r0, $r1          ! check if $r0 < $r1
      26  beqz $p0, do_reverse       ! branch if dst > src
      34  lmw.bim $p0, [$r1], $p0    ! Read a word from src
      36  smw.bim $p0, [$r0], $p0    ! Copy the word to det
      49  lmw.adm $p0, [$r1], $p0    ! Read a word from src
      51  smw.adm $p0, [$r0], $p0    ! Copy the word to det
      59  lb.bi $p0, [$r1], $t0      ! Read a byte from src
      61  sb.bi $p0, [$r0], $t0      ! copy the byte to det
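memmove compares dst against src (slt $p0, $r0, $r1) and copies forward when dst sits below src, backward otherwise, so overlapping regions are handled safely; the word loops use lmw/smw and a byte loop finishes the tail. The same direction decision in portable C (a sketch, not the nds32 code, and without the word-at-a-time fast path):

    #include <stddef.h>

    static void *memmove_sketch(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d < s) {
                    /* dst below src: copy forward, lowest address first. */
                    while (n--)
                            *d++ = *s++;
            } else {
                    /* dst at or above src: copy backward from the end. */
                    while (n--)
                            d[n] = s[n];
            }
            return dst;
    }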
|
D | clear_user.S |
      20  srli $p0, $r1, #2          ! $p0 = number of word to clear
      22  beqz $p0, byte_clear       ! Only less than a word to clear
      25  addi $p0, $p0, #-1         ! Decrease word count
      26  bnez $p0, word_clear       ! Continue looping to clear all words
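clear_user derives a word count from the length (srli $p0, $r1, #2) to drive the word-clearing loop, then finishes any remaining bytes in byte_clear. The structure in C, ignoring the user-access fault fixups the real routine needs (illustrative only):

    #include <stdint.h>
    #include <stddef.h>

    static void clear_sketch(void *dst, size_t n)
    {
            uint32_t *w = dst;
            size_t words = n >> 2;          /* number of whole words to clear */

            while (words--)
                    *w++ = 0;

            uint8_t *b = (uint8_t *)w;      /* at most 3 trailing bytes left */
            size_t tail = n & 3;
            while (tail--)
                    *b++ = 0;
    }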
|
D | memset.S |
      16  slli $p0, $r1, #8      ! $p0 = 0000ab00
      17  or $r1, $r1, $p0       ! $r1 = 0000abab
      18  slli $p0, $r1, #16     ! $p0 = abab0000
      19  or $r1, $r1, $p0       ! $r1 = abababab
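These four instructions replicate the fill byte across a 32-bit register: a shift-or by 8 turns 0x000000ab into 0x0000abab, and a shift-or by 16 then gives 0xabababab, so the store loop can write a whole word per iteration. The same trick in C (sketch):

    #include <stdint.h>

    /* Replicate the low byte of c into all four bytes of a 32-bit word. */
    static uint32_t replicate_byte(uint8_t c)
    {
            uint32_t v = c;
            v |= v << 8;        /* 0x000000ab -> 0x0000abab */
            v |= v << 16;       /* 0x0000abab -> 0xabababab */
            return v;
    }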
|
/arch/ia64/lib/ |
D | memset.S |
      69  cmp.eq p_scr, p0 = cnt, r0
      81  cmp.ne p_unalgn, p0 = tmp, r0            //
      84  cmp.gt p_scr, p0 = 16, cnt               // is it a minimalistic task?
     118  cmp.gt p_scr, p0 = tmp, cnt              // is it a minimalistic task?
     137  cmp.gt p_scr, p0 = PREF_AHEAD, linecnt   // check against actual value
     186  cmp.lt p_scr, p0 = ptr9, ptr1            // do we need more prefetching?
     194  cmp.le p_scr, p0 = 8, cnt                // just a few bytes left ?
     207  cmp.gt p_scr, p0 = PREF_AHEAD, linecnt   // check against actual value
     240  cmp.lt p_scr, p0 = ptr9, ptr1            // do we need more prefetching?
     248  cmp.gt p_scr, p0 = 8, cnt                // just a few bytes left ?
      [all …]
|
D | clear_user.S |
      60  cmp.eq p6,p0=r0,len    // check for zero length
      69  cmp.lt p6,p0=16,len    // if len > 16 then long memset
     104  tbit.nz p6,p0=buf,0    // odd alignment (for long_do_clear)
     108  tbit.nz p6,p0=buf,1
     112  tbit.nz p6,p0=buf,2
     116  tbit.nz p6,p0=buf,3
     122  cmp.eq p6,p0=r0,cnt
     167  tbit.nz p6,p0=len,3
|
D | strlen.S |
     104  cmp.eq p6,p0=r0,r0         // sets p6 to true for cmp.and
     119  cmp.eq.and p6,p0=8,val1    // p6 = p6 and val1==8
     120  cmp.eq.and p6,p0=8,val2    // p6 = p6 and mask==8
     137  cmp.eq.and p7,p0=8,val1    // val1==8?
     138  tnat.nz.and p7,p0=val2     // test NaT if val2
     174  cmp.eq p0,p6=r0,r0         // nullify first ld8 in loop
     184  cmp.eq p6,p0=8,val1        // val1==8 ?
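strlen.S scans eight bytes per iteration; val1==8 means "no terminator found in this word", which is what the cmp.eq.and chains test, with tnat.nz guarding the speculative loads. A portable C analogue using the well-known bit trick for "does this 64-bit word contain a zero byte"; this is a generic technique, not the ia64 code, and like any word-at-a-time strlen it may read bytes past the terminator inside the final aligned word:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Nonzero if any byte of x is 0x00 (classic bit-twiddling test). */
    static int has_zero_byte(uint64_t x)
    {
            return ((x - 0x0101010101010101ull) & ~x & 0x8080808080808080ull) != 0;
    }

    static size_t strlen_sketch(const char *s)
    {
            size_t n = 0;

            /* Byte-wise until the pointer is 8-byte aligned. */
            while ((uintptr_t)(s + n) & 7) {
                    if (!s[n])
                            return n;
                    n++;
            }

            /* Word-wise: stop at the first word containing a zero byte. */
            for (;;) {
                    uint64_t w;
                    memcpy(&w, s + n, 8);       /* aligned 8-byte read */
                    if (has_zero_byte(w))
                            break;
                    n += 8;
            }

            /* Locate the terminator inside that word. */
            while (s[n])
                    n++;
            return n;
    }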
|
D | do_csum.S |
     134  cmp.lt p0,p6=r0,len            // check for zero length or negative (32bit len)
     142  tbit.nz p15,p0=buf,0           // is buf an odd address?
     187  cmp.ltu p6,p0=result1[0],word1[0]    // check the carry
     188  cmp.eq.or.andcm p8,p0=0,count        // exit if zero 8-byte
     201  cmp.ltu p6,p0=result1[0],word1[1]
     227  (ELD_1) cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1]
     229  (ELD_1) cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1]
     245  cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1
     246  cmp.ltu p7,p0=result2[LOAD_LATENCY+1],carry2
     253  cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1]
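The cmp.ltu compares after each add detect carry out of the 64-bit accumulation: if the result is unsigned-less-than an addend, the add wrapped, and the carry is folded back in. The same end-around-carry step in C (a sketch of the technique, not the unrolled, software-pipelined ia64 loop):

    #include <stdint.h>
    #include <stddef.h>

    /* Sum len 64-bit words with end-around carry, as an Internet-checksum
     * style accumulator does. */
    static uint64_t csum_words(const uint64_t *words, size_t len)
    {
            uint64_t sum = 0;

            for (size_t i = 0; i < len; i++) {
                    uint64_t prev = sum;
                    sum += words[i];
                    if (sum < prev)     /* unsigned wrap-around means a carry out */
                            sum++;      /* fold the carry back in */
            }
            return sum;
    }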
|
D | memcpy_mck.S |
      95  cmp.gt p15,p0=8,in2              // check for small size
      96  cmp.ne p13,p0=0,r28              // check dest alignment
      97  cmp.ne p14,p0=0,r29              // check src alignment
     104  cmp.le p6,p0 = 1,r30             // for .align_dest
     119  cmp.lt p6,p0=2*PREFETCH_DIST,cnt
     145  cmp.eq p10,p0=r29,r0             // do we really need to loop?
     147  cmp.le p6,p0=8,tmp
     149  cmp.le p7,p0=16,tmp
     174  cmp.le p8,p0=24,tmp
     218  cmp.eq p16, p0 = r0, r0          // reset p16 to 1
      [all …]
|
D | copy_user.S |
      87  cmp.eq p8,p0=r0,len    // check for zero length
     204  cmp.gtu p9,p0=16,len1
     235  #define SWITCH(pred, shift) cmp.eq pred,p0=shift,rshift
     336  tbit.nz p7,p0=src1,1
     340  tbit.nz p8,p0=src1,2
     349  tbit.nz p9,p0=src1,3
     364  tbit.nz p6,p0=len1,3
     365  cmp.eq p7,p0=r0,cnt
     392  tbit.nz p7,p0=len1,2
     395  tbit.nz p8,p0=len1,1
      [all …]
|
/arch/ia64/kernel/ |
D | ivt.S |
     130  cmp.ne p8,p0=r18,r26
     270  tbit.z p6,p0=r18,_PAGE_P_BIT    // page present bit cleared?
     273  ITC_I(p0, r18, r19)
     285  cmp.ne p7,p0=r18,r19
     314  tbit.z p6,p0=r18,_PAGE_P_BIT    // page present bit cleared?
     317  ITC_D(p0, r18, r19)
     329  cmp.ne p7,p0=r18,r19
     344  MOV_FROM_IPSR(p0, r21)
     351  cmp.gt p8,p0=6,r22              // user mode
     364  cmp.ne p8,p0=r0,r23             // psr.cpl != 0?
      [all …]
|
D | fsys.S |
      89  cmp.ne p8,p0=0,r9
     118  cmp.ne p8,p0=0,r9
     146  tnat.nz p6,p0 = r33             // guard against NaT argument
     195  tnat.nz p6,p0 = r31             // guard against Nat argument
     210  cmp.ne p6, p0 = 0, r2           // Fallback if work is scheduled
     230  (p8) cmp.ne p13,p0 = r2,r0      // need itc_jitter compensation, set p13
     233  (p9) cmp.eq p13,p0 = 0,r30      // if mmio_ptr, clear p13 jitter control
     252  (p7) cmp.ne p7,p0 = r25,r3      // if cmpxchg not successful
     272  cmp4.ne p7,p0 = r28,r10
     283  cmp.ge p6,p0 = r8,r2
      [all …]
|
/arch/arm64/boot/dts/xilinx/ |
D | zynqmp-zc1232-revA.dts |
      42  ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
      43  ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
      44  ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
      45  ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
|
D | zynqmp-zc1751-xm015-dc1.dts |
     103  ceva,p0-cominit-params = /bits/ 8 <0x1B 0x4D 0x18 0x28>;
     104  ceva,p0-comwake-params = /bits/ 8 <0x06 0x19 0x08 0x0E>;
     105  ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
     106  ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
|