
Searched refs:p0 (Results 1 – 25 of 59) sorted by relevance


/arch/nds32/kernel/
ex-entry.S
22 smw.adm $p0, [$sp], $p0, #0x1
29 sethi $p0, hi20(has_fpu)
30 lbsi $p0, [$p0+lo12(has_fpu)]
31 beqz $p0, skip_fucop_ctl
32 mfsr $p0, $FUCOP_CTL
33 smw.adm $p0, [$sp], $p0, #0x1
34 bclr $p0, $p0, #FUCOP_CTL_offCP0EN
35 mtsr $p0, $FUCOP_CTL
67 andi $p0, $r20, #PSW_mskPOM
69 cmovz $fp, $p1, $p0
[all …]
ex-exit.S
29 sethi $p0, hi20(has_fpu)
30 lbsi $p0, [$p0+lo12(has_fpu)]
31 beqz $p0, 2f
53 pop $p0
54 cmovn $sp, $p0, $p0
94 lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt
95 andi $p0, $p0, #PSW_mskINTL
96 bnez $p0, resume_kernel ! done with iret
139 lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt
140 andi $p0, $p0, #PSW_mskINTL
[all …]
ex-scall.S
20 la $p0, __entry_task
21 sw $r1, [$p0]
54 lwi $p0, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing
56 andi $p1, $p0, #_TIF_WORK_SYSCALL_ENTRY ! are we tracing syscalls?
/arch/x86/include/asm/
xor_avx.h
31 static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1) in xor_avx_2() argument
43 "m" (p0[i / sizeof(*p0)])); \ in xor_avx_2()
45 "=m" (p0[i / sizeof(*p0)])); \ in xor_avx_2()
50 p0 = (unsigned long *)((uintptr_t)p0 + 512); in xor_avx_2()
57 static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1, in xor_avx_3() argument
72 "m" (p0[i / sizeof(*p0)])); \ in xor_avx_3()
74 "=m" (p0[i / sizeof(*p0)])); \ in xor_avx_3()
79 p0 = (unsigned long *)((uintptr_t)p0 + 512); in xor_avx_3()
87 static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1, in xor_avx_4() argument
104 "m" (p0[i / sizeof(*p0)])); \ in xor_avx_4()
[all …]
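
The xor_avx.h matches show p0 as the destination buffer of the AVX XOR helpers: each routine folds one or more source blocks into p0, advancing the pointers 512 bytes per unrolled pass (hence the "p0 + 512" lines above). Below is a minimal plain-C sketch of what a two-source step like xor_avx_2() computes, leaving out the AVX inline assembly and FPU-context handling; the name xor_2_sketch is illustrative, not the kernel's.

    /* Plain-C data-flow equivalent of a 2-source XOR pass: the block at
     * p1 is XORed into the block at p0.  Illustrative only -- the real
     * xor_avx_2() works 512 bytes at a time with AVX registers. */
    #include <stddef.h>

    static void xor_2_sketch(unsigned long bytes, unsigned long *p0,
                             const unsigned long *p1)
    {
            size_t i;

            for (i = 0; i < bytes / sizeof(*p0); i++)
                    p0[i] ^= p1[i];     /* destination accumulates the XOR */
    }
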
/arch/hexagon/mm/
copy_user_template.S
19 p0 = cmp.gtu(bytes,#0) define
20 if (!p0.new) jump:nt .Ldone
26 p0 = bitsclr(r3,#7) define
27 if (!p0.new) jump:nt .Loop_not_aligned_8
52 p0 = bitsclr(r4,#7) define
53 if (p0.new) jump:nt .Lalign
56 p0 = bitsclr(r3,#3) define
57 if (!p0.new) jump:nt .Loop_not_aligned_4
82 p0 = bitsclr(r3,#1) define
83 if (!p0.new) jump:nt .Loop_not_aligned
[all …]
/arch/hexagon/lib/
memset.S
29 p0 = cmp.eq(r2, #0) define
36 if p0 jumpr r31 /* count == 0, so return */
41 p0 = tstbit(r9, #0) define
58 p0 = tstbit(r9, #1) define
60 if !p0 jump 3f /* skip initial byte store */
71 p0 = tstbit(r9, #2) define
73 if !p0 jump 4f /* skip initial half store */
84 p0 = cmp.gtu(r2, #7) define
86 if !p0 jump 5f /* skip initial word store */
91 p0 = cmp.gtu(r2, #11) define
[all …]
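
In the Hexagon memset, p0 is a predicate register guarding the head-alignment stores: the tstbit tests above decide whether an initial byte, halfword, or word store is needed before the aligned main loop. A hedged C sketch of that shape follows; it mirrors the intent of those predicates rather than their exact register assignments, and memset_sketch is a made-up name.

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: peel off a byte, halfword and word as the low destination
     * bits require, store aligned doublewords, then finish bytewise. */
    static void *memset_sketch(void *dst, int c, size_t n)
    {
            unsigned char *d = dst;
            uint64_t pat = (unsigned char)c * 0x0101010101010101ULL;

            if (n >= 1 && ((uintptr_t)d & 1)) { *d++ = (unsigned char)c; n -= 1; }
            if (n >= 2 && ((uintptr_t)d & 2)) { *(uint16_t *)d = (uint16_t)pat; d += 2; n -= 2; }
            if (n >= 4 && ((uintptr_t)d & 4)) { *(uint32_t *)d = (uint32_t)pat; d += 4; n -= 4; }
            while (n >= 8) { *(uint64_t *)d = pat; d += 8; n -= 8; }
            while (n--)
                    *d++ = (unsigned char)c;
            return dst;
    }
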
memcpy.S
148 #define ifbyte p0 /* if transfer has bytes in epilog/prolog */
149 #define ifhword p0 /* if transfer has shorts in epilog/prolog */
150 #define ifword p0 /* if transfer has words in epilog/prolog */
151 #define noprolog p0 /* no prolog, xfer starts at 32byte */
153 #define noepilog p0 /* no epilog, xfer ends on 32byte boundary */
155 #define kernel1 p0 /* kernel count == 1 */
187 p0 = cmp.gtu(len, #23); /* %1, <24 */ define
207 if(!p0) jump .Lbytes23orless; /* %1, <24 */
322 p0 = cmp.gt(over, #0); define
324 if(p0) rest = add(rest, #16);
[all …]
/arch/unicore32/mm/
tlb-ucv2.S
31 movc p0.c6, r0, #3
37 movc p0.c6, r0, #5
44 movc p0.c6, r0, #2
50 movc p0.c6, r0, #4
69 movc p0.c6, r0, #3
72 movc p0.c6, r0, #5
79 movc p0.c6, r0, #2
82 movc p0.c6, r0, #4
proc-ucv2.S
37 movc p0.c5, ip, #28 @ Cache invalidate all
40 movc p0.c6, ip, #6 @ TLB invalidate all
43 movc ip, p0.c1, #0 @ ctrl register
46 movc p0.c1, ip, #0 @ disable caches and mmu
75 3: movc p0.c5, r10, #11 @ clean D entry
84 movc p0.c5, ip, #10 @ Dcache clean all
101 movc p0.c2, r0, #0 @ update page table ptr
104 movc p0.c6, ip, #6 @ TLB invalidate all
122 movc p0.c5, r2, #11 @ Dcache clean line
126 movc p0.c5, ip, #10 @ Dcache clean all
cache-ucv2.S
31 movc p0.c5, r0, #14 @ Dcache flush all
35 movc p0.c5, r0, #20 @ Icache invalidate all
70 movc p0.c5, ip, #14 @ Dcache flush all
74 movc p0.c5, ip, #20 @ Icache invalidate all
110 103: movc p0.c5, r10, #11 @ Dcache clean line of R10
120 movc p0.c5, ip, #10 @ Dcache clean all
124 movc p0.c5, ip, #20 @ Icache invalidate all
137 movc p0.c5, ip, #14 @ Dcache flush all
164 1: movc p0.c5, r10, #11 @ Dcache clean line of R10
173 movc p0.c5, ip, #10 @ Dcache clean all
[all …]
/arch/nds32/lib/
memmove.S
25 slt $p0, $r0, $r1 ! check if $r0 < $r1
26 beqz $p0, do_reverse ! branch if dst > src
34 lmw.bim $p0, [$r1], $p0 ! Read a word from src
36 smw.bim $p0, [$r0], $p0 ! Copy the word to det
49 lmw.adm $p0, [$r1], $p0 ! Read a word from src
51 smw.adm $p0, [$r0], $p0 ! Copy the word to det
59 lb.bi $p0, [$r1], $t0 ! Read a byte from src
61 sb.bi $p0, [$r0], $t0 ! copy the byte to det
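
The memmove.S matches show p0 first holding the dst < src comparison that picks the copy direction, then being reused as the data register in the lmw/smw word loops. A minimal C sketch of the direction choice, copying bytes rather than words for brevity:

    #include <stddef.h>

    /* Copy forwards when dst is below src, backwards otherwise, so an
     * overlapping source is never overwritten before it is read. */
    static void *memmove_sketch(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d < s) {                    /* forward: the .bim post-increment path */
                    while (n--)
                            *d++ = *s++;
            } else {                        /* reverse: the .adm pre-decrement path */
                    d += n;
                    s += n;
                    while (n--)
                            *--d = *--s;
            }
            return dst;
    }
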
clear_user.S
20 srli $p0, $r1, #2 ! $p0 = number of word to clear
22 beqz $p0, byte_clear ! Only less than a word to clear
25 addi $p0, $p0, #-1 ! Decrease word count
26 bnez $p0, word_clear ! Continue looping to clear all words
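
clear_user.S splits the length into a word loop and a byte tail, with p0 holding the remaining word count (len >> 2). A rough C sketch of that split, assuming a word-aligned destination and omitting the user-space fault fixups the real routine needs:

    #include <stddef.h>
    #include <stdint.h>

    static void clear_sketch(void *dst, size_t len)
    {
            uint32_t *w = dst;              /* word-aligned by assumption */
            size_t words = len >> 2;        /* srli $p0, $r1, #2 */
            unsigned char *b;

            while (words--)                 /* word_clear loop */
                    *w++ = 0;
            b = (unsigned char *)w;
            len &= 3;
            while (len--)                   /* byte_clear tail */
                    *b++ = 0;
    }
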
memset.S
16 slli $p0, $r1, #8 ! $p0 = 0000ab00
17 or $r1, $r1, $p0 ! $r1 = 0000abab
18 slli $p0, $r1, #16 ! $p0 = abab0000
19 or $r1, $r1, $p0 ! $r1 = abababab
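
The four memset.S matches above replicate the fill byte across a register (ab -> abab -> abababab) so the store loop can write whole words. The same shift-and-or trick in C:

    #include <stdint.h>

    static uint32_t replicate_byte(uint8_t c)
    {
            uint32_t v = c;        /* 000000ab */

            v |= v << 8;           /* slli #8 + or  -> 0000abab */
            v |= v << 16;          /* slli #16 + or -> abababab */
            return v;
    }
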
/arch/ia64/lib/
memset.S
69 cmp.eq p_scr, p0 = cnt, r0
81 cmp.ne p_unalgn, p0 = tmp, r0 //
84 cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
118 cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
137 cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
186 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
194 cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
207 cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
240 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
248 cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
[all …]
clear_user.S
60 cmp.eq p6,p0=r0,len // check for zero length
69 cmp.lt p6,p0=16,len // if len > 16 then long memset
104 tbit.nz p6,p0=buf,0 // odd alignment (for long_do_clear)
108 tbit.nz p6,p0=buf,1
112 tbit.nz p6,p0=buf,2
116 tbit.nz p6,p0=buf,3
122 cmp.eq p6,p0=r0,cnt
167 tbit.nz p6,p0=len,3
strlen.S
104 cmp.eq p6,p0=r0,r0 // sets p6 to true for cmp.and
119 cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
120 cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
137 cmp.eq.and p7,p0=8,val1// val1==8?
138 tnat.nz.and p7,p0=val2 // test NaT if val2
174 cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
184 cmp.eq p6,p0=8,val1 // val1==8 ?
do_csum.S
134 cmp.lt p0,p6=r0,len // check for zero length or negative (32bit len)
142 tbit.nz p15,p0=buf,0 // is buf an odd address?
187 cmp.ltu p6,p0=result1[0],word1[0] // check the carry
188 cmp.eq.or.andcm p8,p0=0,count // exit if zero 8-byte
201 cmp.ltu p6,p0=result1[0],word1[1]
227 (ELD_1) cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1]
229 (ELD_1) cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1]
245 cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1
246 cmp.ltu p7,p0=result2[LOAD_LATENCY+1],carry2
253 cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1]
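
In do_csum.S the cmp.ltu "check the carry" predicates detect overflow of an unsigned add (result < addend implies a carry out), which is then folded back in to build the ones'-complement Internet-checksum sum. A scalar C sketch of that step, with an illustrative name:

    #include <stdint.h>

    static uint64_t csum_add_sketch(uint64_t sum, uint64_t word)
    {
            uint64_t result = sum + word;

            if (result < word)      /* cmp.ltu: the add carried out */
                    result++;       /* end-around carry */
            return result;
    }
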
memcpy_mck.S
95 cmp.gt p15,p0=8,in2 // check for small size
96 cmp.ne p13,p0=0,r28 // check dest alignment
97 cmp.ne p14,p0=0,r29 // check src alignment
104 cmp.le p6,p0 = 1,r30 // for .align_dest
119 cmp.lt p6,p0=2*PREFETCH_DIST,cnt
145 cmp.eq p10,p0=r29,r0 // do we really need to loop?
147 cmp.le p6,p0=8,tmp
149 cmp.le p7,p0=16,tmp
174 cmp.le p8,p0=24,tmp
218 cmp.eq p16, p0 = r0, r0 // reset p16 to 1
[all …]
copy_user.S
87 cmp.eq p8,p0=r0,len // check for zero length
204 cmp.gtu p9,p0=16,len1
235 #define SWITCH(pred, shift) cmp.eq pred,p0=shift,rshift
336 tbit.nz p7,p0=src1,1
340 tbit.nz p8,p0=src1,2
349 tbit.nz p9,p0=src1,3
364 tbit.nz p6,p0=len1,3
365 cmp.eq p7,p0=r0,cnt
392 tbit.nz p7,p0=len1,2
395 tbit.nz p8,p0=len1,1
[all …]
/arch/ia64/kernel/
ivt.S
130 cmp.ne p8,p0=r18,r26
270 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
273 ITC_I(p0, r18, r19)
285 cmp.ne p7,p0=r18,r19
314 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
317 ITC_D(p0, r18, r19)
329 cmp.ne p7,p0=r18,r19
344 MOV_FROM_IPSR(p0, r21)
351 cmp.gt p8,p0=6,r22 // user mode
364 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
[all …]
fsys.S
89 cmp.ne p8,p0=0,r9
118 cmp.ne p8,p0=0,r9
146 tnat.nz p6,p0 = r33 // guard against NaT argument
195 tnat.nz p6,p0 = r31 // guard against Nat argument
210 cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
230 (p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13
233 (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
252 (p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful
272 cmp4.ne p7,p0 = r28,r10
283 cmp.ge p6,p0 = r8,r2
[all …]
/arch/unicore32/kernel/
sleep.S
21 movc r3, p0.c7, #0 @ PID
22 movc r4, p0.c2, #0 @ translation table base addr
23 movc r5, p0.c1, #0 @ control reg
67 movc p0.c5, r1, #14
168 movc p0.c6, r1, #6 @ invalidate I & D TLBs
169 movc p0.c5, r1, #28 @ invalidate I & D caches, BTB
171 movc p0.c7, r3, #0 @ PID
172 movc p0.c2, r4, #0 @ translation table base addr
173 movc p0.c1, r5, #0 @ control reg, turn on mmu
hibernate_asm.S
28 movc p0.c6, r5, #6 @invalidate ITLB & DTLB
29 movc p0.c2, r0, #0
61 movc p0.c6, r5, #6
62 movc p0.c2, r0, #0
/arch/arm64/boot/dts/xilinx/
zynqmp-zc1232-revA.dts
42 ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
43 ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
44 ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
45 ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
/arch/unicore32/boot/compressed/
head.S
84 movc p0.c5, r0, #28 @ cache invalidate all
86 movc p0.c6, r0, #6 @ tlb invalidate all
90 movc p0.c1, r0, #0
140 movc p0.c5, r0, #14 @ flush dcache
142 movc p0.c5, r0, #20 @ icache invalidate all
149 movc p0.c1, r0, #0
