Searched refs: p6 (Results 1 – 24 of 24) sorted by relevance

/arch/ia64/lib/
clear_user.S
60 cmp.eq p6,p0=r0,len // check for zero length
67 (p6) br.ret.spnt.many rp
69 cmp.lt p6,p0=16,len // if len > 16 then long memset
71 (p6) br.cond.dptk .long_do_clear
104 tbit.nz p6,p0=buf,0 // odd alignment (for long_do_clear)
106 EX( .Lexit3, (p6) st1 [buf]=r0,1 ) // 1-byte aligned
107 (p6) adds len=-1,len;; // sync because buf is modified
108 tbit.nz p6,p0=buf,1
110 EX( .Lexit3, (p6) st2 [buf]=r0,2 ) // 2-byte aligned
111 (p6) adds len=-2,len;;
[all …]
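
Note: the clear_user.S hits above show the usual shape of a user-buffer clear: return immediately on zero length, and for long buffers align the pointer with predicated 1-, 2- and 4-byte stores (each one also decrementing len) before dropping into an 8-byte store loop. A minimal C sketch of that align-then-bulk pattern, assuming a plain kernel buffer (no fault handling, which is what the EX() annotations add in the assembly):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch only: zero `len` bytes at `buf`, aligning first so the bulk
     * of the work is 8-byte stores, as clear_user.S does with predicated
     * st1/st2/st4 followed by an st8 loop. */
    static void clear_sketch(void *buf, size_t len)
    {
        uint8_t *p = buf;

        if (len == 0)                           /* cmp.eq p6,p0=r0,len */
            return;
        if ((uintptr_t)p & 1) {                 /* odd alignment: one st1 */
            *p = 0; p += 1; len -= 1;
        }
        if (((uintptr_t)p & 2) && len >= 2) {   /* 2-byte aligned: one st2 */
            *(uint16_t *)p = 0; p += 2; len -= 2;
        }
        if (((uintptr_t)p & 4) && len >= 4) {   /* 4-byte aligned: one st4 */
            *(uint32_t *)p = 0; p += 4; len -= 4;
        }
        while (len >= 8) {                      /* main st8 loop */
            *(uint64_t *)p = 0; p += 8; len -= 8;
        }
        while (len--)                           /* leftover tail bytes */
            *p++ = 0;
    }
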
idiv64.S
51 frcpa.s1 f11, p6 = f8, f9 // y0 = frcpa(b)
53 (p6) fmpy.s1 f7 = f8, f11 // q0 = a*y0
54 (p6) fnma.s1 f6 = f9, f11, f1 // e0 = -b*y0 + 1
56 (p6) fma.s1 f10 = f7, f6, f7 // q1 = q0*e0 + q0
57 (p6) fmpy.s1 f7 = f6, f6 // e1 = e0*e0
62 (p6) fma.s1 f10 = f10, f7, f10 // q2 = q1*e1 + q1
63 (p6) fma.s1 f6 = f11, f6, f11 // y1 = y0*e0 + y0
65 (p6) fma.s1 f6 = f6, f7, f6 // y2 = y1*e1 + y1
66 (p6) fnma.s1 f7 = f9, f10, f8 // r = -b*q2 + a
72 (p6) fma.s1 f11 = f7, f6, f10 // q3 = r*y2 + q2
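
Note: the comments in idiv64.S spell out the standard frcpa/Newton-Raphson integer-division scheme: start from the hardware's reciprocal approximation y0 ≈ 1/b, refine the quotient and the reciprocal, then apply one remainder correction. Here p6 is the predicate frcpa produces alongside the approximation; every later fma is predicated on it, so special operands simply skip the refinement. idiv32.S below is the same scheme with one refinement step fewer. A hedged C sketch of the recurrence in plain double arithmetic (illustrative only; the real code depends on fused multiply-adds in the 82-bit register format, and on the special-case handling that the cleared predicate selects):

    /* Sketch of the recurrence from idiv64.S, in ordinary doubles.
     * Not bit-exact and not safe for b == 0. */
    static long long div_sketch(long long a, long long b)
    {
        double fa = (double)a, fb = (double)b;
        double y0 = 1.0 / fb;            /* y0 = frcpa(b), an approximation */
        double q0 = fa * y0;             /* q0 = a*y0                       */
        double e0 = 1.0 - fb * y0;       /* e0 = -b*y0 + 1                  */
        double q1 = q0 * e0 + q0;        /* q1 = q0*e0 + q0                 */
        double e1 = e0 * e0;             /* e1 = e0*e0                      */
        double q2 = q1 * e1 + q1;        /* q2 = q1*e1 + q1                 */
        double y1 = y0 * e0 + y0;        /* y1 = y0*e0 + y0                 */
        double y2 = y1 * e1 + y1;        /* y2 = y1*e1 + y1                 */
        double r  = fa - fb * q2;        /* r  = -b*q2 + a                  */
        return (long long)(q2 + r * y2); /* q3 = r*y2 + q2, truncated       */
    }
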
strlen.S
104 cmp.eq p6,p0=r0,r0 // sets p6 to true for cmp.and
119 cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
120 cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
121 (p6) br.wtop.dptk 1b // loop until p6 == 0
131 tnat.nz p6,p7=val1 // test NaT on val1
132 (p6) br.cond.spnt .recover // jump to recovery if val1 is NaT
174 cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
180 (p6) ld8 val=[base],8 // will fail if unrecoverable fault
184 cmp.eq p6,p0=8,val1 // val1==8 ?
185 (p6) br.wtop.dptk 2b // loop until p6 == 0
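
Note: strlen.S scans eight bytes per iteration. p6 starts out true (line 104), cmp.eq.and then ANDs in "no zero byte found in this word" for each of the two words examined (czx1 reports 8 when a word contains no zero byte, hence the comparisons against 8), and br.wtop keeps looping while p6 stays set; the tnat/.recover lines are the fallback when a speculative load returns a NaT. A hedged C sketch of the word-at-a-time idea, using the portable has-zero-byte bit trick in place of czx1:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: scan 8 bytes per iteration; the bit trick flags any zero
     * byte in the word, the role czx1 plays in the assembly. */
    static size_t strlen_sketch(const char *s)
    {
        const char *p = s;

        while (((uintptr_t)p & 7) != 0) {   /* byte-wise until aligned */
            if (*p == '\0')
                return (size_t)(p - s);
            p++;
        }
        for (;;) {
            uint64_t w = *(const uint64_t *)p;
            if ((w - 0x0101010101010101ULL) & ~w & 0x8080808080808080ULL)
                break;                      /* some byte in w is zero */
            p += 8;                         /* "p6 still true": keep looping */
        }
        while (*p)                          /* pin down the exact byte */
            p++;
        return (size_t)(p - s);
    }
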
strncpy_from_user.S
29 cmp.eq p6,p0=r0,in2
30 (p6) br.ret.spnt.many rp
37 cmp.ne p6,p7=r8,r0
39 (p6) cmp.ne.unc p8,p0=in1,r10
42 (p6) mov r8=in2 // buffer filled up---return buffer length
idiv32.S
61 frcpa.s1 f6, p6 = f8, f9 // y0 = frcpa(b)
63 (p6) fmpy.s1 f8 = f8, f6 // q0 = a*y0
64 (p6) fnma.s1 f6 = f9, f6, f1 // e0 = -b*y0 + 1
69 (p6) fma.s1 f8 = f6, f8, f8 // q1 = e0*q0 + q0
70 (p6) fma.s1 f6 = f6, f6, f7 // e1 = e0*e0 + 2^-34
75 (p6) fma.s1 f6 = f6, f8, f8 // q2 = e1*q1 + q1
do_csum.S
134 cmp.lt p0,p6=r0,len // check for zero length or negative (32bit len)
139 (p6) br.ret.spnt.many rp // return if zero or negative length
187 cmp.ltu p6,p0=result1[0],word1[0] // check the carry
190 (p6) adds result1[0]=1,result1[0]
201 cmp.ltu p6,p0=result1[0],word1[1]
203 (p6) adds result1[0]=1,result1[0]
245 cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1
248 (p6) adds result1[LOAD_LATENCY+1]=1,result1[LOAD_LATENCY+1]
253 cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1]
255 (p6) adds result1[0]=1,result1[0]
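
Note: in do_csum.S the p6 hits are the carry handling of the ones'-complement sum: after each 64-bit addition, cmp.ltu sets p6 when the result wrapped around, and the predicated adds folds that carry back into bit 0 (end-around carry). The same accumulation in a short C sketch, assuming the input is already a whole number of 64-bit words:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the end-around-carry accumulation in do_csum.S: an add
     * that wraps produced a carry out, which ones'-complement arithmetic
     * adds back in.  Folding down to 16 bits is left to the caller, as
     * in the real code. */
    static uint64_t csum_sketch(const uint64_t *words, size_t n)
    {
        uint64_t sum = 0;

        for (size_t i = 0; i < n; i++) {
            uint64_t prev = sum;
            sum += words[i];
            if (sum < prev)     /* cmp.ltu p6,...  ->  (p6) adds +1 */
                sum += 1;
        }
        return sum;
    }
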
memcpy_mck.S
104 cmp.le p6,p0 = 1,r30 // for .align_dest
119 cmp.lt p6,p0=2*PREFETCH_DIST,cnt
130 (p6) br.cond.dpnt .long_copy
147 cmp.le p6,p0=8,tmp
170 EX(.ex_handler, (p6) ld8 t1=[src0])
178 EX(.ex_handler, (p6) st8 [dst0]=t1) // store byte 1
292 cmp.lt p6,p7=blocksize,in2
295 (p6) mov in2=blocksize
321 cmp.le p6,p7=8,curlen; // have at least 8 byte remaining?
355 EX(.ex_handler, (p6) ld8 r37=[src1],8)
[all …]
copy_user.S
263 SWITCH(p6, 8)
271 CASE(p6, 8)
321 tbit.nz p6,p7=src1,0 // odd alignment
334 EX(.failure_in1,(p6) ld1 val1[0]=[src1],1) // 1-byte aligned
335 (p6) adds len1=-1,len1;;
348 EX(.failure_out,(p6) st1 [dst1]=val1[0],1)
364 tbit.nz p6,p0=len1,3
391 EX(.failure_in1,(p6) ld8 val1[0]=[src1],8) // at least 8 bytes
400 EX(.failure_out, (p6) st8 [dst1]=val1[0],8)
582 cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ?
[all …]
strnlen_user.S
37 cmp.eq p6,p0=r8,r0
38 (p6) br.cond.dpnt .Lexit
memcpy.S
63 cmp.eq p6,p0=in2,r0 // zero length?
65 (p6) br.ret.spnt.many rp // zero length, return immediately
81 cmp.ne p6,p0=t0,r0
85 (p6) br.cond.spnt.few .memcpy_long
239 cmp.ne p6,p0=t0,r0 // is src aligned, too?
254 (p6) ld8 val[1]=[src2],8 // prime the pump...
copy_page.S
75 cmp.ltu p6,p0 = tgtf, tgt_last
92 (p6) lfetch [srcf], 64
93 (p6) lfetch [tgtf], 64
ip_fast_csum.S
38 cmp.ne p6,p7=5,in1 // size other than 20 byte?
42 cmp.ne.or.andcm p6,p7=r14,r0
46 (p6) br.spnt .generic
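
Note: ip_fast_csum.S uses p6/p7 to choose between the common case and a generic fallback: p6 ends up true when the header length is not 5 words (20 bytes, no options) or another sanity check on line 42 fails, and line 46 branches away on it. A hedged C sketch of the fast path it protects, i.e. the checksum of exactly five 32-bit words:

    #include <stdint.h>

    /* Sketch of the 20-byte IPv4 header checksum fast path: sum five
     * 32-bit words into a wide accumulator, fold the carries down to
     * 16 bits, and return the ones'-complement of the result. */
    static uint16_t ip_fast_csum_sketch(const void *iph)
    {
        const uint32_t *w = iph;
        uint64_t sum = (uint64_t)w[0] + w[1] + w[2] + w[3] + w[4];

        while (sum >> 16)                        /* fold carries */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }
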
memset.S
43 #define p_scr p6 // default register for same-cycle branches
/arch/ia64/kernel/
ivt.S
137 cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5?
143 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
145 .pred.rel "mutex", p6, p7
146 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
149 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
151 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
160 (p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
167 (p7) cmp.eq.or.andcm p6,p7=r29,r0 // was pud_present(*pud) == NULL?
176 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was pmd_present(*pmd) == NULL?
182 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
[all …]
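
Note: the ivt.S hits are the VHPT-miss page-table walk. p6 and p7 form a declared mutex pair (the .pred.rel at line 145): p6 selects the kernel region-5 root (swapper_pg_dir), p7 the user root. Further down, the cmp.eq.or.andcm chain turns p6 on as soon as any level of the walk (pgd, pud, pmd, or the pte's present bit) comes up empty, so one predicated branch covers every failure. A hedged, self-contained C sketch of that "check every level, branch once" structure (the entries are passed in directly; the real code indexes each table with address bits):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: `missing` plays the role of p6, and each line mirrors
     * one cmp.eq.or.andcm / tbit test from ivt.S.  The `& 1` test assumes
     * the present bit (_PAGE_P) is bit 0, as on ia64. */
    static bool pte_lookup_sketch(uint64_t pgd_e, uint64_t pud_e,
                                  uint64_t pmd_e, uint64_t pte_e,
                                  uint64_t *out)
    {
        bool missing = (pgd_e == 0);          /* pgd_present()?          */
        missing = missing || (pud_e == 0);    /* pud_present()?          */
        missing = missing || (pmd_e == 0);    /* pmd_present()?          */
        missing = missing || !(pte_e & 1);    /* present (_PAGE_P) bit?  */

        if (missing)                          /* the single (p6) branch  */
            return false;
        *out = pte_e;
        return true;
    }
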
fsys.S
102 tnat.z p6,p7=r32 // check argument register for being NaT
121 (p6) st8 [r18]=r32
146 tnat.nz p6,p0 = r33 // guard against NaT argument
147 (p6) br.cond.spnt.few .fail_einval
195 tnat.nz p6,p0 = r31 // guard against Nat argument
196 (p6) br.cond.spnt.few .fail_einval
210 cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
211 (p6) br.cond.spnt.many fsys_fallback_syscall
236 MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!!
244 (p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
[all …]
relocate_kernel.S
79 cmp.ltu p6,p7=r24,r19
136 tbit.z p0, p6=r30, 0;; // 0x1 dest page
137 (p6) and r17=r30, r16
138 (p6) br.cond.sptk.few .loop;;
140 tbit.z p0, p6=r30, 1;; // 0x2 indirect page
141 (p6) and in0=r30, r16
142 (p6) br.cond.sptk.few .loop;;
144 tbit.z p0, p6=r30, 2;; // 0x4 end flag
145 (p6) br.cond.sptk.few .end_loop;;
147 tbit.z p6, p0=r30, 3;; // 0x8 source page
[all …]
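
Note: the relocate_kernel.S hits decode the kexec indirection list: the low bits of each entry say whether it names a destination page (0x1), another indirection page (0x2), the end of the list (0x4), or a source page to copy (0x8), and tbit.z/tbit.nz load one of those bits into p6 so the predicated branches can dispatch on it. A hedged C sketch of the same dispatch loop, with the page copy reduced to memcpy (the flag values come from the comments above; the 4 KiB page size is an assumption for the sketch):

    #include <stdint.h>
    #include <string.h>

    #define DEST_FLAG      0x1UL   /* entry names the next destination page */
    #define INDIRECT_FLAG  0x2UL   /* entry points at another list page     */
    #define DONE_FLAG      0x4UL   /* end of the list                       */
    #define SOURCE_FLAG    0x8UL   /* entry names a source page to copy     */
    #define PAGE_SZ        4096UL  /* assumed page size for the sketch      */

    static void relocate_sketch(const uint64_t *entry)
    {
        uint8_t *dest = 0;

        for (;;) {
            uint64_t e = *entry++;

            if (e & DONE_FLAG)
                break;
            if (e & DEST_FLAG)
                dest = (uint8_t *)(e & ~(PAGE_SZ - 1));
            else if (e & INDIRECT_FLAG)
                entry = (const uint64_t *)(e & ~(PAGE_SZ - 1));
            else if (e & SOURCE_FLAG) {
                memcpy(dest, (const void *)(e & ~(PAGE_SZ - 1)), PAGE_SZ);
                dest += PAGE_SZ;
            }
        }
    }
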
entry.S
74 cmp4.ge p6,p7=r8,r0
80 (p6) mov ar.pfs=r0 // clear ar.pfs on success
180 cmp.eq p7,p6=r25,in0
185 (p6) cmp.eq p7,p6=r26,r27
186 (p6) br.cond.dpnt .map
477 cmp.lt p6,p0=r8,r0 // check tracehook
481 (p6) br.cond.sptk strace_error // syscall failed ->
504 cmp.leu p6,p7=r15,r3
506 (p6) ld8 r20=[r20] // load address of syscall entry point
512 cmp.lt p6,p0=r8,r0 // syscall failed?
[all …]
gate.S
339 cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
346 (p6) ld8 r18=[r18] // M0|1
350 (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
354 (p6) mov b7=r18 // I0
365 (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
367 (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
369 (p6) mov b7=r14
370 (p6) br.sptk.many b7
372 BRL_COND_FSYS_BUBBLE_DOWN(p6)
mca_asm.S
81 cmp.ltu p6,p7=r24,r19
155 tbit.nz p6,p7=r18,60
880 cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region?
882 (p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
1090 cmp.lt p6,p7=in0,r14
1092 (p6) ld8 r8=[in0]
minstate.h
221 cmp.ge p6,p7 = 33,r17; \
223 (p6) mov r17=0x310; \
head.S
378 cmp4.lt p7,p6=0,r18
/arch/sparc/kernel/
setup_64.c
315 struct popc_6insn_patch_entry *p6; in popc_patch() local
331 p6 = &__popc_6insn_patch; in popc_patch()
332 while (p6 < &__popc_6insn_patch_end) { in popc_patch()
333 unsigned long i, addr = p6->addr; in popc_patch()
336 *(unsigned int *) (addr + (i * 4)) = p6->insns[i]; in popc_patch()
342 p6++; in popc_patch()
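
Note: unlike the ia64 hits, p6 in setup_64.c is just a local pointer. popc_patch() walks a linker-built table of 6-instruction patch entries and copies each entry's replacement words over the code at its address. A hedged reconstruction of the loop visible in the snippet (the field layout is an assumption inferred from the snippet, and the per-word I-cache maintenance the real function performs is elided):

    struct popc_6insn_patch_entry {
        unsigned long addr;          /* assumed layout */
        unsigned int  insns[6];
    };

    extern struct popc_6insn_patch_entry __popc_6insn_patch;
    extern struct popc_6insn_patch_entry __popc_6insn_patch_end;

    static void popc_patch_sketch(void)
    {
        struct popc_6insn_patch_entry *p6 = &__popc_6insn_patch;

        while (p6 < &__popc_6insn_patch_end) {
            unsigned long i, addr = p6->addr;

            for (i = 0; i < 6; i++)
                *(unsigned int *)(addr + i * 4) = p6->insns[i];
            p6++;
        }
    }
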
/arch/x86/events/intel/
Makefile
4 obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
/arch/arm/mach-iop32x/include/mach/
entry-macro.S
20 mrc p6, 0, \irqstat, c8, c0, 0 @ Read IINTSRC