| /kernel/linux/linux-5.10/include/asm-generic/ |
| D | xor.h |
      31  unsigned long *p3)  in xor_8regs_3() argument
      36  p1[0] ^= p2[0] ^ p3[0];  in xor_8regs_3()
      37  p1[1] ^= p2[1] ^ p3[1];  in xor_8regs_3()
      38  p1[2] ^= p2[2] ^ p3[2];  in xor_8regs_3()
      39  p1[3] ^= p2[3] ^ p3[3];  in xor_8regs_3()
      40  p1[4] ^= p2[4] ^ p3[4];  in xor_8regs_3()
      41  p1[5] ^= p2[5] ^ p3[5];  in xor_8regs_3()
      42  p1[6] ^= p2[6] ^ p3[6];  in xor_8regs_3()
      43  p1[7] ^= p2[7] ^ p3[7];  in xor_8regs_3()
      46  p3 += 8;  in xor_8regs_3()
      [all …]
|
| /kernel/linux/linux-6.6/include/asm-generic/ |
| D | xor.h |
      33  const unsigned long * __restrict p3)  in xor_8regs_3() argument
      38  p1[0] ^= p2[0] ^ p3[0];  in xor_8regs_3()
      39  p1[1] ^= p2[1] ^ p3[1];  in xor_8regs_3()
      40  p1[2] ^= p2[2] ^ p3[2];  in xor_8regs_3()
      41  p1[3] ^= p2[3] ^ p3[3];  in xor_8regs_3()
      42  p1[4] ^= p2[4] ^ p3[4];  in xor_8regs_3()
      43  p1[5] ^= p2[5] ^ p3[5];  in xor_8regs_3()
      44  p1[6] ^= p2[6] ^ p3[6];  in xor_8regs_3()
      45  p1[7] ^= p2[7] ^ p3[7];  in xor_8regs_3()
      48  p3 += 8;  in xor_8regs_3()
      [all …]
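The asm-generic xor.h matches above come from the portable fallback used by the kernel's RAID/XOR code: an unrolled loop that XORs the source buffers into the destination eight machine words per iteration. A minimal user-space sketch of that pattern (the function name and the main() driver are illustrative, not taken from the kernel):

      #include <stdio.h>

      /* Unrolled 3-buffer XOR modelled on xor_8regs_3() from asm-generic/xor.h:
       * dst[i] ^= s2[i] ^ s3[i], eight longs per pass. */
      static void xor_3bufs(unsigned long bytes, unsigned long *dst,
                            const unsigned long *s2, const unsigned long *s3)
      {
          long lines = bytes / sizeof(long) / 8;   /* assumes bytes is a multiple of 8*sizeof(long) */

          do {
              for (int i = 0; i < 8; i++)          /* the kernel writes these out by hand */
                  dst[i] ^= s2[i] ^ s3[i];
              dst += 8;
              s2  += 8;
              s3  += 8;
          } while (--lines > 0);
      }

      int main(void)
      {
          unsigned long a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
          unsigned long b[8] = { 8, 7, 6, 5, 4, 3, 2, 1 };
          unsigned long c[8] = { 0 };

          xor_3bufs(sizeof(a), a, b, c);
          printf("a[0] = %lu\n", a[0]);            /* 1 ^ 8 ^ 0 = 9 */
          return 0;
      }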
|
| /kernel/linux/linux-6.6/arch/hexagon/include/asm/ |
| D | spinlock.h |
      32  " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"  in arch_read_lock()
      33  " { if (!P3) jump 1b; }\n"  in arch_read_lock()
      34  " memw_locked(%0,P3) = R6;\n"  in arch_read_lock()
      35  " { if (!P3) jump 1b; }\n"  in arch_read_lock()
      38  : "memory", "r6", "p3"  in arch_read_lock()
      48  " memw_locked(%0,P3) = R6\n"  in arch_read_unlock()
      49  " if (!P3) jump 1b;\n"  in arch_read_unlock()
      52  : "memory", "r6", "p3"  in arch_read_unlock()
      63  " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"  in arch_read_trylock()
      64  " { if (!P3) jump 1f; }\n"  in arch_read_trylock()
      [all …]
|
| D | atomic.h |
      41  " memw_locked(%1,P3)=%0;\n" \
      42  " if (!P3) jump 1b;\n" \
      45  : "memory", "p3" \
      57  " memw_locked(%1,P3)=%0;\n" \
      58  " if (!P3) jump 1b;\n" \
      61  : "memory", "p3" \
      74  " memw_locked(%2,P3)=%1;\n" \
      75  " if (!P3) jump 1b;\n" \
      78  : "memory", "p3" \
      117  " p3 = cmp.eq(%0, %4);"  in ATOMIC_OPS()
      [all …]
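The Hexagon spinlock.h and atomic.h hits all share the same load-locked/store-conditional shape: a memw_locked() store reports success or failure through the predicate register P3, and "if (!P3) jump 1b" retries until the store sticks. A portable analogue of that retry loop, sketched with GCC/Clang __atomic builtins (atomic_add_return_sketch is a made-up name, not the kernel API):

      /* Conceptual equivalent of a Hexagon LL/SC retry loop: recompute and
       * re-attempt the store until no other CPU intervened, just as
       * "if (!P3) jump 1b" loops back when memw_locked() fails. */
      static int atomic_add_return_sketch(int i, int *v)
      {
          int old = __atomic_load_n(v, __ATOMIC_RELAXED);
          int new;

          do {
              new = old + i;
              /* weak CAS: may fail spuriously, mirroring a failed store-conditional;
               * on failure, old is refreshed with the current value of *v */
          } while (!__atomic_compare_exchange_n(v, &old, new, 1,
                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
          return new;
      }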
|
| /kernel/linux/linux-5.10/arch/hexagon/include/asm/ |
| D | spinlock.h |
      32  " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"  in arch_read_lock()
      33  " { if (!P3) jump 1b; }\n"  in arch_read_lock()
      34  " memw_locked(%0,P3) = R6;\n"  in arch_read_lock()
      35  " { if (!P3) jump 1b; }\n"  in arch_read_lock()
      38  : "memory", "r6", "p3"  in arch_read_lock()
      48  " memw_locked(%0,P3) = R6\n"  in arch_read_unlock()
      49  " if (!P3) jump 1b;\n"  in arch_read_unlock()
      52  : "memory", "r6", "p3"  in arch_read_unlock()
      63  " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"  in arch_read_trylock()
      64  " { if (!P3) jump 1f; }\n"  in arch_read_trylock()
      [all …]
|
| D | atomic.h |
      91  " memw_locked(%1,P3)=%0;\n" \
      92  " if (!P3) jump 1b;\n" \
      95  : "memory", "p3" \
      107  " memw_locked(%1,P3)=%0;\n" \
      108  " if (!P3) jump 1b;\n" \
      111  : "memory", "p3" \
      124  " memw_locked(%2,P3)=%1;\n" \
      125  " if (!P3) jump 1b;\n" \
      128  : "memory", "p3" \
      168  " p3 = cmp.eq(%0, %4);"  in ATOMIC_OPS()
      [all …]
|
| /kernel/linux/linux-5.10/arch/s390/include/asm/ |
| D | kvm_para.h |
      82  unsigned long p2, unsigned long p3)  in __kvm_hypercall3() argument
      87  register unsigned long __p3 asm("4") = p3;  in __kvm_hypercall3()
      97  unsigned long p2, unsigned long p3)  in kvm_hypercall3() argument
      100  return __kvm_hypercall3(nr, p1, p2, p3);  in kvm_hypercall3()
      104  unsigned long p2, unsigned long p3,  in __kvm_hypercall4() argument
      110  register unsigned long __p3 asm("4") = p3;  in __kvm_hypercall4()
      121  unsigned long p2, unsigned long p3,  in kvm_hypercall4() argument
      125  return __kvm_hypercall4(nr, p1, p2, p3, p4);  in kvm_hypercall4()
      129  unsigned long p2, unsigned long p3,  in __kvm_hypercall5() argument
      135  register unsigned long __p3 asm("4") = p3;  in __kvm_hypercall5()
      [all …]
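In the s390 kvm_para.h hits, each hypercall argument is pinned to a fixed general-purpose register with a register-asm variable before the guest traps into the hypervisor; p3 always lands in %r4. A hedged sketch of that idiom (the "diag 2,4,0x500" opcode is an assumption based on the s390 KVM hypercall convention; only the asm("N") register bindings are visible in the listing itself):

      static inline long kvm_hypercall3_sketch(unsigned long nr, unsigned long p1,
                                               unsigned long p2, unsigned long p3)
      {
          register unsigned long __nr asm("1") = nr;   /* function code in %r1 */
          register unsigned long __p1 asm("2") = p1;   /* arguments in %r2..%r4 */
          register unsigned long __p2 asm("3") = p2;
          register unsigned long __p3 asm("4") = p3;
          register long __rc asm("2");                 /* result comes back in %r2 */

          asm volatile ("diag 2,4,0x500\n"             /* assumed hypercall DIAGNOSE */
                        : "=d" (__rc)
                        : "d" (__nr), "0" (__p1), "d" (__p2), "d" (__p3)
                        : "memory", "cc");
          return __rc;
      }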
|
| /kernel/linux/linux-5.10/arch/x86/include/asm/ |
| D | xor.h |
      43  #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
      47  #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
      146  unsigned long *p3)  in xor_sse_3() argument
      192  " add %[inc], %[p3] ;\n"  in xor_sse_3()
      196  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)  in xor_sse_3()
      205  unsigned long *p3)  in xor_sse_3_pf64() argument
      229  " add %[inc], %[p3] ;\n"  in xor_sse_3_pf64()
      233  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)  in xor_sse_3_pf64()
      242  unsigned long *p3, unsigned long *p4)  in xor_sse_4() argument
      294  " add %[inc], %[p3] ;\n"  in xor_sse_4()
      [all …]
|
| /kernel/linux/linux-6.6/arch/x86/include/asm/ |
| D | xor.h |
      43  #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
      47  #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
      149  const unsigned long * __restrict p3)  in xor_sse_3() argument
      195  " add %[inc], %[p3] ;\n"  in xor_sse_3()
      199  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)  in xor_sse_3()
      209  const unsigned long * __restrict p3)  in xor_sse_3_pf64() argument
      233  " add %[inc], %[p3] ;\n"  in xor_sse_3_pf64()
      237  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)  in xor_sse_3_pf64()
      247  const unsigned long * __restrict p3,  in xor_sse_4() argument
      300  " add %[inc], %[p3] ;\n"  in xor_sse_4()
      [all …]
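The x86 xor.h matches are one of several competing XOR implementations (SSE with different prefetch depths, AVX, and so on); the kernel benchmarks the candidates and publishes the winner through a table of function pointers. A simplified user-space sketch of that dispatch idea (struct name and selection logic here are illustrative, not the kernel's exact xor_block_template machinery):

      #include <stdio.h>

      /* One XOR "template": a named set of routines for N source buffers.
       * The kernel's tables work the same way, plus a measured speed field
       * filled in by a boot-time benchmark. */
      struct xor_template {
          const char *name;
          void (*do_3)(unsigned long bytes, unsigned long *p1,
                       const unsigned long *p2, const unsigned long *p3);
      };

      static void xor_generic_3(unsigned long bytes, unsigned long *p1,
                                const unsigned long *p2, const unsigned long *p3)
      {
          for (unsigned long i = 0; i < bytes / sizeof(long); i++)
              p1[i] ^= p2[i] ^ p3[i];
      }

      static const struct xor_template templates[] = {
          { .name = "8regs", .do_3 = xor_generic_3 },
          /* a real build would also register SSE/AVX variants here */
      };

      int main(void)
      {
          unsigned long a[4] = { 1, 1, 1, 1 }, b[4] = { 2, 2, 2, 2 }, c[4] = { 4, 4, 4, 4 };

          templates[0].do_3(sizeof(a), a, b, c);   /* pretend the benchmark picked this one */
          printf("using %s: a[0] = %lu\n", templates[0].name, a[0]);  /* 1 ^ 2 ^ 4 = 7 */
          return 0;
      }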
|
| /kernel/linux/linux-6.6/arch/arm/include/asm/ |
| D | xor.h |
      70  const unsigned long * __restrict p3)  in xor_arm4regs_3() argument
      85  XOR_BLOCK_4(p3);  in xor_arm4regs_3()
      93  const unsigned long * __restrict p3,  in xor_arm4regs_4() argument
      105  XOR_BLOCK_2(p3);  in xor_arm4regs_4()
      114  const unsigned long * __restrict p3,  in xor_arm4regs_5() argument
      127  XOR_BLOCK_2(p3);  in xor_arm4regs_5()
      171  const unsigned long * __restrict p3)  in xor_neon_3() argument
      174  xor_arm4regs_3(bytes, p1, p2, p3);  in xor_neon_3()
      177  xor_block_neon_inner.do_3(bytes, p1, p2, p3);  in xor_neon_3()
      185  const unsigned long * __restrict p3,  in xor_neon_4() argument
      [all …]
|
| /kernel/linux/linux-5.10/arch/arm/include/asm/ |
| D | xor.h |
      68  unsigned long *p3)  in xor_arm4regs_3() argument
      83  XOR_BLOCK_4(p3);  in xor_arm4regs_3()
      90  unsigned long *p3, unsigned long *p4)  in xor_arm4regs_4() argument
      101  XOR_BLOCK_2(p3);  in xor_arm4regs_4()
      109  unsigned long *p3, unsigned long *p4, unsigned long *p5)  in xor_arm4regs_5() argument
      120  XOR_BLOCK_2(p3);  in xor_arm4regs_5()
      162  unsigned long *p3)  in xor_neon_3() argument
      165  xor_arm4regs_3(bytes, p1, p2, p3);  in xor_neon_3()
      168  xor_block_neon_inner.do_3(bytes, p1, p2, p3);  in xor_neon_3()
      175  unsigned long *p3, unsigned long *p4)  in xor_neon_4() argument
      [all …]
|
| /kernel/linux/linux-6.6/scripts/coccinelle/free/ |
| D | iounmap.cocci |
      23  position p1,p2,p3;
      46  return@p3 ...; }
      54  p3 << iom.p3;
      59  cocci.print_secs("needed iounmap",p3)
      64  p3 << iom.p3;
      68  coccilib.report.print_report(p3[0],msg)
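iounmap.cocci uses Coccinelle position variables (p1, p2, p3 in the matches above) to flag functions that ioremap() a region and then reach a return statement, recorded at p3, without a matching iounmap(). A schematic kernel-side example of the pattern it reports (the function, its parameters, and the register-window size are made up):

      /* Schematic only: the kind of leak iounmap.cocci reports.  The probe
       * maps a register window but one error path returns without unmapping. */
      static int foo_probe_leaky(phys_addr_t base, int irq)
      {
          void __iomem *regs = ioremap(base, 0x100);

          if (!regs)
              return -ENOMEM;
          if (irq < 0)
              return -EINVAL;   /* BUG: leaks the mapping; needs iounmap(regs) first */
          /* ... */
          return 0;
      }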
|
| D | clk_put.cocci |
      23  position p1,p2,p3;
      46  return@p3 ...; }
      54  p3 << clk.p3;
      59  cocci.print_secs("needed clk_put",p3)
      64  p3 << clk.p3;
      68  coccilib.report.print_report(p3[0],msg)
|
| /kernel/linux/linux-5.10/scripts/coccinelle/free/ |
| D | iounmap.cocci |
      23  position p1,p2,p3;
      46  return@p3 ...; }
      54  p3 << iom.p3;
      59  cocci.print_secs("needed iounmap",p3)
      64  p3 << iom.p3;
      68  coccilib.report.print_report(p3[0],msg)
|
| D | clk_put.cocci |
      23  position p1,p2,p3;
      46  return@p3 ...; }
      54  p3 << clk.p3;
      59  cocci.print_secs("needed clk_put",p3)
      64  p3 << clk.p3;
      68  coccilib.report.print_report(p3[0],msg)
|
| /kernel/linux/linux-6.6/tools/testing/selftests/ftrace/test.d/instances/ |
| D | instance-event.tc |
      55  p3=$!
      56  echo $p3
      60  kill -1 $p3
      65  wait $p1 $p2 $p3
      107  p3=$!
      108  echo $p3
      123  kill -1 $p3
      128  wait $p1 $p2 $p3 $p4 $p5
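instance-event.tc stresses trace-instance handling from several background shell jobs: each job's PID is captured with $!, the jobs are later told to stop with kill -1 (SIGHUP), and the script waits for all of them. The same lifecycle sketched in C for readers who want the pattern outside the shell (worker() is a stand-in for the test's shell loops):

      #include <signal.h>
      #include <sys/wait.h>
      #include <unistd.h>

      static volatile sig_atomic_t stop;

      static void on_hup(int sig) { (void)sig; stop = 1; }

      static void worker(void)
      {
          signal(SIGHUP, on_hup);
          while (!stop)                    /* stand-in for the test's create/destroy loop */
              usleep(1000);
          _exit(0);
      }

      int main(void)
      {
          pid_t pid[3];

          for (int i = 0; i < 3; i++) {    /* like "cmd & p1=$!" ... "p3=$!" */
              pid[i] = fork();
              if (pid[i] == 0)
                  worker();
          }
          sleep(1);
          for (int i = 0; i < 3; i++)
              kill(pid[i], SIGHUP);        /* like "kill -1 $p1 $p2 $p3" */
          for (int i = 0; i < 3; i++)
              waitpid(pid[i], NULL, 0);    /* like "wait $p1 $p2 $p3" */
          return 0;
      }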
|
| /kernel/linux/linux-5.10/tools/testing/selftests/ftrace/test.d/instances/ |
| D | instance-event.tc |
      55  p3=$!
      56  echo $p3
      60  kill -1 $p3
      65  wait $p1 $p2 $p3
      107  p3=$!
      108  echo $p3
      123  kill -1 $p3
      128  wait $p1 $p2 $p3 $p4 $p5
|
| /kernel/linux/linux-5.10/arch/loongarch/include/asm/ |
| D | xor_simd.h |
      12  unsigned long *p2, unsigned long *p3);
      14  unsigned long *p2, unsigned long *p3,
      17  unsigned long *p2, unsigned long *p3,
      25  unsigned long *p2, unsigned long *p3);
      27  unsigned long *p2, unsigned long *p3,
      30  unsigned long *p2, unsigned long *p3,
|
| /kernel/linux/linux-6.6/arch/arm64/lib/ |
| D | xor-neon.c |
      42  const unsigned long * __restrict p3)  in xor_arm64_neon_3() argument
      46  uint64_t *dp3 = (uint64_t *)p3;  in xor_arm64_neon_3()
      58  /* p1 ^= p3 */  in xor_arm64_neon_3()
      78  const unsigned long * __restrict p3,  in xor_arm64_neon_4() argument
      83  uint64_t *dp3 = (uint64_t *)p3;  in xor_arm64_neon_4()
      96  /* p1 ^= p3 */  in xor_arm64_neon_4()
      123  const unsigned long * __restrict p3,  in xor_arm64_neon_5() argument
      129  uint64_t *dp3 = (uint64_t *)p3;  in xor_arm64_neon_5()
      143  /* p1 ^= p3 */  in xor_arm64_neon_5()
      197  const unsigned long * __restrict p3)  in xor_arm64_eor3_3() argument
      [all …]
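The arm64 xor-neon.c hits implement the same 3/4/5-source XOR routines with NEON vector loads and EORs, two 64-bit lanes at a time (the _eor3_ variants additionally use the SHA-3 three-way EOR3 instruction). A minimal intrinsics sketch of the 3-source inner loop (the function name and the assumption that bytes is a multiple of 16 are mine; build for AArch64):

      #include <arm_neon.h>
      #include <stdint.h>

      /* One NEON step of "p1 ^= p2 ^ p3": load two 64-bit lanes from each
       * buffer, EOR them together, store the result back into p1. */
      static void xor_neon_3_sketch(unsigned long bytes, unsigned long *p1,
                                    const unsigned long *p2, const unsigned long *p3)
      {
          uint64_t *d1 = (uint64_t *)p1;
          const uint64_t *d2 = (const uint64_t *)p2;
          const uint64_t *d3 = (const uint64_t *)p3;

          for (unsigned long i = 0; i < bytes / sizeof(uint64x2_t); i++) {
              uint64x2_t v = veorq_u64(vld1q_u64(d2), vld1q_u64(d3));

              vst1q_u64(d1, veorq_u64(vld1q_u64(d1), v));
              d1 += 2; d2 += 2; d3 += 2;
          }
      }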
|
| /kernel/linux/linux-5.10/arch/loongarch/lib/ |
| D | xor_simd.h |
      16  unsigned long *p2, unsigned long *p3);
      18  unsigned long *p2, unsigned long *p3,
      21  unsigned long *p2, unsigned long *p3,
      29  unsigned long *p2, unsigned long *p3);
      31  unsigned long *p2, unsigned long *p3,
      34  unsigned long *p2, unsigned long *p3,
|
| /kernel/linux/linux-6.6/arch/loongarch/include/asm/ |
| D | xor_simd.h |
      12  const unsigned long * __restrict p2, const unsigned long * __restrict p3);
      14  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
      17  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
      25  const unsigned long * __restrict p2, const unsigned long * __restrict p3);
      27  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
      30  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
|
| /kernel/linux/linux-6.6/arch/loongarch/lib/ |
| D | xor_simd.h |
      16  const unsigned long * __restrict p2, const unsigned long * __restrict p3);
      18  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
      21  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
      29  const unsigned long * __restrict p2, const unsigned long * __restrict p3);
      31  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
      34  const unsigned long * __restrict p2, const unsigned long * __restrict p3,
|
| /kernel/linux/linux-6.6/arch/powerpc/lib/ |
| D | xor_vmx_glue.c |
      28  const unsigned long * __restrict p3)  in xor_altivec_3() argument
      32  __xor_altivec_3(bytes, p1, p2, p3);  in xor_altivec_3()
      40  const unsigned long * __restrict p3,  in xor_altivec_4() argument
      45  __xor_altivec_4(bytes, p1, p2, p3, p4);  in xor_altivec_4()
      53  const unsigned long * __restrict p3,  in xor_altivec_5() argument
      59  __xor_altivec_5(bytes, p1, p2, p3, p4, p5);  in xor_altivec_5()
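xor_vmx_glue.c wraps the raw Altivec routines so callers never manage the vector unit themselves: each wrapper disables preemption, enables kernel Altivec state, runs the VMX inner routine, then restores everything. A hedged sketch of that wrapper shape (__xor_altivec_3 comes from the listing; the surrounding calls follow the usual powerpc pattern and are not copied from the file):

      /* Kernel-context sketch: guard a VMX-using helper so the vector state is
       * set up around it and the task cannot be preempted while it is live. */
      void xor_altivec_3_sketch(unsigned long bytes, unsigned long * __restrict p1,
                                const unsigned long * __restrict p2,
                                const unsigned long * __restrict p3)
      {
          preempt_disable();
          enable_kernel_altivec();          /* make the vector unit usable in-kernel */
          __xor_altivec_3(bytes, p1, p2, p3);
          disable_kernel_altivec();
          preempt_enable();
      }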
|
| /kernel/linux/linux-6.6/arch/hexagon/lib/ |
| D | memcpy.S |
      192  p3 = cmp.gtu(len, #95); /* %8 < 97 */  define
      202  p2 = and(p2,!p3); /* %8 < 97 */
      255  p3 = cmp.gtu(back, #8);  define
      290  if(p3) dataF8 = memd(ptr_in+#8);
      320  p3 = sp1loop0(.Ldword_loop_prolog, prolog)  define
      327  if(p3) memd(ptr_out++#8) = ldata0;
      339  p3 = cmp.gtu(kernel, #0);  define
      341  if(p3.new) kernel = add(kernel, #-1);
      343  if(p3.new) epilog = add(epilog, #32);
      349  p3 = cmp.gtu(dalign, #24);  define
      [all …]
|
| /kernel/linux/linux-5.10/arch/hexagon/lib/ |
| D | memcpy.S |
      192  p3 = cmp.gtu(len, #95); /* %8 < 97 */  define
      202  p2 = and(p2,!p3); /* %8 < 97 */
      255  p3 = cmp.gtu(back, #8);  define
      290  if(p3) dataF8 = memd(ptr_in+#8);
      320  p3 = sp1loop0(.Ldword_loop_prolog, prolog)  define
      327  if(p3) memd(ptr_out++#8) = ldata0;
      339  p3 = cmp.gtu(kernel, #0);  define
      341  if(p3.new) kernel = add(kernel, #-1);
      343  if(p3.new) epilog = add(epilog, #32);
      349  p3 = cmp.gtu(dalign, #24);  define
      [all …]
|