
Searched refs:o0 (Results 1 – 25 of 496) sorted by relevance


/external/python/cpython2/Modules/_ctypes/libffi/src/sparc/
v8.S
43 add %o0, %o1, %o2
45 1: flush %o0
47 1: iflush %o0
49 add %o0, 8, %o0
50 cmp %o0, %o2
78 mov %l0, %o0 ! call routine to set up frame
82 ld [%l0+ARGS], %o0 ! call foreign function
99 st %o0, [%i4] ! (delay)
111 sll %o0, 24, %o0 ! (delay)
115 sll %o0, 24, %o0 ! (delay)
[all …]
/external/libffi/src/sparc/
v8.S
43 add %o0, %o1, %o2
45 1: flush %o0
47 1: iflush %o0
49 add %o0, 8, %o0
50 cmp %o0, %o2
78 mov %l0, %o0 ! call routine to set up frame
82 ld [%l0+ARGS], %o0 ! call foreign function
99 st %o0, [%i4] ! (delay)
111 sll %o0, 24, %o0 ! (delay)
115 sll %o0, 24, %o0 ! (delay)
[all …]
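
Note: %o0 recurs throughout these libffi results because, in the SPARC calling convention, %o0 is both the first outgoing-argument register and the return-value register; v8.S additionally uses it as the cursor of the flush/iflush loop that synchronizes the instruction cache after a closure trampoline is written. A minimal C sketch of that last step, using the portable GCC/Clang builtin instead of the hand-written loop (function name is illustrative):

    #include <stddef.h>

    /* Sketch only: after writing executable code (e.g. a libffi
       closure trampoline), the icache must be synchronized before
       jumping to it. v8.S does this one doubleword at a time with
       flush; the builtin expresses the same intent portably. */
    static void sync_icache(void *start, size_t size) {
      __builtin___clear_cache((char *)start, (char *)start + size);
    }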
/external/llvm/test/CodeGen/SPARC/
basictest.ll
7 ; CHECK: add %o0, 1, %o0
17 ; CHECK: xnor %o0, %o1, %o0
25 ; CHECK: xnor %o0, %o1, %o0
29 ; CHECK: st %g0, [%o0]
40 ; CHECK: sra %o0, 31, %o2
42 ; CHECK: sdiv %o0, %o1, %o0
50 ; CHECK: udiv %o0, %o1, %o0
57 ; CHECK: smul %o0, %o1, %o0
64 ; CHECK: smul %o0, %o1, %o1
65 ; CHECK: rd %y, %o0
[all …]
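
Note: these checks pin down several SPARC idioms. A hedged C rendering of two of them (function names are illustrative, not from the test):

    /* ~(a ^ b) folds to a single xnor, per the CHECK lines above. */
    int xnor_fold(int a, int b) { return ~(a ^ b); }

    /* The high word of a 32x32 multiply comes from the %y register
       (smul, then rd %y); the sra before sdiv similarly seeds %y
       with the sign word, since sdiv divides the 64-bit pair y:rs1. */
    int mulhi(int a, int b) {
      return (int)(((long long)a * (long long)b) >> 32);
    }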
spill.ll
10 ; CHECK: and %i0, %i1, %o0
11 ; CHECK: st %o0, [%fp+{{.+}}]
12 ; CHECK: add %o0, %o0, %g0
17 ; The clobber list has all registers except g0/o0. (Only o0 is usable.)
23 ; CHECK: and %i0, %i2, %o0
25 ; CHECK: std %o0, [%fp+{{.+}}]
26 ; CHECK: add %o0, %o0, %g0
31 ; The clobber list has all registers except g0,g1,o0,o1. (Only o0/o1 are a usable pair)
32 ; So, o0/o1 must be used.
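
Note: the comments quoted above describe the forcing trick: an inline-asm clobber list naming every allocatable register except %g0/%o0 (or %g0/%g1/%o0/%o1 for the pair case) leaves the allocator no choice, so the live value must occupy %o0 and be spilled around the asm. A trimmed, hedged C sketch of the mechanism (the real tests clobber nearly everything; clobber-name spelling can vary by compiler, and this builds only for SPARC targets):

    int force_spill(int a, int b) {
      int t = a & b;              /* cf. 'and %i0, %i1, %o0' */
      /* Denying these registers across the asm forces 't' to be
         spilled to the stack (the 'st %o0, [%fp+...]' check). */
      __asm__ volatile ("" : : : "o1", "o2", "o3", "l0", "l1", "memory");
      return t + t;               /* cf. 'add %o0, %o0, %g0' */
    }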
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SPARC/
basictest.ll
7 ; CHECK: add %o0, 1, %o0
17 ; CHECK: xnor %o0, %o1, %o0
25 ; CHECK: xnor %o0, %o1, %o0
29 ; CHECK: st %g0, [%o0]
40 ; CHECK: sra %o0, 31, %o2
42 ; CHECK: sdiv %o0, %o1, %o0
50 ; CHECK: udiv %o0, %o1, %o0
57 ; CHECK: smul %o0, %o1, %o0
64 ; CHECK: smul %o0, %o1, %o1
65 ; CHECK: rd %y, %o0
[all …]
rem.ll
8 ; CHECK-NEXT: sdivx %o0, %o1, %o2
11 ; CHECK-NEXT: sub %o0, %o1, %o0
20 ; CHECK-NEXT: udivx %o0, %o1, %o2
23 ; CHECK-NEXT: sub %o0, %o1, %o0
35 ; CHECK-NEXT: mulx %o0, %o1, %o0
36 ; CHECK-NEXT: udivx %o0, 1021, %o1
39 ; CHECK-NEXT: sub %o0, %o1, %o0
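
Note: SPARC has divide but no remainder instruction, so these checks verify the standard expansion rem(a, b) = a - (a / b) * b (sdivx or udivx, then mulx, then sub). The equivalent C, with an illustrative function name:

    long long srem_expanded(long long a, long long b) {
      long long q = a / b;   /* sdivx */
      long long p = q * b;   /* mulx  */
      return a - p;          /* sub   */
    }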
imm.ll
10 ; SPARC-NEXT: mov %g0, %o0
18 ; SPARC-NEXT: mov 2047, %o0
26 ; SPARC-NEXT: mov -2047, %o0
33 ; SPARC-NEXT: sethi 1695242, %o0
35 ; SPARC-NEXT: or %o0, 751, %o0
42 ; SPARC-NEXT: sethi 3648367, %o0
44 ; SPARC-NEXT: or %o0, 751, %o0
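
Note: imm.ll covers constant materialization: immediates up to 12 signed bits fit a single mov, while larger ones split into a 22-bit upper chunk (sethi, filling bits 31:10) OR'd with a 10-bit low chunk. The last pair above decodes to a recognizable value; a quick self-check in C:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      /* sethi 3648367; or 751  ==  (3648367 << 10) | 751 */
      uint32_t v = ((uint32_t)3648367 << 10) | 751u;
      assert(v == 0xDEADBEEFu);
      return 0;
    }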
spill.ll
10 ; CHECK: and %i0, %i1, %o0
11 ; CHECK: st %o0, [%fp+{{.+}}]
12 ; CHECK: add %o0, %o0, %g0
17 ; The clobber list has all registers except g0/o0. (Only o0 is usable.)
23 ; CHECK: and %i0, %i2, %o0
25 ; CHECK: std %o0, [%fp+{{.+}}]
26 ; CHECK: add %o0, %o0, %g0
31 ; The clobber list has all registers except g0,g1,o0,o1. (Only o0/o1 are a usable pair)
32 ; So, o0/o1 must be used.
/external/XNNPACK/src/f32-vmulcaddc/gen/
c8-neonfma-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
77 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
78 vst1q_f32(o0, vacc0x4567); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
100 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
124 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
131 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
136 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
141 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c8__neonfma_2x()
c8-neon-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
81 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
82 vst1q_f32(o0, vacc0x4567); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
106 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
132 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
139 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
144 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
149 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c8__neon_2x()
c8-psimd-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
78 psimd_store_f32(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
79 psimd_store_f32(o0 + 4, vacc0x4567); in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
80 o0 += 8; in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
106 psimd_store_f32(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
107 o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
133 psimd_store2_f32(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
139 o0 += 2; in xnn_f32_vmulcaddc_ukernel_c8__psimd_2x()
[all …]
c4-psimd-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
68 psimd_store_f32(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
69 o0 += 4; in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
95 psimd_store2_f32(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
101 o0 += 2; in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
105 psimd_store1_f32(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
108 o0 += 1; in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
113 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c4__psimd_2x()
[all …]
c4-neonfma-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x()
67 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x()
91 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x()
98 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x()
103 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x()
108 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__neonfma_2x()
c8-sse-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c8__sse_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
83 _mm_storeu_ps(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
84 _mm_storeu_ps(o0 + 4, vacc0x4567); in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
85 o0 += 8; in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
114 _mm_storeu_ps(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
115 o0 += 4; in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
144 _mm_storel_pi((__m64*) o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
150 o0 += 2; in xnn_f32_vmulcaddc_ukernel_c8__sse_2x()
[all …]
c4-sse-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c4__sse_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
71 _mm_storeu_ps(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
72 o0 += 4; in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
101 _mm_storel_pi((__m64*) o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
107 o0 += 2; in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
111 _mm_store_ss(o0, vacc0x0123); in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
114 o0 += 1; in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
119 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c4__sse_2x()
[all …]
c4-neon-2x.c
33 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c4__neon_2x() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c4__neon_2x()
38 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__neon_2x()
69 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_vmulcaddc_ukernel_c4__neon_2x()
95 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_vmulcaddc_ukernel_c4__neon_2x()
102 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_vmulcaddc_ukernel_c4__neon_2x()
107 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c4__neon_2x()
112 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__neon_2x()
c4-scalar-2x.c
34 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x() local
36 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
39 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
96 o0[0] = vacc0x0; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
97 o0[1] = vacc0x1; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
98 o0[2] = vacc0x2; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
99 o0[3] = vacc0x3; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
100 o0 += 4; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
127 *o0++ = vacc0; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
134 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
[all …]
c4-wasm-2x.c
34 float* o0 = output; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x() local
36 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
39 o1 = o0; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
96 o0[0] = vacc0x0; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
97 o0[1] = vacc0x1; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
98 o0[2] = vacc0x2; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
99 o0[3] = vacc0x3; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
100 o0 += 4; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
127 *o0++ = vacc0; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
134 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
[all …]
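
Note: every _2x microkernel above shares the same output scaffolding: o0 walks row 0, o1 walks row 1 at output_stride bytes away, and when an odd row remains o1 is clamped onto o0 (the recurring 'o1 = o0;' matches), so the duplicated second-row stores are harmless. A stripped-down sketch with the per-element math elided (parameter names follow the generated sources; the body is an assumption-labeled outline, not XNNPACK code):

    #include <stddef.h>
    #include <stdint.h>

    void ukernel_2x(size_t rows, float *output, size_t output_stride,
                    size_t output_increment) {
      float *o0 = output;
      float *o1 = (float *)((uintptr_t)o0 + output_stride);
      do {                       /* assumes rows != 0, as XNNPACK does */
        if (rows < 2) {
          o1 = o0;               /* odd tail: row 1 aliases row 0 */
        }
        /* ... compute one pair of rows, storing through o0/o1 ... */
        o0 = (float *)((uintptr_t)o0 + output_increment);
        o1 = (float *)((uintptr_t)o1 + output_increment);
        rows = rows <= 2 ? 0 : rows - 2;
      } while (rows != 0);
    }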
/external/XNNPACK/src/f32-prelu/gen/
sse41-2x8.c
33 float* o0 = output; in xnn_f32_prelu_ukernel__sse41_2x8() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_prelu_ukernel__sse41_2x8()
38 o1 = o0; in xnn_f32_prelu_ukernel__sse41_2x8()
81 _mm_storeu_ps(o0, vacc0x0123); in xnn_f32_prelu_ukernel__sse41_2x8()
82 _mm_storeu_ps(o0 + 4, vacc0x4567); in xnn_f32_prelu_ukernel__sse41_2x8()
83 o0 += 8; in xnn_f32_prelu_ukernel__sse41_2x8()
109 _mm_storeu_ps(o0, vacc0x0123); in xnn_f32_prelu_ukernel__sse41_2x8()
110 o0 += 4; in xnn_f32_prelu_ukernel__sse41_2x8()
136 _mm_storel_pi((__m64*) o0, vacc0x0123); in xnn_f32_prelu_ukernel__sse41_2x8()
142 o0 += 2; in xnn_f32_prelu_ukernel__sse41_2x8()
[all …]
psimd-2x8.c
33 float* o0 = output; in xnn_f32_prelu_ukernel__psimd_2x8() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_prelu_ukernel__psimd_2x8()
38 o1 = o0; in xnn_f32_prelu_ukernel__psimd_2x8()
81 psimd_store_f32(o0, vacc0x0123); in xnn_f32_prelu_ukernel__psimd_2x8()
82 psimd_store_f32(o0 + 4, vacc0x4567); in xnn_f32_prelu_ukernel__psimd_2x8()
83 o0 += 8; in xnn_f32_prelu_ukernel__psimd_2x8()
109 psimd_store_f32(o0, vacc0x0123); in xnn_f32_prelu_ukernel__psimd_2x8()
110 o0 += 4; in xnn_f32_prelu_ukernel__psimd_2x8()
136 psimd_store2_f32(o0, vacc0x0123); in xnn_f32_prelu_ukernel__psimd_2x8()
142 o0 += 2; in xnn_f32_prelu_ukernel__psimd_2x8()
[all …]
sse41-2x4.c
33 float* o0 = output; in xnn_f32_prelu_ukernel__sse41_2x4() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_prelu_ukernel__sse41_2x4()
38 o1 = o0; in xnn_f32_prelu_ukernel__sse41_2x4()
70 _mm_storeu_ps(o0, vacc0x0123); in xnn_f32_prelu_ukernel__sse41_2x4()
71 o0 += 4; in xnn_f32_prelu_ukernel__sse41_2x4()
97 _mm_storel_pi((__m64*) o0, vacc0x0123); in xnn_f32_prelu_ukernel__sse41_2x4()
103 o0 += 2; in xnn_f32_prelu_ukernel__sse41_2x4()
107 _mm_store_ss(o0, vacc0x0123); in xnn_f32_prelu_ukernel__sse41_2x4()
110 o0 += 1; in xnn_f32_prelu_ukernel__sse41_2x4()
115 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_prelu_ukernel__sse41_2x4()
[all …]
psimd-2x4.c
33 float* o0 = output; in xnn_f32_prelu_ukernel__psimd_2x4() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_prelu_ukernel__psimd_2x4()
38 o1 = o0; in xnn_f32_prelu_ukernel__psimd_2x4()
70 psimd_store_f32(o0, vacc0x0123); in xnn_f32_prelu_ukernel__psimd_2x4()
71 o0 += 4; in xnn_f32_prelu_ukernel__psimd_2x4()
97 psimd_store2_f32(o0, vacc0x0123); in xnn_f32_prelu_ukernel__psimd_2x4()
103 o0 += 2; in xnn_f32_prelu_ukernel__psimd_2x4()
107 psimd_store1_f32(o0, vacc0x0123); in xnn_f32_prelu_ukernel__psimd_2x4()
110 o0 += 1; in xnn_f32_prelu_ukernel__psimd_2x4()
115 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_prelu_ukernel__psimd_2x4()
[all …]
neon-2x8.c
33 float* o0 = output; in xnn_f32_prelu_ukernel__neon_2x8() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_prelu_ukernel__neon_2x8()
38 o1 = o0; in xnn_f32_prelu_ukernel__neon_2x8()
82 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_prelu_ukernel__neon_2x8()
83 vst1q_f32(o0, vacc0x4567); o0 += 4; in xnn_f32_prelu_ukernel__neon_2x8()
109 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_prelu_ukernel__neon_2x8()
137 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_prelu_ukernel__neon_2x8()
144 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_prelu_ukernel__neon_2x8()
149 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_prelu_ukernel__neon_2x8()
154 o1 = o0; in xnn_f32_prelu_ukernel__neon_2x8()
neon-2x4.c
33 float* o0 = output; in xnn_f32_prelu_ukernel__neon_2x4() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_prelu_ukernel__neon_2x4()
38 o1 = o0; in xnn_f32_prelu_ukernel__neon_2x4()
69 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_prelu_ukernel__neon_2x4()
97 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_prelu_ukernel__neon_2x4()
104 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_prelu_ukernel__neon_2x4()
109 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_prelu_ukernel__neon_2x4()
114 o1 = o0; in xnn_f32_prelu_ukernel__neon_2x4()
scalar-2x4.c
33 float* o0 = output; in xnn_f32_prelu_ukernel__scalar_2x4() local
35 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_f32_prelu_ukernel__scalar_2x4()
38 o1 = o0; in xnn_f32_prelu_ukernel__scalar_2x4()
93 o0[0] = vacc0x0; in xnn_f32_prelu_ukernel__scalar_2x4()
94 o0[1] = vacc0x1; in xnn_f32_prelu_ukernel__scalar_2x4()
95 o0[2] = vacc0x2; in xnn_f32_prelu_ukernel__scalar_2x4()
96 o0[3] = vacc0x3; in xnn_f32_prelu_ukernel__scalar_2x4()
97 o0 += 4; in xnn_f32_prelu_ukernel__scalar_2x4()
121 *o0++ = vacc0; in xnn_f32_prelu_ukernel__scalar_2x4()
125 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_prelu_ukernel__scalar_2x4()
[all …]
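
Note: the f32-prelu kernels reuse the identical two-row pointer scaffolding; only the per-element operation differs. Assuming the standard PReLU definition (the compute lines are truncated in the results above; the weights pointer supplies a per-channel slope), each element works out to:

    /* Illustrative scalar core; the SIMD variants vectorize this. */
    static inline float prelu(float x, float w) {
      return x < 0.0f ? x * w : x;
    }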
