
Searched refs: o0 (Results 1 – 25 of 885), sorted by relevance


/external/XNNPACK/src/x24-transposec/gen/
4x1-scalar.c
37 uint8_t* o0 = (uint8_t*) output; in xnn_x24_transposec_ukernel__4x1_scalar() local
42 o0[0] = i0[0]; in xnn_x24_transposec_ukernel__4x1_scalar()
43 o0[1] = i0[1]; in xnn_x24_transposec_ukernel__4x1_scalar()
44 o0[2] = i0[2]; in xnn_x24_transposec_ukernel__4x1_scalar()
45 o0[3] = i1[0]; in xnn_x24_transposec_ukernel__4x1_scalar()
46 o0[4] = i1[1]; in xnn_x24_transposec_ukernel__4x1_scalar()
47 o0[5] = i1[2]; in xnn_x24_transposec_ukernel__4x1_scalar()
48 o0[6] = i2[0]; in xnn_x24_transposec_ukernel__4x1_scalar()
49 o0[7] = i2[1]; in xnn_x24_transposec_ukernel__4x1_scalar()
50 o0[8] = i2[2]; in xnn_x24_transposec_ukernel__4x1_scalar()
[all …]
4x2-scalar.c
37 uint8_t* o0 = (uint8_t*) output; in xnn_x24_transposec_ukernel__4x2_scalar() local
38 uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride); in xnn_x24_transposec_ukernel__4x2_scalar()
42 o1 = o0; in xnn_x24_transposec_ukernel__4x2_scalar()
59 o0[0] = i0[0]; in xnn_x24_transposec_ukernel__4x2_scalar()
60 o0[1] = i0[1]; in xnn_x24_transposec_ukernel__4x2_scalar()
61 o0[2] = i0[2]; in xnn_x24_transposec_ukernel__4x2_scalar()
62 o0[3] = i1[0]; in xnn_x24_transposec_ukernel__4x2_scalar()
63 o0[4] = i1[1]; in xnn_x24_transposec_ukernel__4x2_scalar()
64 o0[5] = i1[2]; in xnn_x24_transposec_ukernel__4x2_scalar()
65 o0[6] = i2[0]; in xnn_x24_transposec_ukernel__4x2_scalar()
[all …]
4x4-scalar.c
37 uint8_t* o0 = (uint8_t*) output; in xnn_x24_transposec_ukernel__4x4_scalar() local
38 uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride); in xnn_x24_transposec_ukernel__4x4_scalar()
44 o1 = o0; in xnn_x24_transposec_ukernel__4x4_scalar()
47 o2 = o0; in xnn_x24_transposec_ukernel__4x4_scalar()
50 o3 = o0; in xnn_x24_transposec_ukernel__4x4_scalar()
93 o0[0] = i0[0]; in xnn_x24_transposec_ukernel__4x4_scalar()
94 o0[1] = i0[1]; in xnn_x24_transposec_ukernel__4x4_scalar()
95 o0[2] = i0[2]; in xnn_x24_transposec_ukernel__4x4_scalar()
96 o0[3] = i1[0]; in xnn_x24_transposec_ukernel__4x4_scalar()
97 o0[4] = i1[1]; in xnn_x24_transposec_ukernel__4x4_scalar()
[all …]
2x2-scalar.c
35 uint8_t* o0 = (uint8_t*) output; in xnn_x24_transposec_ukernel__2x2_scalar() local
36 uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride); in xnn_x24_transposec_ukernel__2x2_scalar()
40 o1 = o0; in xnn_x24_transposec_ukernel__2x2_scalar()
51 o0[0] = i0[0]; in xnn_x24_transposec_ukernel__2x2_scalar()
52 o0[1] = i0[1]; in xnn_x24_transposec_ukernel__2x2_scalar()
53 o0[2] = i0[2]; in xnn_x24_transposec_ukernel__2x2_scalar()
54 o0[3] = i1[0]; in xnn_x24_transposec_ukernel__2x2_scalar()
55 o0[4] = i1[1]; in xnn_x24_transposec_ukernel__2x2_scalar()
56 o0[5] = i1[2]; in xnn_x24_transposec_ukernel__2x2_scalar()
57 o0 += 6; in xnn_x24_transposec_ukernel__2x2_scalar()
[all …]
2x1-scalar.c
35 uint8_t* o0 = (uint8_t*) output; in xnn_x24_transposec_ukernel__2x1_scalar() local
40 o0[0] = i0[0]; in xnn_x24_transposec_ukernel__2x1_scalar()
41 o0[1] = i0[1]; in xnn_x24_transposec_ukernel__2x1_scalar()
42 o0[2] = i0[2]; in xnn_x24_transposec_ukernel__2x1_scalar()
43 o0[3] = i1[0]; in xnn_x24_transposec_ukernel__2x1_scalar()
44 o0[4] = i1[1]; in xnn_x24_transposec_ukernel__2x1_scalar()
45 o0[5] = i1[2]; in xnn_x24_transposec_ukernel__2x1_scalar()
46 o0 += 6; in xnn_x24_transposec_ukernel__2x1_scalar()
52 o0[0] = i[0]; in xnn_x24_transposec_ukernel__2x1_scalar()
53 o0[1] = i[1]; in xnn_x24_transposec_ukernel__2x1_scalar()
[all …]
2x4-scalar.c
35 uint8_t* o0 = (uint8_t*) output; in xnn_x24_transposec_ukernel__2x4_scalar() local
36 uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride); in xnn_x24_transposec_ukernel__2x4_scalar()
42 o1 = o0; in xnn_x24_transposec_ukernel__2x4_scalar()
45 o2 = o0; in xnn_x24_transposec_ukernel__2x4_scalar()
48 o3 = o0; in xnn_x24_transposec_ukernel__2x4_scalar()
73 o0[0] = i0[0]; in xnn_x24_transposec_ukernel__2x4_scalar()
74 o0[1] = i0[1]; in xnn_x24_transposec_ukernel__2x4_scalar()
75 o0[2] = i0[2]; in xnn_x24_transposec_ukernel__2x4_scalar()
76 o0[3] = i1[0]; in xnn_x24_transposec_ukernel__2x4_scalar()
77 o0[4] = i1[1]; in xnn_x24_transposec_ukernel__2x4_scalar()
[all …]
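
The x24 kernels above move 24-bit (3-byte) elements, so each element copy is three byte stores through o0. A minimal reference sketch of the same access pattern, with my own names and without XNNPACK's tail handling:

#include <stddef.h>
#include <stdint.h>

/* Transpose a rows x cols matrix of packed 24-bit (3-byte) elements.
 * Strides are in bytes, as in the generated kernels above. */
static void transpose_x24_ref(const uint8_t *input, uint8_t *output,
                              size_t rows, size_t cols,
                              size_t input_stride, size_t output_stride)
{
  for (size_t r = 0; r < rows; r++) {
    const uint8_t *i0 = input + r * input_stride;
    for (size_t c = 0; c < cols; c++) {
      uint8_t *o0 = output + c * output_stride + 3 * r;
      o0[0] = i0[3 * c + 0];  /* three byte stores per element, */
      o0[1] = i0[3 * c + 1];  /* exactly as in the unrolled     */
      o0[2] = i0[3 * c + 2];  /* kernels above                  */
    }
  }
}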
/external/python/cpython2/Modules/_ctypes/libffi/src/sparc/
v8.S
43 add %o0, %o1, %o2
45 1: flush %o0
47 1: iflush %o0
49 add %o0, 8, %o0
50 cmp %o0, %o2
78 mov %l0, %o0 ! call routine to set up frame
82 ld [%l0+ARGS], %o0 ! call foreign function
99 st %o0, [%i4] ! (delay)
111 sll %o0, 24, %o0 ! (delay)
115 sll %o0, 24, %o0 ! (delay)
[all …]
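
The matched lines show %o0 stepping through a code region 8 bytes at a time between flush/iflush: the instruction-cache flush that follows writing a trampoline. A hedged C equivalent (sketch only; assumes GCC-style inline asm on 32-bit SPARC, and the function name is mine):

#include <stddef.h>

/* Flush a freshly written code region from the instruction cache,
 * mirroring the "add %o0, 8, %o0 / cmp %o0, %o2" loop in v8.S. */
static void flush_icache(void *code, size_t size)
{
  char *p = (char *) code;
  char *end = p + size;
  for (; p < end; p += 8)
    __asm__ __volatile__("flush %0" : : "r"(p) : "memory");
}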
/external/libffi/src/sparc/
v8.S
54 1: iflush %o0
82 mov %i0, %o0 ! copy cif
89 and %o0, SPARC_FLAG_RET_MASK, %l0 ! save return type
90 srl %o0, SPARC_SIZEMASK_SHIFT, %l1 ! save return size
91 ld [%sp+64+4], %o0 ! load all argument registers
119 and %o0, 0xff, %o0
120 st %o0, [%i2]
124 sll %o0, 24, %o0
126 sra %o0, 24, %o0
128 sll %o0, 16, %o0
[all …]
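
Lines 89 and 90 split one flag word held in %o0 into a return type and a return size. The same split in C; the constant names come from the listing, but the values below are illustrative, not libffi's:

#include <stdint.h>

#define SPARC_FLAG_RET_MASK  0xff  /* illustrative value */
#define SPARC_SIZEMASK_SHIFT 16    /* illustrative value */

/* "and %o0, SPARC_FLAG_RET_MASK, %l0"  -> return type
 * "srl %o0, SPARC_SIZEMASK_SHIFT, %l1" -> return size */
static inline void unpack_ret_flags(uint32_t flags,
                                    uint32_t *ret_type,
                                    uint32_t *ret_size)
{
  *ret_type = flags & SPARC_FLAG_RET_MASK;
  *ret_size = flags >> SPARC_SIZEMASK_SHIFT;
}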
/external/llvm/test/CodeGen/SPARC/
basictest.ll
7 ; CHECK: add %o0, 1, %o0
17 ; CHECK: xnor %o0, %o1, %o0
25 ; CHECK: xnor %o0, %o1, %o0
29 ; CHECK: st %g0, [%o0]
40 ; CHECK: sra %o0, 31, %o2
42 ; CHECK: sdiv %o0, %o1, %o0
50 ; CHECK: udiv %o0, %o1, %o0
57 ; CHECK: smul %o0, %o1, %o0
64 ; CHECK: smul %o0, %o1, %o1
65 ; CHECK: rd %y, %o0
[all …]
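
For orientation, %o0 and %o1 are the first two incoming argument registers and %o0 carries the return value in the SPARC calling convention, so the CHECK lines above correspond to these C-level operations (function names are mine):

#include <stdint.h>

int32_t  inc(int32_t a)               { return a + 1; }    /* add %o0, 1, %o0 */
int32_t  xnor(int32_t a, int32_t b)   { return ~(a ^ b); } /* xnor %o0, %o1, %o0 */
int32_t  sdiv(int32_t a, int32_t b)   { return a / b; }    /* sra %o0, 31 into %y, then sdiv */
uint32_t udiv(uint32_t a, uint32_t b) { return a / b; }    /* udiv %o0, %o1, %o0 */
int32_t  smul(int32_t a, int32_t b)   { return a * b; }    /* smul %o0, %o1, %o0 */
void     store_zero(int32_t *p)       { *p = 0; }          /* st %g0, [%o0] */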
/external/XNNPACK/src/f32-prelu/gen/
neon-1x16.c
32 float* o0 = output; in xnn_f32_prelu_ukernel__neon_1x16() local
66 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x16()
67 vst1q_f32(o0, vacc0x4567); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x16()
68 vst1q_f32(o0, vacc0x89AB); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x16()
69 vst1q_f32(o0, vacc0xCDEF); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x16()
82 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x16()
97 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_prelu_ukernel__neon_1x16()
102 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_prelu_ukernel__neon_1x16()
106 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_prelu_ukernel__neon_1x16()
wasmsimd-bitselect-1x16.c
32 float* o0 = output; in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16() local
68 wasm_v128_store(o0, vacc0x0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
69 wasm_v128_store(o0 + 4, vacc0x4567); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
70 wasm_v128_store(o0 + 8, vacc0x89AB); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
71 wasm_v128_store(o0 + 12, vacc0xCDEF); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
72 o0 += 16; in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
86 wasm_v128_store(o0, vacc0x0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
87 o0 += 4; in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
102 *((double*) o0) = wasm_f64x2_extract_lane(vacc0x0123, 0); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
106 o0 += 2; in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16()
[all …]
neon-1x8.c
32 float* o0 = output; in xnn_f32_prelu_ukernel__neon_1x8() local
56 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x8()
57 vst1q_f32(o0, vacc0x4567); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x8()
70 vst1q_f32(o0, vacc0x0123); o0 += 4; in xnn_f32_prelu_ukernel__neon_1x8()
85 vst1_f32(o0, vacc0x01); o0 += 2; in xnn_f32_prelu_ukernel__neon_1x8()
90 vst1_lane_f32(o0, vacc0x01, 0); o0 += 1; in xnn_f32_prelu_ukernel__neon_1x8()
94 o0 = (float*) ((uintptr_t) o0 + output_increment); in xnn_f32_prelu_ukernel__neon_1x8()
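
The PReLU kernels above stream a single output row through o0 and advance it by output_increment between rows. A scalar reference for the per-element operation they vectorize (my names and signature, not XNNPACK's):

#include <stddef.h>

/* y = x if x >= 0, else slope[c] * x, per channel c. */
static void prelu_f32_ref(size_t rows, size_t channels,
                          const float *x, size_t x_stride /* elements */,
                          const float *slope,
                          float *y, size_t y_stride /* elements */)
{
  for (size_t r = 0; r < rows; r++) {
    const float *i0 = x + r * x_stride;
    float *o0 = y + r * y_stride;
    for (size_t c = 0; c < channels; c++) {
      const float v = i0[c];
      o0[c] = v >= 0.0f ? v : v * slope[c];
    }
  }
}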
/external/XNNPACK/src/x24-transposec/
2x2-neon-tbl.c
40 uint8_t* o0 = (uint8_t*) output; in xnn_x24_transposec_ukernel__2x2_neon_tbl() local
41 uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride); in xnn_x24_transposec_ukernel__2x2_neon_tbl()
47 o1 = o0; in xnn_x24_transposec_ukernel__2x2_neon_tbl()
59 … vst1_lane_u32((void*) o0, vreinterpret_u32_u8(vres0), 0); o0 = (uint8_t*) ((uintptr_t) o0 + 4); in xnn_x24_transposec_ukernel__2x2_neon_tbl()
61 …vst1_lane_u16((void*) o0, vreinterpret_u16_u8(vres0), 2); o0 = (uint8_t*) ((uintptr_t) o0 + tile_w… in xnn_x24_transposec_ukernel__2x2_neon_tbl()
75 vst1_lane_u16((void*) o0, vreinterpret_u16_u8(vres0), 0); o0 += 2; in xnn_x24_transposec_ukernel__2x2_neon_tbl()
77 vst1_lane_u8(o0, vres0, 2); o0 += 1; in xnn_x24_transposec_ukernel__2x2_neon_tbl()
82 o0 = (uint8_t*) ((uintptr_t) o0 + output_reset); in xnn_x24_transposec_ukernel__2x2_neon_tbl()
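
The neon-tbl variant replaces per-byte copies with a table lookup: transposing a 2x2 tile of 3-byte elements is one fixed 12-byte permutation. A portable sketch of that permutation table (indices are mine, assuming row-major tiles):

#include <stdint.h>

/* Input tile [a b; c d], packed a0a1a2 b0b1b2 c0c1c2 d0d1d2.
 * Transposed output is [a c; b d]. */
static const uint8_t kTbl2x2x24[12] = {
  0, 1, 2,  6, 7, 8,   /* output row 0: a, c */
  3, 4, 5,  9, 10, 11, /* output row 1: b, d */
};

static void transpose_2x2_x24_tbl_ref(const uint8_t *in, uint8_t *out)
{
  for (int k = 0; k < 12; k++)
    out[k] = in[kTbl2x2x24[k]];  /* what vtbl does per 8-byte chunk */
}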
/external/XNNPACK/src/x32-transposec/gen/
4x4-scalar-float.c
40 float* o0 = (float*) output; in xnn_x32_transposec_ukernel__4x4_scalar_float() local
41 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_x32_transposec_ukernel__4x4_scalar_float()
47 o1 = o0; in xnn_x32_transposec_ukernel__4x4_scalar_float()
50 o2 = o0; in xnn_x32_transposec_ukernel__4x4_scalar_float()
53 o3 = o0; in xnn_x32_transposec_ukernel__4x4_scalar_float()
69 *o0++ = i0[0]; in xnn_x32_transposec_ukernel__4x4_scalar_float()
70 *o0++ = i1[0]; in xnn_x32_transposec_ukernel__4x4_scalar_float()
71 *o0++ = i2[0]; in xnn_x32_transposec_ukernel__4x4_scalar_float()
72 *o0++ = i3[0]; in xnn_x32_transposec_ukernel__4x4_scalar_float()
89 o0[0] = i0[0]; in xnn_x32_transposec_ukernel__4x4_scalar_float()
[all …]
4x4-scalar-int.c
40 int* o0 = (int*) output; in xnn_x32_transposec_ukernel__4x4_scalar_int() local
41 int* o1 = (int*) ((uintptr_t) o0 + output_stride); in xnn_x32_transposec_ukernel__4x4_scalar_int()
47 o1 = o0; in xnn_x32_transposec_ukernel__4x4_scalar_int()
50 o2 = o0; in xnn_x32_transposec_ukernel__4x4_scalar_int()
53 o3 = o0; in xnn_x32_transposec_ukernel__4x4_scalar_int()
69 *o0++ = i0[0]; in xnn_x32_transposec_ukernel__4x4_scalar_int()
70 *o0++ = i1[0]; in xnn_x32_transposec_ukernel__4x4_scalar_int()
71 *o0++ = i2[0]; in xnn_x32_transposec_ukernel__4x4_scalar_int()
72 *o0++ = i3[0]; in xnn_x32_transposec_ukernel__4x4_scalar_int()
89 o0[0] = i0[0]; in xnn_x32_transposec_ukernel__4x4_scalar_int()
[all …]
4x2-scalar-int.c
40 int* o0 = (int*) output; in xnn_x32_transposec_ukernel__4x2_scalar_int() local
41 int* o1 = (int*) ((uintptr_t) o0 + output_stride); in xnn_x32_transposec_ukernel__4x2_scalar_int()
45 o1 = o0; in xnn_x32_transposec_ukernel__4x2_scalar_int()
53 *o0++ = i0[0]; in xnn_x32_transposec_ukernel__4x2_scalar_int()
54 *o0++ = i1[0]; in xnn_x32_transposec_ukernel__4x2_scalar_int()
55 *o0++ = i2[0]; in xnn_x32_transposec_ukernel__4x2_scalar_int()
56 *o0++ = i3[0]; in xnn_x32_transposec_ukernel__4x2_scalar_int()
67 o0[0] = i0[0]; in xnn_x32_transposec_ukernel__4x2_scalar_int()
68 o0[1] = i1[0]; in xnn_x32_transposec_ukernel__4x2_scalar_int()
69 o0 += 2; in xnn_x32_transposec_ukernel__4x2_scalar_int()
[all …]
4x2-scalar-float.c
40 float* o0 = (float*) output; in xnn_x32_transposec_ukernel__4x2_scalar_float() local
41 float* o1 = (float*) ((uintptr_t) o0 + output_stride); in xnn_x32_transposec_ukernel__4x2_scalar_float()
45 o1 = o0; in xnn_x32_transposec_ukernel__4x2_scalar_float()
53 *o0++ = i0[0]; in xnn_x32_transposec_ukernel__4x2_scalar_float()
54 *o0++ = i1[0]; in xnn_x32_transposec_ukernel__4x2_scalar_float()
55 *o0++ = i2[0]; in xnn_x32_transposec_ukernel__4x2_scalar_float()
56 *o0++ = i3[0]; in xnn_x32_transposec_ukernel__4x2_scalar_float()
67 o0[0] = i0[0]; in xnn_x32_transposec_ukernel__4x2_scalar_float()
68 o0[1] = i1[0]; in xnn_x32_transposec_ukernel__4x2_scalar_float()
69 o0 += 2; in xnn_x32_transposec_ukernel__4x2_scalar_float()
[all …]
4x1-scalar-int.c
40 int* o0 = (int*) output; in xnn_x32_transposec_ukernel__4x1_scalar_int() local
45 *o0++ = i0[0]; in xnn_x32_transposec_ukernel__4x1_scalar_int()
46 *o0++ = i1[0]; in xnn_x32_transposec_ukernel__4x1_scalar_int()
47 *o0++ = i2[0]; in xnn_x32_transposec_ukernel__4x1_scalar_int()
48 *o0++ = i3[0]; in xnn_x32_transposec_ukernel__4x1_scalar_int()
56 o0[0] = i0[0]; in xnn_x32_transposec_ukernel__4x1_scalar_int()
57 o0[1] = i1[0]; in xnn_x32_transposec_ukernel__4x1_scalar_int()
58 o0 += 2; in xnn_x32_transposec_ukernel__4x1_scalar_int()
62 o0[0] = i[0]; in xnn_x32_transposec_ukernel__4x1_scalar_int()
69 o0 = (int*) ((uintptr_t) o0 + output_reset); in xnn_x32_transposec_ukernel__4x1_scalar_int()
4x1-scalar-float.c
40 float* o0 = (float*) output; in xnn_x32_transposec_ukernel__4x1_scalar_float() local
45 *o0++ = i0[0]; in xnn_x32_transposec_ukernel__4x1_scalar_float()
46 *o0++ = i1[0]; in xnn_x32_transposec_ukernel__4x1_scalar_float()
47 *o0++ = i2[0]; in xnn_x32_transposec_ukernel__4x1_scalar_float()
48 *o0++ = i3[0]; in xnn_x32_transposec_ukernel__4x1_scalar_float()
56 o0[0] = i0[0]; in xnn_x32_transposec_ukernel__4x1_scalar_float()
57 o0[1] = i1[0]; in xnn_x32_transposec_ukernel__4x1_scalar_float()
58 o0 += 2; in xnn_x32_transposec_ukernel__4x1_scalar_float()
62 o0[0] = i[0]; in xnn_x32_transposec_ukernel__4x1_scalar_float()
69 o0 = (float*) ((uintptr_t) o0 + output_reset); in xnn_x32_transposec_ukernel__4x1_scalar_float()
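
All the x32 kernels above share one skeleton: output row pointers o0..o3 spaced output_stride bytes apart, where output row k receives input column k. A minimal full-tile version without the tail paths (names mine):

#include <stddef.h>
#include <stdint.h>

/* One full 4x4 tile of 32-bit elements; strides are in bytes. */
static void transpose_4x4_x32_tile(const uint32_t *input, size_t input_stride,
                                   uint32_t *output, size_t output_stride)
{
  const uint32_t *i0 = input;
  const uint32_t *i1 = (const uint32_t *) ((uintptr_t) i0 + input_stride);
  const uint32_t *i2 = (const uint32_t *) ((uintptr_t) i1 + input_stride);
  const uint32_t *i3 = (const uint32_t *) ((uintptr_t) i2 + input_stride);
  uint32_t *o0 = output;
  uint32_t *o1 = (uint32_t *) ((uintptr_t) o0 + output_stride);
  uint32_t *o2 = (uint32_t *) ((uintptr_t) o1 + output_stride);
  uint32_t *o3 = (uint32_t *) ((uintptr_t) o2 + output_stride);

  o0[0] = i0[0]; o0[1] = i1[0]; o0[2] = i2[0]; o0[3] = i3[0];
  o1[0] = i0[1]; o1[1] = i1[1]; o1[2] = i2[1]; o1[3] = i3[1];
  o2[0] = i0[2]; o2[1] = i1[2]; o2[2] = i2[2]; o2[3] = i3[2];
  o3[0] = i0[3]; o3[1] = i1[3]; o3[2] = i2[3]; o3[3] = i3[3];
}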
/external/XNNPACK/src/x64-transposec/gen/
4x2-scalar-float.c
40 double* o0 = (double*) output; in xnn_x64_transposec_ukernel__4x2_scalar_float() local
41 double* o1 = (double*) ((uintptr_t) o0 + output_stride); in xnn_x64_transposec_ukernel__4x2_scalar_float()
45 o1 = o0; in xnn_x64_transposec_ukernel__4x2_scalar_float()
53 *o0++ = i0[0]; in xnn_x64_transposec_ukernel__4x2_scalar_float()
54 *o0++ = i1[0]; in xnn_x64_transposec_ukernel__4x2_scalar_float()
55 *o0++ = i2[0]; in xnn_x64_transposec_ukernel__4x2_scalar_float()
56 *o0++ = i3[0]; in xnn_x64_transposec_ukernel__4x2_scalar_float()
67 o0[0] = i0[0]; in xnn_x64_transposec_ukernel__4x2_scalar_float()
68 o0[1] = i1[0]; in xnn_x64_transposec_ukernel__4x2_scalar_float()
69 o0 += 2; in xnn_x64_transposec_ukernel__4x2_scalar_float()
[all …]
4x2-scalar-int.c
40 int64_t* o0 = (int64_t*) output; in xnn_x64_transposec_ukernel__4x2_scalar_int() local
41 int64_t* o1 = (int64_t*) ((uintptr_t) o0 + output_stride); in xnn_x64_transposec_ukernel__4x2_scalar_int()
45 o1 = o0; in xnn_x64_transposec_ukernel__4x2_scalar_int()
53 *o0++ = i0[0]; in xnn_x64_transposec_ukernel__4x2_scalar_int()
54 *o0++ = i1[0]; in xnn_x64_transposec_ukernel__4x2_scalar_int()
55 *o0++ = i2[0]; in xnn_x64_transposec_ukernel__4x2_scalar_int()
56 *o0++ = i3[0]; in xnn_x64_transposec_ukernel__4x2_scalar_int()
67 o0[0] = i0[0]; in xnn_x64_transposec_ukernel__4x2_scalar_int()
68 o0[1] = i1[0]; in xnn_x64_transposec_ukernel__4x2_scalar_int()
69 o0 += 2; in xnn_x64_transposec_ukernel__4x2_scalar_int()
[all …]
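
The -int and -float x64 variants above are the same transpose over int64_t versus double (both 8 bytes wide); only the pointer type changes. A sketch of stamping both out of one body; this macro is mine, not XNNPACK's generator:

#include <stdint.h>

#define DEFINE_TRANSPOSE_2X2(NAME, TYPE)                          \
  static void NAME(const TYPE *i0, const TYPE *i1,                \
                   TYPE *o0, TYPE *o1)                            \
  {                                                               \
    o0[0] = i0[0]; o0[1] = i1[0]; /* output row 0 = column 0 */   \
    o1[0] = i0[1]; o1[1] = i1[1]; /* output row 1 = column 1 */   \
  }

DEFINE_TRANSPOSE_2X2(transpose_2x2_x64_int,   int64_t)
DEFINE_TRANSPOSE_2X2(transpose_2x2_x64_float, double)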
/external/XNNPACK/src/x8-transposec/gen/
4x4-scalar-int.c
40 int8_t* o0 = (int8_t*) output; in xnn_x8_transposec_ukernel__4x4_scalar_int() local
41 int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride); in xnn_x8_transposec_ukernel__4x4_scalar_int()
47 o1 = o0; in xnn_x8_transposec_ukernel__4x4_scalar_int()
50 o2 = o0; in xnn_x8_transposec_ukernel__4x4_scalar_int()
53 o3 = o0; in xnn_x8_transposec_ukernel__4x4_scalar_int()
69 *o0++ = i0[0]; in xnn_x8_transposec_ukernel__4x4_scalar_int()
70 *o0++ = i1[0]; in xnn_x8_transposec_ukernel__4x4_scalar_int()
71 *o0++ = i2[0]; in xnn_x8_transposec_ukernel__4x4_scalar_int()
72 *o0++ = i3[0]; in xnn_x8_transposec_ukernel__4x4_scalar_int()
89 o0[0] = i0[0]; in xnn_x8_transposec_ukernel__4x4_scalar_int()
[all …]
4x2-scalar-int.c
40 int8_t* o0 = (int8_t*) output; in xnn_x8_transposec_ukernel__4x2_scalar_int() local
41 int8_t* o1 = (int8_t*) ((uintptr_t) o0 + output_stride); in xnn_x8_transposec_ukernel__4x2_scalar_int()
45 o1 = o0; in xnn_x8_transposec_ukernel__4x2_scalar_int()
53 *o0++ = i0[0]; in xnn_x8_transposec_ukernel__4x2_scalar_int()
54 *o0++ = i1[0]; in xnn_x8_transposec_ukernel__4x2_scalar_int()
55 *o0++ = i2[0]; in xnn_x8_transposec_ukernel__4x2_scalar_int()
56 *o0++ = i3[0]; in xnn_x8_transposec_ukernel__4x2_scalar_int()
67 o0[0] = i0[0]; in xnn_x8_transposec_ukernel__4x2_scalar_int()
68 o0[1] = i1[0]; in xnn_x8_transposec_ukernel__4x2_scalar_int()
69 o0 += 2; in xnn_x8_transposec_ukernel__4x2_scalar_int()
[all …]
/external/XNNPACK/src/x16-transposec/gen/
4x4-scalar-int.c
40 int16_t* o0 = (int16_t*) output; in xnn_x16_transposec_ukernel__4x4_scalar_int() local
41 int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride); in xnn_x16_transposec_ukernel__4x4_scalar_int()
47 o1 = o0; in xnn_x16_transposec_ukernel__4x4_scalar_int()
50 o2 = o0; in xnn_x16_transposec_ukernel__4x4_scalar_int()
53 o3 = o0; in xnn_x16_transposec_ukernel__4x4_scalar_int()
69 *o0++ = i0[0]; in xnn_x16_transposec_ukernel__4x4_scalar_int()
70 *o0++ = i1[0]; in xnn_x16_transposec_ukernel__4x4_scalar_int()
71 *o0++ = i2[0]; in xnn_x16_transposec_ukernel__4x4_scalar_int()
72 *o0++ = i3[0]; in xnn_x16_transposec_ukernel__4x4_scalar_int()
89 o0[0] = i0[0]; in xnn_x16_transposec_ukernel__4x4_scalar_int()
[all …]
4x2-scalar-int.c
40 int16_t* o0 = (int16_t*) output; in xnn_x16_transposec_ukernel__4x2_scalar_int() local
41 int16_t* o1 = (int16_t*) ((uintptr_t) o0 + output_stride); in xnn_x16_transposec_ukernel__4x2_scalar_int()
45 o1 = o0; in xnn_x16_transposec_ukernel__4x2_scalar_int()
53 *o0++ = i0[0]; in xnn_x16_transposec_ukernel__4x2_scalar_int()
54 *o0++ = i1[0]; in xnn_x16_transposec_ukernel__4x2_scalar_int()
55 *o0++ = i2[0]; in xnn_x16_transposec_ukernel__4x2_scalar_int()
56 *o0++ = i3[0]; in xnn_x16_transposec_ukernel__4x2_scalar_int()
67 o0[0] = i0[0]; in xnn_x16_transposec_ukernel__4x2_scalar_int()
68 o0[1] = i1[0]; in xnn_x16_transposec_ukernel__4x2_scalar_int()
69 o0 += 2; in xnn_x16_transposec_ukernel__4x2_scalar_int()
[all …]
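
Finally, the recurring "o1 = o0" lines in these results are a branch-free tail-handling trick: when fewer rows remain than the unrolled body covers, the out-of-range input and output row pointers are aliased down to valid ones, so the extra loads and stores stay in bounds and merely rewrite row 0 with identical data. A sketch of the idea over 16-bit elements (names mine; the real kernels unroll further):

#include <stddef.h>
#include <stdint.h>

/* Process up to 2 rows per pass; strides are in bytes. */
static void copy_rows_2x(const int16_t *input, size_t input_stride,
                         int16_t *output, size_t output_stride,
                         size_t rows, size_t cols)
{
  while (rows != 0) {
    const int16_t *i0 = input;
    const int16_t *i1 = (const int16_t *) ((uintptr_t) i0 + input_stride);
    int16_t *o0 = output;
    int16_t *o1 = (int16_t *) ((uintptr_t) o0 + output_stride);
    if (rows < 2) {   /* the "o1 = o0" clamp from the listings above */
      i1 = i0;
      o1 = o0;
    }
    for (size_t c = 0; c < cols; c++) {
      o0[c] = i0[c];
      o1[c] = i1[c];  /* when clamped, rewrites row 0 with identical data */
    }
    input  = (const int16_t *) ((uintptr_t) input  + 2 * input_stride);
    output = (int16_t *) ((uintptr_t) output + 2 * output_stride);
    rows -= rows < 2 ? rows : 2;
  }
}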
