Searched refs:__fp16 (Results 1 – 25 of 45) sorted by relevance

/external/clang/test/Sema/
fp16-sema.c
4 extern void f (__fp16); // expected-error {{parameters cannot have __fp16 type; did you forget * ?}}
5 extern void g (__fp16 *);
7 extern void (*pf) (__fp16); // expected-error {{parameters cannot have __fp16 type; did you forget…
8 extern void (*pg) (__fp16*);
10 typedef void(*tf) (__fp16); // expected-error {{parameters cannot have __fp16 type; did you forget…
11 typedef void(*tg) (__fp16*);
14 __fp16 a) { // expected-error {{parameters cannot have __fp16 type; did you forget * ?}} in kf()
18 __fp16 *a; { in kg()
22 extern __fp16 f1 (void); // expected-error {{function return value cannot have __fp16 type; did you…
23 extern __fp16 *g1 (void);
[all …]
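The fp16-sema.c hits above exercise Clang's rule that, on targets where this diagnostic applies, __fp16 is a storage-only type: it cannot appear as a by-value parameter or return type, while pointers to it are fine. A minimal sketch of the rule (not one of the indexed files; whether the errors fire depends on the target and Clang version):

    __fp16 h;                      /* an object of __fp16 type is allowed */

    void store(__fp16 *p) {        /* pointer parameter: allowed */
        h = *p;
    }

    /* void take(__fp16 x);   error: parameters cannot have __fp16 type            */
    /* __fp16 make(void);     error: function return value cannot have __fp16 type */
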
renderscript.rs
19 __fp16 fp16_return();
24 void fp16_arg(__fp16 p);
/external/XNNPACK/src/f16-gemm/gen/
8x8-neonfp16arith-ld64.c
37 assert(kc % sizeof(__fp16) == 0); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
39 const __fp16* a0 = a; in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
40 __fp16* c0 = c; in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
41 const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
42 __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
47 const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
48 __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
53 const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
54 __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
59 const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride); in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
[all …]
6x8-neonfp16arith-ld64.c
37 assert(kc % sizeof(__fp16) == 0); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
39 const __fp16* a0 = a; in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
40 __fp16* c0 = c; in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
41 const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
42 __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
47 const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
48 __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
53 const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
54 __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
59 const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride); in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
[all …]
4x8-neonfp16arith-ld64.c
37 assert(kc % sizeof(__fp16) == 0); in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
39 const __fp16* a0 = a; in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
40 __fp16* c0 = c; in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
41 const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride); in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
42 __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride); in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
47 const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride); in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
48 __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride); in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
53 const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride); in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
54 __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride); in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
67 while (k >= 4 * sizeof(__fp16)) { in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
[all …]
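All three generated GEMM kernels above share the same row-pointer setup: kc and the strides are byte counts, so pointers are advanced through uintptr_t rather than by element. A simplified, hypothetical sketch of that pattern (setup_rows is not an XNNPACK function; __fp16 assumes an ARM/AArch64 compiler with half-precision support):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static void setup_rows(const __fp16 *a, size_t a_stride,
                           __fp16 *c, size_t cm_stride, size_t kc) {
        assert(kc % sizeof(__fp16) == 0);   /* kc is given in bytes */

        const __fp16 *a0 = a;               /* row 0 of the input   */
        __fp16 *c0 = c;                     /* row 0 of the output  */
        const __fp16 *a1 = (const __fp16 *) ((uintptr_t) a0 + a_stride);
        __fp16 *c1 = (__fp16 *) ((uintptr_t) c0 + cm_stride);

        (void) a1; (void) c1;               /* the real kernels go on to compute with these */
    }
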
/external/clang/test/CodeGen/
fp16-ops.c
16 volatile __fp16 h0 = 0.0, h1 = 1.0, h2;
84 h1 = h0 * (__fp16) -2.0f; in foo()
114 h1 = (h0 / (__fp16) -2.0f); in foo()
144 h1 = ((__fp16)-2.0 + h0); in foo()
174 h1 = ((__fp16)-2.0f - h0); in foo()
202 test = (h2 < (__fp16)42.0); in foo()
231 test = ((__fp16)42.0 > h2); in foo()
260 test = (h2 <= (__fp16)42.0); in foo()
290 test = (h0 >= (__fp16)-2.0); in foo()
319 test = (h1 == (__fp16)1.0); in foo()
[all …]
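The fp16-ops.c hits check how Clang lowers arithmetic on __fp16: values are promoted (typically to float) for the operation and converted back to half precision on assignment, and comparisons yield int as usual. A minimal sketch, not part of the test file, assuming a compiler and target with __fp16 support:

    volatile __fp16 h0 = 0.0, h1 = 1.0, h2;
    int test;

    void foo(void) {
        h1 = h0 * (__fp16) -2.0f;     /* multiply after promotion, truncate to half on store */
        test = (h2 < (__fp16) 42.0);  /* comparison is also carried out after promotion      */
    }
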
arm-fp16-arguments.c
5 __fp16 g;
7 void t1(__fp16 a) { g = a; } in t1()
17 __fp16 t2() { return g; } in t2()
arm64-aapcs-arguments.c
46 __fp16 test_half(__fp16 A) { } in test_half()
50 struct HFA_half { __fp16 a[4]; };
catch-undef-behavior.c
231 void int_fp16_overflow(int n, __fp16 *p) { in int_fp16_overflow()
297 signed char fp16_char_overflow(__fp16 *p) { in fp16_char_overflow()
/external/XNNPACK/src/f16-spmm/gen/
8x1-neonfp16arith-unroll2.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
51 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff0); in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
55 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff1); in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
65 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
83 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_8x1__neonfp16arith_unroll2()
[all …]
8x1-neonfp16arith.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
49 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
67 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
78 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
93 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_8x1__neonfp16arith()
[all …]
16x1-neonfp16arith.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
51 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
74 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
85 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
100 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_16x1__neonfp16arith()
[all …]
16x1-neonfp16arith-unroll2.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
54 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff0); in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
60 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff1); in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
74 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
97 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_16x1__neonfp16arith_unroll2()
[all …]
24x1-neonfp16arith-unroll2.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
57 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff0); in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
65 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff1); in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
83 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
111 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
[all …]
32x1-neonfp16arith.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
55 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
88 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
101 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
120 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
[all …]
24x1-neonfp16arith.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
53 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
81 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
94 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
113 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
[all …]
32x1-neonfp16arith-unroll2.c
29 const __fp16*restrict a = input; in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
30 __fp16*restrict c = output; in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
32 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale); in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
33 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max); in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
34 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min); in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
38 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
60 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff0); in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
70 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff1); in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
92 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff); in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
125 const __fp16*restrict w = weights; in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
[all …]
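A recurring idiom in the spmm kernels above is broadcasting scalar half-precision parameters into vector registers with vld1q_dup_f16 and then scaling and clamping with FP16 arithmetic intrinsics. A hedged sketch of that idiom (the params struct and scale_and_clamp are hypothetical stand-ins, not XNNPACK's actual definitions; the intrinsics need an ARMv8.2-A FP16 / neonfp16arith target):

    #include <arm_neon.h>

    struct f16_minmax_params {      /* hypothetical stand-in for XNNPACK's params layout */
        __fp16 scale;
        __fp16 max;
        __fp16 min;
    };

    static float16x8_t scale_and_clamp(float16x8_t v,
                                       const struct f16_minmax_params *params) {
        /* vld1q_dup_f16 loads one half-precision value and broadcasts it to all 8 lanes. */
        const float16x8_t vscale = vld1q_dup_f16((const __fp16 *) &params->scale);
        const float16x8_t vmax   = vld1q_dup_f16((const __fp16 *) &params->max);
        const float16x8_t vmin   = vld1q_dup_f16((const __fp16 *) &params->min);
        /* scale, then clamp the result into [min, max] */
        return vminq_f16(vmaxq_f16(vmulq_f16(v, vscale), vmin), vmax);
    }
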
/external/clang/test/CodeGenCXX/
fp16-mangle.cpp
5 template <> int S<__fp16, __fp16>::i = 3;
8 void f (__fp16 *x) { } in f()
11 void g (__fp16 *x, __fp16 *y) { } in g()
fp16-overload.cpp
6 __fp16 a;
/external/XNNPACK/src/f16-gemm/
neonfp16arith-ld64.c.in
35 assert(kc % sizeof(__fp16) == 0);
37 const __fp16* a0 = a;
38 __fp16* c0 = c;
40 const __fp16* a${M} = (const __fp16*) ((uintptr_t) a${M-1} + a_stride);
41 __fp16* c${M} = (__fp16*) ((uintptr_t) c${M-1} + cm_stride);
66 while (k >= 4 * sizeof(__fp16)) {
86 k -= 4 * sizeof(__fp16);
100 k -= sizeof(__fp16);
104 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
109 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
neon-inline-asm-16-bit-fp.ll
4 ; __fp16 test(__fp16 a1, __fp16 a2) {
5 ; __fp16 res0;
fp16-vector-shuffle.ll
160 ; float16x4_t dup_64(__fp16 a) { return vdup_n_f16(a); }
172 ; float16x8_t dup_128(__fp16 a) { return vdupq_n_f16(a); }
252 ; float16x4_t set_lane_64(float16x4_t a, __fp16 b) { return vset_lane_f16(b, a, 2); }
266 ; float16x8_t set_lane_128(float16x8_t a, __fp16 b) { return vsetq_lane_f16(b, a, 2); }
279 ; __fp16 get_lane_64(float16x4_t a) { return vget_lane_f16(a, 2); }
291 ; __fp16 get_lane_128(float16x8_t a) { return vgetq_lane_f16(a, 2); }
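The fp16-vector-shuffle.ll comments quote the C sources the test was generated from; collected together they show the NEON half-precision dup and lane intrinsics in use (requires <arm_neon.h> and a target with the FP16 storage format):

    #include <arm_neon.h>

    float16x4_t dup_64(__fp16 a)                      { return vdup_n_f16(a); }
    float16x8_t dup_128(__fp16 a)                     { return vdupq_n_f16(a); }
    float16x4_t set_lane_64(float16x4_t a, __fp16 b)  { return vset_lane_f16(b, a, 2); }
    float16x8_t set_lane_128(float16x8_t a, __fp16 b) { return vsetq_lane_f16(b, a, 2); }
    __fp16 get_lane_64(float16x4_t a)                 { return vget_lane_f16(a, 2); }
    __fp16 get_lane_128(float16x8_t a)                { return vgetq_lane_f16(a, 2); }
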
/external/XNNPACK/src/f16-spmm/
neonfp16arith.c.in
27 const __fp16*restrict a = input;
28 __fp16*restrict c = output;
30 const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
31 const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
32 const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
36 const __fp16*restrict w = weights;
58 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff${K});
78 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff);
104 const __fp16*restrict w = weights;
129 a = (const __fp16*restrict) ((uintptr_t) a + (uintptr_t) diff);
/external/arm-neon-tests/
compute_ref_data.c
151 __fp16 buffer_float16x4[4] = {-16, -15, -14, -13};
182 __fp16 buffer_float16x8[8] = {-16, -15, -14, -13, -12, -11, -10, -9};
219 __fp16 buffer_dup_float16x4[4] = {-16, -15, -14, -13};
250 __fp16 buffer_dup_float16x8[8] = {-16, -15, -14, -13, -12, -11, -10, -9};
285 __fp16 buffer_vld2_float16x4x2[4*2] = {-16, -15, -14, -13, -12, -11, -10, -9};
318 __fp16 buffer_vld2_float16x8x2[8*2] = {-16, -15, -14, -13, -12, -11, -10, -9,
358 __fp16 buffer_vld3_float16x4x3[4*3] = {-16, -15, -14, -13, -12, -11, -10, -9,
394 __fp16 buffer_vld3_float16x8x3[8*3] = {-16, -15, -14, -13, -12, -11, -10, -9,
439 __fp16 buffer_vld4_float16x4x4[4*4] = {-16, -15, -14, -13, -12, -11, -10, -9,
477 __fp16 buffer_vld4_float16x8x4[8*4] = {-16, -15, -14, -13, -12, -11, -10, -9,
[all …]
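The arm-neon-tests buffers above are plain __fp16 arrays that the reference tests load into NEON vectors. A small sketch of how such buffers are typically consumed (load_buffers is illustrative only, not part of compute_ref_data.c):

    #include <arm_neon.h>

    __fp16 buffer_float16x4[4] = {-16, -15, -14, -13};
    __fp16 buffer_float16x8[8] = {-16, -15, -14, -13, -12, -11, -10, -9};

    void load_buffers(void) {
        float16x4_t v4 = vld1_f16(buffer_float16x4);   /* load 4 half-precision values */
        float16x8_t v8 = vld1q_f16(buffer_float16x8);  /* load 8 half-precision values */
        (void) v4;
        (void) v8;
    }
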
/external/llvm/test/CodeGen/AArch64/
fp16-vector-shuffle.ll
160 ; float16x4_t dup_64(__fp16 a) { return vdup_n_f16(a); }
172 ; float16x8_t dup_128(__fp16 a) { return vdupq_n_f16(a); }
252 ; float16x4_t set_lane_64(float16x4_t a, __fp16 b) { return vset_lane_f16(b, a, 2); }
266 ; float16x8_t set_lane_128(float16x8_t a, __fp16 b) { return vsetq_lane_f16(b, a, 2); }
279 ; __fp16 get_lane_64(float16x4_t a) { return vget_lane_f16(a, 2); }
291 ; __fp16 get_lane_128(float16x8_t a) { return vgetq_lane_f16(a, 2); }
