
Searched refs: __m64 (Results 1 – 18 of 18) sorted by relevance

/external/clang/test/CodeGen/
mmx-builtins.c
10 __m64 test1(__m64 a, __m64 b) { in test1()
15 __m64 test2(__m64 a, __m64 b) { in test2()
20 __m64 test3(__m64 a, __m64 b) { in test3()
25 __m64 test4(__m64 a, __m64 b) { in test4()
30 __m64 test5(__m64 a, __m64 b) { in test5()
35 __m64 test6(__m64 a, __m64 b) { in test6()
40 __m64 test7(__m64 a, __m64 b) { in test7()
45 __m64 test8(__m64 a, __m64 b) { in test8()
50 __m64 test9(__m64 a, __m64 b) { in test9()
55 __m64 test10(__m64 a, __m64 b) { in test10()
[all …]
3dnow-builtins.c
8 __m64 test_m_pavgusb(__m64 m1, __m64 m2) { in test_m_pavgusb()
14 __m64 test_m_pf2id(__m64 m) { in test_m_pf2id()
20 __m64 test_m_pfacc(__m64 m1, __m64 m2) { in test_m_pfacc()
26 __m64 test_m_pfadd(__m64 m1, __m64 m2) { in test_m_pfadd()
32 __m64 test_m_pfcmpeq(__m64 m1, __m64 m2) { in test_m_pfcmpeq()
38 __m64 test_m_pfcmpge(__m64 m1, __m64 m2) { in test_m_pfcmpge()
44 __m64 test_m_pfcmpgt(__m64 m1, __m64 m2) { in test_m_pfcmpgt()
50 __m64 test_m_pfmax(__m64 m1, __m64 m2) { in test_m_pfmax()
56 __m64 test_m_pfmin(__m64 m1, __m64 m2) { in test_m_pfmin()
62 __m64 test_m_pfmul(__m64 m1, __m64 m2) { in test_m_pfmul()
[all …]
asm-inout.c
43 typedef long long __m64 __attribute__((__vector_size__(8))); typedef
44 __m64 test5(__m64 __A, __m64 __B) { in test5()
vector.c
30 __m64 *p = (__m64 *)array; in test4()
32 __m64 accum = _mm_setzero_si64(); in test4()
37 __m64 accum2 = _mm_unpackhi_pi32(accum, accum); in test4()
mmx-inline-asm.c
8 __m64 vfill = _mm_cvtsi64_m64(fill); in foo()
9 __m64 v1, v2, v3, v4, v5, v6, v7; in foo()
mmx-shift-with-immediate.c
4 void shift(__m64 a, __m64 b, int c) { in shift()
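
The CodeGen tests above simply call the public MMX intrinsics and check the lowered IR. A minimal, self-contained function in the same style (hypothetical file, not one of the hits above; assumes MMX is enabled, e.g. -mmmx):

    #include <mmintrin.h>

    /* Sketch in the spirit of mmx-builtins.c: paddw, element-wise add of four 16-bit lanes. */
    __m64 test_add_pi16(__m64 a, __m64 b) {
      return _mm_add_pi16(a, b);
    }
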
/external/clang/lib/Headers/
mmintrin.h
31 typedef long long __m64 __attribute__((__vector_size__(8))); typedef
43 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
46 return (__m64)__builtin_ia32_vec_init_v2si(__i, 0); in _mm_cvtsi32_si64()
50 _mm_cvtsi64_si32(__m64 __m) in _mm_cvtsi64_si32()
55 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
58 return (__m64)__i; in _mm_cvtsi64_m64()
62 _mm_cvtm64_si64(__m64 __m) in _mm_cvtm64_si64()
67 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
68 _mm_packs_pi16(__m64 __m1, __m64 __m2) in _mm_packs_pi16()
70 return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); in _mm_packs_pi16()
[all …]
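
mmintrin.h defines __m64 as a single 8-byte vector and provides the conversions between it and plain integers seen above. A small usage sketch limited to intrinsics visible in this listing plus _mm_empty(), which also lives in mmintrin.h:

    #include <stdio.h>
    #include <mmintrin.h>

    int main(void) {
      __m64 v = _mm_cvtsi32_si64(42);   /* 32-bit int into the low half, high half zeroed */
      int r = _mm_cvtsi64_si32(v);      /* read the low 32 bits back out */
      _mm_empty();                      /* clear MMX state before any x87 floating point */
      printf("%d\n", r);                /* prints 42 */
      return 0;
    }
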
mm3dnow.h
36 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
37 _m_pavgusb(__m64 __m1, __m64 __m2) { in _m_pavgusb()
38 return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2); in _m_pavgusb()
41 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
42 _m_pf2id(__m64 __m) { in _m_pf2id()
43 return (__m64)__builtin_ia32_pf2id((__v2sf)__m); in _m_pf2id()
46 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
47 _m_pfacc(__m64 __m1, __m64 __m2) { in _m_pfacc()
48 return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2); in _m_pfacc()
51 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
[all …]
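
mm3dnow.h wraps each 3DNow! builtin the same way: cast the __m64 operands to the element view the builtin expects and cast the result back. A hedged usage sketch for _m_pavgusb, which only builds for 3DNow!-capable targets (e.g. -m3dnow):

    #include <mm3dnow.h>

    /* Packed unsigned-byte average of eight lanes. */
    __m64 average_bytes(__m64 a, __m64 b) {
      return _m_pavgusb(a, b);
    }
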
tmmintrin.h
33 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
34 _mm_abs_pi8(__m64 a) in _mm_abs_pi8()
36 return (__m64)__builtin_ia32_pabsb((__v8qi)a); in _mm_abs_pi8()
45 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
46 _mm_abs_pi16(__m64 a) in _mm_abs_pi16()
48 return (__m64)__builtin_ia32_pabsw((__v4hi)a); in _mm_abs_pi16()
57 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
58 _mm_abs_pi32(__m64 a) in _mm_abs_pi32()
60 return (__m64)__builtin_ia32_pabsd((__v2si)a); in _mm_abs_pi32()
75 __m64 __a = (a); \
[all …]
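
tmmintrin.h carries the SSSE3 operations that still have 64-bit forms; the _mm_abs_pi8/16/32 functions above take and return __m64. A short sketch, assuming SSSE3 is available (-mssse3):

    #include <tmmintrin.h>

    /* Absolute value of four signed 16-bit lanes in an MMX register (pabsw). */
    __m64 abs_lanes(__m64 v) {
      return _mm_abs_pi16(v);
    }
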
xmmintrin.h
417 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
420 return (__m64)__builtin_ia32_cvtps2pi(a); in _mm_cvtps_pi32()
423 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
447 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
450 return (__m64)__builtin_ia32_cvttps2pi(a); in _mm_cvttps_pi32()
453 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
484 _mm_cvtpi32_ps(__m128 a, __m64 b) in _mm_cvtpi32_ps()
490 _mm_cvt_pi2ps(__m128 a, __m64 b) in _mm_cvt_pi2ps()
502 _mm_loadh_pi(__m128 a, const __m64 *p) in _mm_loadh_pi()
514 _mm_loadl_pi(__m128 a, const __m64 *p) in _mm_loadl_pi()
[all …]
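
xmmintrin.h is where __m64 meets __m128: the conversion and load intrinsics above move pairs of values between MMX registers and half of an SSE register. A minimal sketch using _mm_cvtps_pi32 and _mm_loadl_pi from the hits above (requires SSE):

    #include <xmmintrin.h>

    /* Convert the two low floats of an __m128 to two 32-bit ints in an __m64 (cvtps2pi). */
    __m64 low_floats_to_ints(__m128 v) {
      return _mm_cvtps_pi32(v);
    }

    /* Load two floats from *p into the low half of an __m128, keeping the high half of v. */
    __m128 load_low_pair(__m128 v, const __m64 *p) {
      return _mm_loadl_pi(v, p);
    }
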
emmintrin.h
434 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
437 return (__m64)__builtin_ia32_cvtpd2pi(a); in _mm_cvtpd_pi32()
440 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
443 return (__m64)__builtin_ia32_cvttpd2pi(a); in _mm_cvttpd_pi32()
447 _mm_cvtpi32_pd(__m64 a) in _mm_cvtpi32_pd()
632 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
633 _mm_add_si64(__m64 a, __m64 b) in _mm_add_si64()
728 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
729 _mm_mul_su32(__m64 a, __m64 b) in _mm_mul_su32()
764 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
[all …]
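
emmintrin.h keeps a few SSE2-era operations that still produce or consume __m64, such as _mm_add_si64 and _mm_mul_su32 above. A sketch of the former, assuming SSE2:

    #include <emmintrin.h>

    /* One full 64-bit integer add carried out in an MMX register (paddq on mm registers). */
    __m64 add64(__m64 a, __m64 b) {
      return _mm_add_si64(a, b);
    }
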
/external/clang/test/Sema/
x86-builtin-palignr.c
6 __m64 test1(__m64 a, __m64 b, int c) { in test1()
exprs.c
136 typedef long long __m64 __attribute__((__vector_size__(8))); in test14() typedef
141 __m64 mask = (__m64)((__v4hi)a > (__v4hi)a); in test14()
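The exprs.c hit shows the pattern the Sema test cares about: a lane-wise vector comparison produces an all-ones/all-zeros mask per lane, which can then be cast to the same-sized __m64 type. A standalone sketch of that idiom (hypothetical function name):

    typedef long long __m64  __attribute__((__vector_size__(8)));
    typedef short    __v4hi  __attribute__((__vector_size__(8)));

    /* The comparison yields a 4 x 16-bit mask vector; the cast bitcasts it to __m64. */
    __m64 make_mask(__v4hi a, __v4hi b) {
      return (__m64)(a > b);
    }
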
/external/opencv/cv/src/
cvsmooth.cpp
585 *(__m64*) &y[0] = _mm_add_pi16( *(__m64*) &y[0], *(__m64*) &x[0] ); in histogram_add()
586 *(__m64*) &y[4] = _mm_add_pi16( *(__m64*) &y[4], *(__m64*) &x[4] ); in histogram_add()
587 *(__m64*) &y[8] = _mm_add_pi16( *(__m64*) &y[8], *(__m64*) &x[8] ); in histogram_add()
588 *(__m64*) &y[12] = _mm_add_pi16( *(__m64*) &y[12], *(__m64*) &x[12] ); in histogram_add()
620 *(__m64*) &y[0] = _mm_sub_pi16( *(__m64*) &y[0], *(__m64*) &x[0] ); in histogram_sub()
621 *(__m64*) &y[4] = _mm_sub_pi16( *(__m64*) &y[4], *(__m64*) &x[4] ); in histogram_sub()
622 *(__m64*) &y[8] = _mm_sub_pi16( *(__m64*) &y[8], *(__m64*) &x[8] ); in histogram_sub()
623 *(__m64*) &y[12] = _mm_sub_pi16( *(__m64*) &y[12], *(__m64*) &x[12] ); in histogram_sub()
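
cvsmooth.cpp reinterprets its 16-bit histogram bins as __m64 and updates four bins per _mm_add_pi16/_mm_sub_pi16. A reduced sketch of the same idea (hypothetical helper, sixteen bins, assumes MMX):

    #include <stdint.h>
    #include <mmintrin.h>

    /* Add sixteen 16-bit bins of x into y, four lanes per MMX operation,
       in the spirit of histogram_add() above. */
    static void histogram_add16(uint16_t y[16], const uint16_t x[16]) {
      for (int i = 0; i < 16; i += 4)
        *(__m64 *)&y[i] = _mm_add_pi16(*(__m64 *)&y[i], *(const __m64 *)&x[i]);
      _mm_empty();
    }
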
/external/qemu/distrib/sdl-1.2.12/src/video/
SDL_blit_A.c
456 __m64 src1, src2, dst1, dst2, lmask, hmask, dsta; in BlitRGBtoRGBSurfaceAlpha128MMX()
473 dst1 = *(__m64*)dstp; /* 2 x dst -> dst1(ARGBARGB) */ in BlitRGBtoRGBSurfaceAlpha128MMX()
476 src1 = *(__m64*)srcp; /* 2 x src -> src1(ARGBARGB) */ in BlitRGBtoRGBSurfaceAlpha128MMX()
489 *(__m64*)dstp = dst1; /* dst1 -> 2 x dst pixels */ in BlitRGBtoRGBSurfaceAlpha128MMX()
520 __m64 src1, src2, dst1, dst2, mm_alpha, mm_zero, dsta; in BlitRGBtoRGBSurfaceAlphaMMX()
559 src1 = *(__m64*)srcp; /* 2 x src -> src1(ARGBARGB)*/ in BlitRGBtoRGBSurfaceAlphaMMX()
564 dst1 = *(__m64*)dstp;/* 2 x dst -> dst1(ARGBARGB) */ in BlitRGBtoRGBSurfaceAlphaMMX()
582 *(__m64*)dstp = dst1; /* dst1 -> 2 x pixel */ in BlitRGBtoRGBSurfaceAlphaMMX()
609 __m64 src1, dst1, mm_alpha, mm_zero, dmask; in BlitRGBtoRGBPixelAlphaMMX()
613 dmask = *(__m64*) &multmask; /* dst alpha mask -> dmask */ in BlitRGBtoRGBPixelAlphaMMX()
[all …]
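
The SDL blitters load two ARGB pixels into one __m64 and blend with mask/shift/add sequences. The 128-alpha fast path amounts to the classic average-without-overflow trick; a simplified sketch of that per-8-byte step (not SDL's exact code):

    #include <stdint.h>
    #include <mmintrin.h>

    /* 50% blend of two ARGB pixels at once: avg = (s & d) + (((s ^ d) >> 1) & 0x7f7f...),
       roughly one iteration of BlitRGBtoRGBSurfaceAlpha128MMX. */
    static void blend_half(uint32_t dst[2], const uint32_t src[2]) {
      __m64 s = *(const __m64 *)src;
      __m64 d = *(const __m64 *)dst;
      __m64 keep = _mm_set1_pi8(0x7f);  /* clear the bit that leaks across byte lanes in the 64-bit shift */
      __m64 half = _mm_and_si64(_mm_srli_si64(_mm_xor_si64(s, d), 1), keep);
      *(__m64 *)dst = _mm_add_pi8(_mm_and_si64(s, d), half);
      _mm_empty();
    }
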
/external/llvm/lib/Target/X86/
README-MMX.txt
48 __m64 t() {
X86CallingConv.td
101 // The X86-Win64 calling convention always returns __m64 values in RAX.
266 // The first 3 __m64 vector arguments are passed in mmx registers if the
297 // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
/external/qemu/android/skin/
argb.h
20 typedef __m64 mmx_t;