/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkUtils_DEFINED
#define SkUtils_DEFINED

#include "include/private/base/SkAttributes.h"
#include "include/private/base/SkAPI.h"

#include <cstring>
#include <type_traits> // is_trivially_copyable

namespace SkHexadecimalDigits {
    extern const char gUpper[16];  // 0-9A-F
    extern const char gLower[16];  // 0-9a-f
}  // namespace SkHexadecimalDigits
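
// Illustrative usage sketch (not part of the original header; the helper name
// is hypothetical): format one byte as two lowercase hex characters using the
// tables above.
static inline void sk_example_byte_to_hex(unsigned byte, char out[2]) {
    out[0] = SkHexadecimalDigits::gLower[(byte >> 4) & 0xF];  // high nibble
    out[1] = SkHexadecimalDigits::gLower[byte & 0xF];         // low nibble
}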

///////////////////////////////////////////////////////////////////////////////

// If T is an 8-byte GCC or Clang vector extension type, it would naturally pass or return in the
// MMX mm0 register on 32-bit x86 builds.  This has the fun side effect of clobbering any state in
// the x87 st0 register.  (There is no ABI governing who should preserve mm?/st? registers, so no
// one does!)
//
// We force-inline sk_unaligned_load() and sk_unaligned_store() to avoid that, making them safe to
// use for all types on all platforms, thus solving the problem once and for all!

// A separate problem exists with 32-bit x86. The default calling convention returns values in
// ST0 (the x87 FPU). Unfortunately, doing so can mutate some bit patterns (signaling NaNs
// become quiet). If you're using these functions to pass data around as floats, but it's actually
// integers, that can be bad -- raster pipeline does this.
//
// With GCC and Clang, the always_inline attribute ensures we don't have a problem. MSVC, though,
// ignores __forceinline in debug builds, so the return-via-ST0 is always present. Switching to
// __vectorcall changes the functions to return in xmm0.
#if defined(_MSC_VER) && defined(_M_IX86)
    #define SK_FP_SAFE_ABI __vectorcall
#else
    #define SK_FP_SAFE_ABI
#endif

template <typename T, typename P>
static SK_ALWAYS_INLINE T SK_FP_SAFE_ABI sk_unaligned_load(const P* ptr) {
    static_assert(std::is_trivially_copyable_v<P> || std::is_void_v<P>);
    static_assert(std::is_trivially_copyable_v<T>);
    T val;
    // gcc's class-memaccess warning sometimes triggers when:
    // - `T` is trivially copyable but
    // - `T` is non-trivial (e.g. at least one eligible default constructor is
    //    non-trivial).
    // Use `static_cast<const void*>` to explicitly suppress this warning; a
    // trivially copyable type is safe to memcpy from/to.
    memcpy(&val, static_cast<const void*>(ptr), sizeof(val));
    return val;
}
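
// Illustrative usage sketch (not part of the original header; the helper name
// is hypothetical): read a float from an arbitrary, possibly unaligned byte
// offset, e.g. while walking a packed buffer. The memcpy inside
// sk_unaligned_load keeps this well-defined regardless of alignment.
static inline float sk_example_read_unaligned_float(const char* bytes, int byteOffset) {
    return sk_unaligned_load<float>(bytes + byteOffset);
}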

template <typename T, typename P>
static SK_ALWAYS_INLINE void SK_FP_SAFE_ABI sk_unaligned_store(P* ptr, T val) {
    static_assert(std::is_trivially_copyable<T>::value);
    memcpy(ptr, &val, sizeof(val));
}
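
// Illustrative usage sketch (not part of the original header; the helper name
// is hypothetical): append a trivially copyable value's raw bytes to a byte
// buffer during serialization. The destination needs no particular alignment.
template <typename T>
static inline char* sk_example_write_bytes(char* dst, const T& value) {
    sk_unaligned_store(dst, value);  // T must be trivially copyable
    return dst + sizeof(T);          // next free position in the buffer
}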

// Copy the bytes from src into an instance of type Dst and return it.
template <typename Dst, typename Src>
static SK_ALWAYS_INLINE Dst SK_FP_SAFE_ABI sk_bit_cast(const Src& src) {
    static_assert(sizeof(Dst) == sizeof(Src));
    static_assert(std::is_trivially_copyable<Dst>::value);
    static_assert(std::is_trivially_copyable<Src>::value);
    return sk_unaligned_load<Dst>(&src);
}
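
// Illustrative usage sketch (not part of the original header; the helper name
// is hypothetical): inspect a float's exact bit pattern, e.g. to test the sign
// bit, without the undefined behavior of a *(unsigned*)&x pointer pun.
static inline bool sk_example_float_sign_bit(float x) {
    static_assert(sizeof(unsigned) == sizeof(float), "example assumes 32-bit unsigned");
    return (sk_bit_cast<unsigned>(x) >> 31) != 0;
}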

#undef SK_FP_SAFE_ABI

// VMA (Vulkan Memory Allocator) cache configuration
void SK_API SkSetVmaCacheFlag(bool flag);

bool SkGetMemoryOptimizedFlag();
bool SkGetVmaCacheFlag();
int SkGetVmaBlockSizeMB();
int SkGetNeedCachedMemroySize();
bool SkGetVmaDefragmentOn();
size_t SkGetVmaBlockCountMax();
bool SkGetVmaDebugFlag();
bool SkGetPreAllocFlag();
size_t SkGetPreAllocDelay();

#endif