//===-- common.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMMON_H_
#define SCUDO_COMMON_H_

#include "internal_defs.h"

#include "fuchsia.h"
#include "linux.h"
#include "trusty.h"

#include <stddef.h>
#include <string.h>

namespace scudo {

// Reinterprets the bytes of S as a Dest of the same size; a pre-C++20
// stand-in for std::bit_cast.
template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
  static_assert(sizeof(Dest) == sizeof(Source), "");
  Dest D;
  memcpy(&D, &S, sizeof(D));
  return D;
}

// Note that X == 0 is also reported as a power of two, since
// (0 & (0 - 1)) == 0; callers that care must exclude zero themselves.
inline constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }

// Rounds X up/down to a multiple of Boundary. The plain variants require a
// power-of-two Boundary; the *Slow variants accept any nonzero Boundary.
inline constexpr uptr roundUp(uptr X, uptr Boundary) {
  DCHECK(isPowerOfTwo(Boundary));
  return (X + Boundary - 1) & ~(Boundary - 1);
}
inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
  return ((X + Boundary - 1) / Boundary) * Boundary;
}

inline constexpr uptr roundDown(uptr X, uptr Boundary) {
  DCHECK(isPowerOfTwo(Boundary));
  return X & ~(Boundary - 1);
}
inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
  return (X / Boundary) * Boundary;
}

// Checks whether X is a multiple of Alignment; the fast variant requires a
// power-of-two Alignment.
inline constexpr bool isAligned(uptr X, uptr Alignment) {
  DCHECK(isPowerOfTwo(Alignment));
  return (X & (Alignment - 1)) == 0;
}
inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
  return X % Alignment == 0;
}
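
// For example (illustrative values): roundUp(17, 16) == 32,
// roundDown(17, 16) == 16, and isAligned(32, 16) holds, while
// roundUpSlow(17, 24) == 24 handles a non-power-of-two boundary.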

// Minimal local stand-ins for std::min/std::max/std::swap, keeping this
// header free of STL dependencies.
template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }

template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }

template <class T> void Swap(T &A, T &B) {
  T Tmp = A;
  A = B;
  B = Tmp;
}

inline uptr getMostSignificantSetBitIndex(uptr X) {
  DCHECK_NE(X, 0U);
  return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
}

inline uptr roundUpPowerOfTwo(uptr Size) {
  DCHECK(Size);
  if (isPowerOfTwo(Size))
    return Size;
  const uptr Up = getMostSignificantSetBitIndex(Size);
  DCHECK_LT(Size, (1UL << (Up + 1)));
  DCHECK_GT(Size, (1UL << Up));
  return 1UL << (Up + 1);
}

inline uptr getLeastSignificantSetBitIndex(uptr X) {
  DCHECK_NE(X, 0U);
  return static_cast<uptr>(__builtin_ctzl(X));
}

inline uptr getLog2(uptr X) {
  DCHECK(isPowerOfTwo(X));
  return getLeastSignificantSetBitIndex(X);
}
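
// For example: with X == 12 (0b1100), getMostSignificantSetBitIndex(X) == 3,
// getLeastSignificantSetBitIndex(X) == 2, roundUpPowerOfTwo(X) == 16, and
// getLog2(16) == 4.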

inline u32 getRandomU32(u32 *State) {
  // ANSI C linear congruential PRNG (16-bit output).
  // return (*State = *State * 1103515245 + 12345) >> 16;
  // XorShift (32-bit output). Note that a zero State remains zero, so the
  // state must be seeded with a nonzero value.
  *State ^= *State << 13;
  *State ^= *State >> 17;
  *State ^= *State << 5;
  return *State;
}

// Returns a value in [0, N). The modulo introduces a slight bias when N does
// not divide 2^32, which is acceptable for shuffling purposes.
inline u32 getRandomModN(u32 *State, u32 N) {
  return getRandomU32(State) % N; // [0, N)
}

// Fisher-Yates shuffle of the N elements of A, using and updating *RandState.
template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
  if (N <= 1)
    return;
  u32 State = *RandState;
  for (u32 I = N - 1; I > 0; I--)
    Swap(A[I], A[getRandomModN(&State, I + 1)]);
  *RandState = State;
}
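
// Illustrative use (hypothetical names):
//   u32 Seed = 1;
//   shuffle(FreeBlocks, FreeBlockCount, &Seed);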

// Hardware specific inlinable functions.

// Issues Count pause (x86) or yield (ARM) hints for spin-wait loops; the
// empty asm statements act as compiler-level memory barriers.
inline void yieldProcessor(UNUSED u8 Count) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__("" ::: "memory");
  for (u8 I = 0; I < Count; I++)
    __asm__ __volatile__("pause");
#elif defined(__aarch64__) || defined(__arm__)
  __asm__ __volatile__("" ::: "memory");
  for (u8 I = 0; I < Count; I++)
    __asm__ __volatile__("yield");
#endif
  __asm__ __volatile__("" ::: "memory");
}

// Platform specific functions.

extern uptr PageSizeCached;
uptr getPageSizeSlow();
inline uptr getPageSizeCached() {
  // Bionic uses a hardcoded value.
  if (SCUDO_ANDROID)
    return 4096U;
  if (LIKELY(PageSizeCached))
    return PageSizeCached;
  return getPageSizeSlow();
}

// Returns 0 if the number of CPUs could not be determined.
u32 getNumberOfCPUs();

const char *getEnv(const char *Name);

uptr GetRSS();

u64 getMonotonicTime();
// Gets the time more quickly but with less accuracy. May fall back to
// getMonotonicTime if no fast version is available.
u64 getMonotonicTimeFast();

u32 getThreadID();

// Our randomness gathering function is limited to 256 bytes to ensure we get
// as many bytes as requested, and to avoid interruption (on Linux).
constexpr uptr MaxRandomLength = 256U;
bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
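
// Illustrative use, with a hypothetical fallback value:
//   u32 Seed = 0;
//   if (!getRandom(&Seed, sizeof(Seed)))
//     Seed = 42;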

// Platform memory mapping functions.

#define MAP_ALLOWNOMEM (1U << 0)
#define MAP_NOACCESS (1U << 1)
#define MAP_RESIZABLE (1U << 2)
#define MAP_MEMTAG (1U << 3)
#define MAP_PRECOMMIT (1U << 4)

// Our platform memory mapping use is restricted to 3 scenarios:
// - reserve memory at a random address (MAP_NOACCESS);
// - commit memory in a previously reserved space;
// - commit memory at a random address.
// As such, only a subset of parameter combinations is valid, which is checked
// by the function implementation. The Data parameter allows passing opaque
// platform-specific data to the function.
// Returns nullptr on error when MAP_ALLOWNOMEM is specified; dies otherwise.
void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
          MapPlatformData *Data = nullptr);
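
// A typical reserve-then-commit sequence might look as follows (illustrative
// only; the name strings are hypothetical):
//   MapPlatformData Data = {};
//   void *Base = map(nullptr, Size, "scudo:reserve",
//                    MAP_NOACCESS | MAP_ALLOWNOMEM, &Data);
//   map(Base, Size, "scudo:commit", 0, &Data);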

// Indicates that we are getting rid of the whole mapping, which might have
// further consequences on Data, depending on the platform.
#define UNMAP_ALL (1U << 0)

void unmap(void *Addr, uptr Size, uptr Flags = 0,
           MapPlatformData *Data = nullptr);

void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
                         MapPlatformData *Data = nullptr);

void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
                      MapPlatformData *Data = nullptr);

// Internal map & unmap fatal error. This must not call map(). SizeIfOOM shall
// hold the requested size on an out-of-memory error, 0 otherwise.
void NORETURN dieOnMapUnmapError(uptr SizeIfOOM = 0);

// Logging related functions.

void setAbortMessage(const char *Message);

struct BlockInfo {
  uptr BlockBegin;
  uptr BlockSize;
  uptr RegionBegin;
  uptr RegionEnd;
};

enum class Option : u8 {
  ReleaseInterval,      // Release to OS interval in milliseconds.
  MemtagTuning,         // Whether to tune tagging for UAF or overflow.
  ThreadDisableMemInit, // Whether to disable automatic heap initialization and,
                        // where possible, memory tagging, on this thread.
  MaxCacheEntriesCount, // Maximum number of blocks that can be cached.
  MaxCacheEntrySize,    // Maximum size of a block that can be cached.
  MaxTSDsCount,         // Number of usable TSDs for the shared registry.
};

enum class ReleaseToOS : u8 {
  Normal,   // Follow the normal rules for releasing pages to the OS.
  Force,    // Force release pages to the OS, but avoid cases that take too long.
  ForceAll, // Force release every page possible regardless of how long it will
            // take.
};

constexpr unsigned char PatternFillByte = 0xAB;

enum FillContentsMode {
  NoFill = 0,
  ZeroFill = 1,
  PatternOrZeroFill = 2 // Pattern fill unless the memory is known to be
                        // zero-initialized already.
};

} // namespace scudo

#endif // SCUDO_COMMON_H_