//===-- memtag.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MEMTAG_H_
#define SCUDO_MEMTAG_H_

#include "internal_defs.h"

#if SCUDO_LINUX
#include <sys/auxv.h>
#include <sys/prctl.h>
#endif

namespace scudo {

#if defined(__aarch64__) || defined(SCUDO_FUZZ)

// We assume that Top-Byte Ignore is enabled if the architecture supports memory
// tagging. Not all operating systems enable TBI, so we only claim architectural
// support for memory tagging if the operating system enables TBI.
#if SCUDO_LINUX && !defined(SCUDO_DISABLE_TBI)
inline constexpr bool archSupportsMemoryTagging() { return true; }
#else
inline constexpr bool archSupportsMemoryTagging() { return false; }
#endif

inline constexpr uptr archMemoryTagGranuleSize() { return 16; }

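// Strips the tag from a pointer by clearing the top byte (bits 56-63). With
// TBI enabled, the hardware ignores the top byte on loads and stores, so the
// untagged pointer still addresses the same memory.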
inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }

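// Returns the 4-bit MTE allocation tag stored in bits 56-59 of the pointer.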
inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }

#else

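// Stub implementations for architectures without memory tagging. Callers are
// expected to gate on archSupportsMemoryTagging() before reaching these.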
inline constexpr bool archSupportsMemoryTagging() { return false; }

inline uptr archMemoryTagGranuleSize() {
  UNREACHABLE("memory tagging not supported");
}

inline uptr untagPointer(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

inline uint8_t extractTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

#if defined(__aarch64__)

#if SCUDO_LINUX

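// Queries the kernel for the MTE hardware capability via the auxiliary
// vector. HWCAP2_MTE is defined locally in case the libc headers are too old
// to provide it.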
inline bool systemSupportsMemoryTagging() {
#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1 << 18)
#endif
  return getauxval(AT_HWCAP2) & HWCAP2_MTE;
}

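// Returns true if tag-check faults are enabled for the current thread, i.e.
// the prctl() tag check fault mode is anything other than PR_MTE_TCF_NONE.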
inline bool systemDetectsMemoryTagFaultsTestOnly() {
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT 1
#endif
#ifndef PR_MTE_TCF_NONE
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_MASK
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#endif
  return (static_cast<unsigned long>(
              prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &
          PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}

#else // !SCUDO_LINUX

inline bool systemSupportsMemoryTagging() { return false; }

inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }

#endif // SCUDO_LINUX

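// The PSTATE.TCO ("Tag Check Override") bit suppresses tag-check faults while
// it is set. Writing 1 disables checks for the current thread; writing 0
// re-enables them.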
inline void disableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, #1
      )");
}

inline void enableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, #0
      )");
}

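// RAII helper that disables tag checks on construction and restores the
// previous TCO state on destruction. A hypothetical use, sketched here for
// illustration (UntaggedDst/TaggedSrc are not names from this file):
//
//   {
//     ScopedDisableMemoryTagChecks NoTagChecks;
//     memcpy(UntaggedDst, TaggedSrc, Size); // mismatched tags don't fault
//   }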
class ScopedDisableMemoryTagChecks {
  uptr PrevTCO;

public:
  ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        mrs %0, tco
        msr tco, #1
        )"
        : "=r"(PrevTCO));
  }

  ~ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        msr tco, %0
        )"
        :
        : "r"(PrevTCO));
  }
};

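// Uses the IRG instruction to insert a randomly chosen allocation tag into
// Ptr. Tags whose corresponding bits are set in ExcludeMask are never chosen.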
inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  uptr TaggedPtr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
      )"
      : [TaggedPtr] "=r"(TaggedPtr)
      : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
  return TaggedPtr;
}

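// Inserts the given tag into the top byte of Ptr. Ptr is expected to be
// untagged, since the tag is combined with a plain OR.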
inline uptr addFixedTag(uptr Ptr, uptr Tag) { return Ptr | (Tag << 56); }

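// Tags and zeroes every 16-byte granule in [Begin, End). The fast path uses
// STZG for the unaligned head and tail of the region and DC GZVA for the
// whole cache lines (as reported by DCZID_EL0) in between; if DC GZVA is
// prohibited or the region is too small, everything goes through the STZG
// loop. Returns the end of the tagged region.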
inline uptr storeTags(uptr Begin, uptr End) {
  DCHECK(Begin % 16 == 0);
  uptr LineSize, Next, Tmp;
  __asm__ __volatile__(
      R"(
    .arch_extension memtag

    // Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
    // of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
    // indicates that the DC instructions are unavailable.
    DCZID .req %[Tmp]
    mrs DCZID, dczid_el0
    tbnz DCZID, #4, 3f
    and DCZID, DCZID, #15
    mov %[LineSize], #4
    lsl %[LineSize], %[LineSize], DCZID
    .unreq DCZID

    // Our main loop doesn't handle the case where we don't need to perform any
    // DC GZVA operations. If the size of our tagged region is less than
    // twice the cache line size, bail out to the slow path since it's not
    // guaranteed that we'll be able to do a DC GZVA.
    Size .req %[Tmp]
    sub Size, %[End], %[Cur]
    cmp Size, %[LineSize], lsl #1
    b.lt 3f
    .unreq Size

    LineMask .req %[Tmp]
    sub LineMask, %[LineSize], #1

    // STZG until the start of the next cache line.
    orr %[Next], %[Cur], LineMask
  1:
    stzg %[Cur], [%[Cur]], #16
    cmp %[Cur], %[Next]
    b.lt 1b

    // DC GZVA cache lines until we have no more full cache lines.
    bic %[Next], %[End], LineMask
    .unreq LineMask
  2:
    dc gzva, %[Cur]
    add %[Cur], %[Cur], %[LineSize]
    cmp %[Cur], %[Next]
    b.lt 2b

    // STZG until the end of the tagged region. This loop is also used to handle
    // slow path cases.
  3:
    cmp %[Cur], %[End]
    b.ge 4f
    stzg %[Cur], [%[Cur]], #16
    b 3b

  4:
  )"
      : [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next),
        [Tmp] "=&r"(Tmp)
      : [End] "r"(End)
      : "memory");
  return Begin;
}

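// Sets the allocation tag of the single granule at Ptr to the tag in Ptr's
// top byte (STG). Unlike STZG, the granule's contents are left untouched.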
inline void storeTag(uptr Ptr) {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      stg %0, [%0]
      )"
      :
      : "r"(Ptr)
      : "memory");
}

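// Reads the allocation tag of the granule containing Ptr (LDG) and returns
// Ptr with that tag inserted into its top byte.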
inline uptr loadTag(uptr Ptr) {
  uptr TaggedPtr = Ptr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      ldg %0, [%0]
      )"
      : "+r"(TaggedPtr)
      :
      : "memory");
  return TaggedPtr;
}

#else

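// Stubs for targets without MTE instructions; these must never be called at
// runtime.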
inline bool systemSupportsMemoryTagging() {
  UNREACHABLE("memory tagging not supported");
}

inline bool systemDetectsMemoryTagFaultsTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline void disableMemoryTagChecksTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline void enableMemoryTagChecksTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

struct ScopedDisableMemoryTagChecks {
  ScopedDisableMemoryTagChecks() {}
};

inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  (void)Ptr;
  (void)ExcludeMask;
  UNREACHABLE("memory tagging not supported");
}

inline uptr addFixedTag(uptr Ptr, uptr Tag) {
  (void)Ptr;
  (void)Tag;
  UNREACHABLE("memory tagging not supported");
}

inline uptr storeTags(uptr Begin, uptr End) {
  (void)Begin;
  (void)End;
  UNREACHABLE("memory tagging not supported");
}

inline void storeTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

inline uptr loadTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

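// Tags the block [Ptr, Ptr + Size) with a random tag and reports the tagged
// bounds through TaggedBegin/TaggedEnd. A hypothetical call site (Block and
// BlockSize are illustrative names, not from this file):
//
//   uptr TaggedBegin, TaggedEnd;
//   setRandomTag(Block, BlockSize, /*ExcludeMask=*/0, &TaggedBegin,
//                &TaggedEnd);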
inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
                         uptr *TaggedBegin, uptr *TaggedEnd) {
  *TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
  *TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
}

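// Convenience overloads of the uptr primitives above for code that works
// with void pointers.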
inline void *untagPointer(void *Ptr) {
  return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
}

inline void *loadTag(void *Ptr) {
  return reinterpret_cast<void *>(loadTag(reinterpret_cast<uptr>(Ptr)));
}

inline void *addFixedTag(void *Ptr, uptr Tag) {
  return reinterpret_cast<void *>(
      addFixedTag(reinterpret_cast<uptr>(Ptr), Tag));
}

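// Memory tagging is used by the allocator only when both the architecture
// supports it and the allocator config opts in via MaySupportMemoryTagging.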
template <typename Config>
inline constexpr bool allocatorSupportsMemoryTagging() {
  return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging;
}

} // namespace scudo

#endif // SCUDO_MEMTAG_H_