//===-- memtag.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

9 #ifndef SCUDO_MEMTAG_H_
10 #define SCUDO_MEMTAG_H_
11
12 #include "internal_defs.h"
13
14 #if SCUDO_LINUX
15 #include <sys/auxv.h>
16 #include <sys/prctl.h>
17 #endif
18
19 namespace scudo {
20
21 #if (__clang_major__ >= 12 && defined(__aarch64__)) || defined(SCUDO_FUZZ)
22
// We assume that Top-Byte Ignore is enabled if the architecture supports memory
// tagging. Not all operating systems enable TBI, so we only claim architectural
// support for memory tagging if the operating system enables TBI.
// HWASan uses the top byte for its own purpose and Scudo should not touch it.
27 #if SCUDO_LINUX && !defined(SCUDO_DISABLE_TBI) && \
28 !__has_feature(hwaddress_sanitizer)
// In this configuration (Linux, TBI not disabled, no HWASan) the architecture
// is treated as supporting memory tagging.
inline constexpr bool archSupportsMemoryTagging() {
  return true;
}
30 #else
// TBI is unavailable or reserved (e.g. HWASan owns the top byte), so no
// architectural memory-tagging support is claimed.
inline constexpr bool archSupportsMemoryTagging() {
  return false;
}
32 #endif
33
archMemoryTagGranuleSize()34 inline constexpr uptr archMemoryTagGranuleSize() { return 16; }
35
untagPointer(uptr Ptr)36 inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
37
extractTag(uptr Ptr)38 inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }
39
40 #else
41
// Non-AArch64 (or old compiler) build: the architecture cannot tag memory.
inline constexpr bool archSupportsMemoryTagging() {
  return false;
}
43
// Stub for targets without MTE: the granule size is meaningless here and
// querying it is a programming error.
inline NORETURN uptr archMemoryTagGranuleSize() {
  UNREACHABLE("memory tagging not supported");
}
47
// Stub for targets without MTE: pointers are never tagged, so callers must
// gate on archSupportsMemoryTagging() before calling this.
inline NORETURN uptr untagPointer(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}
52
// Stub for targets without MTE: there is no tag byte to extract.
inline NORETURN uint8_t extractTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}
57
58 #endif
59
60 #if __clang_major__ >= 12 && defined(__aarch64__)
61
62 #if SCUDO_LINUX
63
// Runtime detection of MTE: reads AT_HWCAP2 from the auxiliary vector and
// tests the MTE capability bit. The constant is defined locally in case the
// libc headers predate HWCAP2_MTE.
inline bool systemSupportsMemoryTagging() {
#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1 << 18)
#endif
  return getauxval(AT_HWCAP2) & HWCAP2_MTE;
}
70
// Test-only helper: returns true if the kernel currently reports a non-NONE
// tag-check-fault mode (sync or async) for this thread, i.e. tag mismatches
// will be detected. All PR_* constants are defined locally as fallbacks for
// old libc headers; they intentionally stay defined for use by
// enableSystemMemoryTaggingTestOnly() below.
inline bool systemDetectsMemoryTagFaultsTestOnly() {
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 54
#endif
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT 1
#endif
#ifndef PR_MTE_TAG_SHIFT
#define PR_MTE_TAG_SHIFT 3
#endif
#ifndef PR_MTE_TCF_NONE
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_MASK
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#endif
  // -1 means the kernel does not support the tagged-address ABI at all.
  int res = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  if (res == -1)
    return false;
  return (static_cast<unsigned long>(res) & PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}
101
// Test-only helper: enables the tagged-address ABI with synchronous
// tag-check faults. 0xfffe is the tag inclusion mask per the Linux MTE prctl
// ABI — presumably all tags except 0 may be generated randomly (tag 0 stays
// reserved for untagged memory); verify against the kernel documentation.
// Relies on the PR_* fallback definitions earlier in this file. The return
// value of prctl is deliberately ignored (test-only, best effort).
inline void enableSystemMemoryTaggingTestOnly() {
  prctl(PR_SET_TAGGED_ADDR_CTRL,
        PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
        0, 0, 0);
}
107
108 #else // !SCUDO_LINUX
109
// Non-Linux fallback: no detection mechanism is available, so conservatively
// report that the system lacks memory tagging.
inline bool systemSupportsMemoryTagging() {
  return false;
}
111
// Non-Linux stub: tag-fault detection cannot be queried on this OS.
inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
  UNREACHABLE("memory tagging not supported");
}
115
// Non-Linux stub: memory tagging cannot be enabled on this OS.
inline NORETURN void enableSystemMemoryTaggingTestOnly() {
  UNREACHABLE("memory tagging not supported");
}
119
120 #endif // SCUDO_LINUX
121
// RAII helper that suppresses memory tag checks for its lifetime by setting
// PSTATE.TCO (Tag Check Override), restoring the previous TCO value on
// destruction so nesting is safe.
class ScopedDisableMemoryTagChecks {
  uptr PrevTCO; // Saved PSTATE.TCO, restored by the destructor.

public:
  ScopedDisableMemoryTagChecks() {
    // Read the current TCO state, then set TCO=1 to disable tag checking.
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        mrs %0, tco
        msr tco, #1
        )"
        : "=r"(PrevTCO));
  }

  ~ScopedDisableMemoryTagChecks() {
    // Restore the TCO state captured in the constructor.
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        msr tco, %0
        )"
        :
        : "r"(PrevTCO));
  }
};
146
// Returns Ptr with a hardware-chosen random tag inserted (IRG instruction).
// Tags whose bits are set in ExcludeMask are never selected; tag 0 is always
// excluded so tagged pointers stay distinguishable from untagged ones.
inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  ExcludeMask |= 1; // Always exclude Tag 0.
  uptr TaggedPtr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
      )"
      : [TaggedPtr] "=r"(TaggedPtr)
      : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
  return TaggedPtr;
}
159
addFixedTag(uptr Ptr,uptr Tag)160 inline uptr addFixedTag(uptr Ptr, uptr Tag) {
161 DCHECK_LT(Tag, 16);
162 DCHECK_EQ(untagPointer(Ptr), Ptr);
163 return Ptr | (Tag << 56);
164 }
165
// Tags (and zeroes) the granules in [Begin, End) with the tag carried in
// Begin's tag byte. The fast path uses DC GZVA to zero+tag whole cache lines
// and STZG for the head/tail; the slow path (label 3) is STZG-only and is
// taken when DCZID_EL0 says DC ZVA-style ops are prohibited or the region is
// too small. Begin must be 16-byte aligned; presumably End is too (only
// Begin is checked) — the loop runs until the cursor reaches End. Returns
// the advanced (still tagged) cursor, i.e. the end of the tagged region.
inline uptr storeTags(uptr Begin, uptr End) {
  DCHECK_EQ(0, Begin % 16);
  uptr LineSize, Next, Tmp;
  __asm__ __volatile__(
      R"(
    .arch_extension memtag

    // Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
    // of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
    // indicates that the DC instructions are unavailable.
    DCZID .req %[Tmp]
    mrs DCZID, dczid_el0
    tbnz DCZID, #4, 3f
    and DCZID, DCZID, #15
    mov %[LineSize], #4
    lsl %[LineSize], %[LineSize], DCZID
    .unreq DCZID

    // Our main loop doesn't handle the case where we don't need to perform any
    // DC GZVA operations. If the size of our tagged region is less than
    // twice the cache line size, bail out to the slow path since it's not
    // guaranteed that we'll be able to do a DC GZVA.
    Size .req %[Tmp]
    sub Size, %[End], %[Cur]
    cmp Size, %[LineSize], lsl #1
    b.lt 3f
    .unreq Size

    LineMask .req %[Tmp]
    sub LineMask, %[LineSize], #1

    // STZG until the start of the next cache line.
    orr %[Next], %[Cur], LineMask
  1:
    stzg %[Cur], [%[Cur]], #16
    cmp %[Cur], %[Next]
    b.lt 1b

    // DC GZVA cache lines until we have no more full cache lines.
    bic %[Next], %[End], LineMask
    .unreq LineMask
  2:
    dc gzva, %[Cur]
    add %[Cur], %[Cur], %[LineSize]
    cmp %[Cur], %[Next]
    b.lt 2b

    // STZG until the end of the tagged region. This loop is also used to handle
    // slow path cases.
  3:
    cmp %[Cur], %[End]
    b.ge 4f
    stzg %[Cur], [%[Cur]], #16
    b 3b

  4:
  )"
      : [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next),
        [Tmp] "=&r"(Tmp)
      : [End] "r"(End)
      : "memory");
  DCHECK_EQ(0, Begin % 16);
  return Begin;
}
230
// Sets the memory tag of the single 16-byte granule at Ptr to the tag in
// Ptr's tag byte (STG tags without zeroing the data). Ptr must be
// granule-aligned.
inline void storeTag(uptr Ptr) {
  DCHECK_EQ(0, Ptr % 16);
  __asm__ __volatile__(R"(
    .arch_extension memtag
    stg %0, [%0]
  )"
                       :
                       : "r"(Ptr)
                       : "memory");
}
241
// Returns Ptr with its tag byte replaced by the tag currently stored in
// memory for its granule (LDG). Ptr must be granule-aligned.
inline uptr loadTag(uptr Ptr) {
  DCHECK_EQ(0, Ptr % 16);
  uptr TaggedPtr = Ptr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      ldg %0, [%0]
      )"
      : "+r"(TaggedPtr)
      :
      : "memory");
  return TaggedPtr;
}
255
256 #else
257
// Stub for builds without MTE support: must never be called.
inline NORETURN bool systemSupportsMemoryTagging() {
  UNREACHABLE("memory tagging not supported");
}
261
// Stub for builds without MTE support: must never be called.
inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
  UNREACHABLE("memory tagging not supported");
}
265
// Stub for builds without MTE support: must never be called.
inline NORETURN void enableSystemMemoryTaggingTestOnly() {
  UNREACHABLE("memory tagging not supported");
}
269
// No-op stand-in so callers can instantiate the guard unconditionally.
// NOTE(review): the user-provided (non-defaulted) constructor appears
// intentional — presumably to keep instantiations from triggering
// unused-variable warnings; confirm before changing to "= default".
struct ScopedDisableMemoryTagChecks {
  ScopedDisableMemoryTagChecks() {}
};
273
// Stub for builds without MTE support: must never be called.
inline NORETURN uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  (void)Ptr;
  (void)ExcludeMask;
  UNREACHABLE("memory tagging not supported");
}
279
// Stub for builds without MTE support: must never be called.
inline NORETURN uptr addFixedTag(uptr Ptr, uptr Tag) {
  (void)Ptr;
  (void)Tag;
  UNREACHABLE("memory tagging not supported");
}
285
// Stub for builds without MTE support: must never be called.
inline NORETURN uptr storeTags(uptr Begin, uptr End) {
  (void)Begin;
  (void)End;
  UNREACHABLE("memory tagging not supported");
}
291
// Stub for builds without MTE support: must never be called.
inline NORETURN void storeTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}
296
// Stub for builds without MTE support: must never be called.
inline NORETURN uptr loadTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}
301
302 #endif
303
304 #pragma GCC diagnostic push
305 #pragma GCC diagnostic ignored "-Wmissing-noreturn"
setRandomTag(void * Ptr,uptr Size,uptr ExcludeMask,uptr * TaggedBegin,uptr * TaggedEnd)306 inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
307 uptr *TaggedBegin, uptr *TaggedEnd) {
308 *TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
309 *TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
310 }
311 #pragma GCC diagnostic pop
312
untagPointer(void * Ptr)313 inline void *untagPointer(void *Ptr) {
314 return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
315 }
316
loadTag(void * Ptr)317 inline void *loadTag(void *Ptr) {
318 return reinterpret_cast<void *>(loadTag(reinterpret_cast<uptr>(Ptr)));
319 }
320
addFixedTag(void * Ptr,uptr Tag)321 inline void *addFixedTag(void *Ptr, uptr Tag) {
322 return reinterpret_cast<void *>(
323 addFixedTag(reinterpret_cast<uptr>(Ptr), Tag));
324 }
325
326 template <typename Config>
allocatorSupportsMemoryTagging()327 inline constexpr bool allocatorSupportsMemoryTagging() {
328 return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging &&
329 (1 << SCUDO_MIN_ALIGNMENT_LOG) >= archMemoryTagGranuleSize();
330 }
331
332 } // namespace scudo
333
334 #endif
335