/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "heap_tagging.h"
#include "malloc_common.h"
#include "malloc_tagged_pointers.h"

#include <bionic/pthread_internal.h>
#include <platform/bionic/malloc.h>
#include <sanitizer/hwasan_interface.h>
#include <sys/auxv.h>

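// Allocation-tagging hooks implemented by the Scudo allocator; the calls to them below are only
// compiled in when bionic is built with Scudo as its native allocator (USE_SCUDO).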
extern "C" void scudo_malloc_disable_memory_tagging();
extern "C" void scudo_malloc_set_track_allocation_stacks(int);

// Protected by `g_heap_tagging_lock`.
static HeapTaggingLevel heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE;

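// Applies the heap tagging level chosen before libc initialization. TBI installs a fixed pointer
// tag plus check/untag masks into the libc globals; the MTE modes (SYNC/ASYNC) propagate the
// initial memtag-stack setting. Scudo is then told to stop tagging memory (TBI/NONE) or to start
// recording allocation/deallocation stack traces (SYNC).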
void SetDefaultHeapTaggingLevel() {
#if defined(__aarch64__)
#if !__has_feature(hwaddress_sanitizer)
  heap_tagging_level = __libc_shared_globals()->initial_heap_tagging_level;
#endif

  __libc_globals.mutate([](libc_globals* globals) {
    switch (heap_tagging_level) {
      case M_HEAP_TAGGING_LEVEL_TBI:
        // Arrange for us to set pointer tags to POINTER_TAG, check tags on
        // deallocation and untag when passing pointers to the allocator.
        globals->heap_pointer_tag = (reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT) |
                                    (0xffull << CHECK_SHIFT) | (0xffull << UNTAG_SHIFT);
        break;
      case M_HEAP_TAGGING_LEVEL_SYNC:
      case M_HEAP_TAGGING_LEVEL_ASYNC:
        atomic_store(&globals->memtag_stack, __libc_shared_globals()->initial_memtag_stack);
        break;
      default:
        break;
    };
  });

#if defined(USE_SCUDO)
  switch (heap_tagging_level) {
    case M_HEAP_TAGGING_LEVEL_TBI:
    case M_HEAP_TAGGING_LEVEL_NONE:
      scudo_malloc_disable_memory_tagging();
      break;
    case M_HEAP_TAGGING_LEVEL_SYNC:
      scudo_malloc_set_track_allocation_stacks(1);
      break;
    default:
      break;
  }
#endif  // USE_SCUDO
#endif  // aarch64
}

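// The TCF field of PR_SET_TAGGED_ADDR_CTRL selects how the CPU reports MTE tag check faults for a
// thread: PR_MTE_TCF_NONE ignores them, PR_MTE_TCF_SYNC faults synchronously at the offending
// instruction, and PR_MTE_TCF_ASYNC reports them asynchronously. The setting is per-thread, so a
// change has to be broadcast to every thread in the process.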
static bool set_tcf_on_all_threads(int tcf) {
  return android_run_on_all_threads(
      [](void* arg) {
        int tcf = *reinterpret_cast<int*>(arg);
        int tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
        if (tagged_addr_ctrl < 0) {
          return false;
        }

        tagged_addr_ctrl = (tagged_addr_ctrl & ~PR_MTE_TCF_MASK) | tcf;
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, tagged_addr_ctrl, 0, 0, 0) < 0) {
          return false;
        }
        return true;
      },
      &tcf);
}

pthread_mutex_t g_heap_tagging_lock = PTHREAD_MUTEX_INITIALIZER;

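// For reference, a rough sketch of how a caller typically reaches SetHeapTaggingLevel() through
// android_mallopt() (declared in <platform/bionic/malloc.h>); the mallopt dispatcher is expected
// to take g_heap_tagging_lock around the call:
//
//   HeapTaggingLevel level = M_HEAP_TAGGING_LEVEL_ASYNC;
//   if (!android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &level, sizeof(level))) {
//     // The transition was rejected, e.g. re-enabling tagging after it was set to NONE.
//   }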
// Requires `g_heap_tagging_lock` to be held.
bool SetHeapTaggingLevel(HeapTaggingLevel tag_level) {
  if (tag_level == heap_tagging_level) {
    return true;
  }

  switch (tag_level) {
    case M_HEAP_TAGGING_LEVEL_NONE:
      __libc_globals.mutate([](libc_globals* globals) {
        if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
          // Preserve the untag mask (we still want to untag pointers when passing them to the
          // allocator), but clear the fixed tag and the check mask, so that pointers are no longer
          // tagged and checks no longer happen.
          globals->heap_pointer_tag = static_cast<uintptr_t>(0xffull << UNTAG_SHIFT);
        }
        atomic_store(&globals->memtag_stack, false);
      });

      if (heap_tagging_level != M_HEAP_TAGGING_LEVEL_TBI) {
        if (!set_tcf_on_all_threads(PR_MTE_TCF_NONE)) {
          error_log("SetHeapTaggingLevel: set_tcf_on_all_threads failed");
          return false;
        }
      }
#if defined(USE_SCUDO)
      scudo_malloc_disable_memory_tagging();
#endif
      break;
    case M_HEAP_TAGGING_LEVEL_TBI:
    case M_HEAP_TAGGING_LEVEL_ASYNC:
    case M_HEAP_TAGGING_LEVEL_SYNC:
      if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
#if !__has_feature(hwaddress_sanitizer)
        // Suppress the error message in HWASan builds. Apps may attempt to enable TBI (or even
        // the MTE modes) without being aware of HWASan, so fail them silently.
        error_log(
            "SetHeapTaggingLevel: re-enabling tagging after it was disabled is not supported");
#endif
        return false;
      } else if (tag_level == M_HEAP_TAGGING_LEVEL_TBI ||
                 heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
        error_log("SetHeapTaggingLevel: switching between TBI and ASYNC/SYNC is not supported");
        return false;
      }

      if (tag_level == M_HEAP_TAGGING_LEVEL_ASYNC) {
        // When entering ASYNC mode, specify that we want to allow upgrading to SYNC by OR'ing in
        // the SYNC flag. But if the kernel doesn't support specifying multiple TCF modes, fall back
        // to specifying a single mode.
        if (!set_tcf_on_all_threads(PR_MTE_TCF_ASYNC | PR_MTE_TCF_SYNC)) {
          set_tcf_on_all_threads(PR_MTE_TCF_ASYNC);
        }
#if defined(USE_SCUDO)
        scudo_malloc_set_track_allocation_stacks(0);
#endif
      } else if (tag_level == M_HEAP_TAGGING_LEVEL_SYNC) {
        set_tcf_on_all_threads(PR_MTE_TCF_SYNC);
#if defined(USE_SCUDO)
        scudo_malloc_set_track_allocation_stacks(1);
#endif
      }
      break;
    default:
      error_log("SetHeapTaggingLevel: unknown tagging level");
      return false;
  }

  heap_tagging_level = tag_level;
  info_log("SetHeapTaggingLevel: tag level set to %d", tag_level);

  return true;
}

#ifdef __aarch64__
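// Walks [from, to) in 16-byte MTE granules. "stg" stores the allocation tag taken from the tag
// bits of the address operand, so passing ordinary untagged stack pointers here resets the
// granules' memory tags to zero, making the region usable through untagged pointers again.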
static inline __attribute__((no_sanitize("memtag"))) void untag_memory(void* from, void* to) {
  __asm__ __volatile__(
      ".arch_extension mte\n"
      "1:\n"
      "stg %[Ptr], [%[Ptr]], #16\n"
      "cmp %[Ptr], %[End]\n"
      "b.lt 1b\n"
      : [Ptr] "+&r"(from)
      : [End] "r"(to)
      : "memory");
}
#endif

#ifdef __aarch64__
// 128MiB of stack should be enough for anybody.
static constexpr size_t kUntagLimit = 128 * 1024 * 1024;
#endif  // __aarch64__

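// With stack tagging enabled, frames between the current stack pointer and the longjmp() target
// may have retagged their local variables. Those frames are abandoned without running their
// epilogues, so their stack granules are retagged back to zero here; otherwise later reuse of that
// stack through untagged pointers would trigger tag check faults.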
extern "C" __LIBC_HIDDEN__ __attribute__((no_sanitize("memtag"))) void memtag_handle_longjmp(
    void* sp_dst __unused) {
#ifdef __aarch64__
  if (__libc_globals->memtag_stack) {
    void* sp = __builtin_frame_address(0);
    size_t distance = reinterpret_cast<uintptr_t>(sp_dst) - reinterpret_cast<uintptr_t>(sp);
    if (distance > kUntagLimit) {
      async_safe_fatal(
          "memtag_handle_longjmp: stack adjustment too large! %p -> %p, distance %zx > %zx\n", sp,
          sp_dst, distance, kUntagLimit);
    } else {
      untag_memory(sp, sp_dst);
    }
  }
#endif  // __aarch64__

#if __has_feature(hwaddress_sanitizer)
  __hwasan_handle_longjmp(sp_dst);
#endif  // __has_feature(hwaddress_sanitizer)
}

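// A vfork() child borrows the parent's stack. Any granules the child retagged below the parent's
// current stack pointer must be restored once the child has called exec()/_exit() and the parent
// resumes, using the stack position recorded for the child in vfork_child_stack_bottom.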
extern "C" __LIBC_HIDDEN__ __attribute__((no_sanitize("memtag"), no_sanitize("hwaddress"))) void
memtag_handle_vfork(void* sp __unused) {
#ifdef __aarch64__
  if (__libc_globals->memtag_stack) {
    void* child_sp = __get_thread()->vfork_child_stack_bottom;
    __get_thread()->vfork_child_stack_bottom = nullptr;
    if (child_sp) {
      size_t distance = reinterpret_cast<uintptr_t>(sp) - reinterpret_cast<uintptr_t>(child_sp);
      if (distance > kUntagLimit) {
        async_safe_fatal(
            "memtag_handle_vfork: stack adjustment too large! %p -> %p, distance %zx > %zx\n",
            child_sp, sp, distance, kUntagLimit);
      } else {
        untag_memory(child_sp, sp);
      }
    } else {
      async_safe_fatal("memtag_handle_vfork: child SP unknown\n");
    }
  }
#endif  // __aarch64__

#if __has_feature(hwaddress_sanitizer)
  __hwasan_handle_vfork(sp);
#endif  // __has_feature(hwaddress_sanitizer)
}