/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "heap_tagging.h"
#include "malloc_common.h"
#include "malloc_tagged_pointers.h"

#include <bionic/pthread_internal.h>
#include <platform/bionic/malloc.h>
#include <sanitizer/hwasan_interface.h>
#include <sys/auxv.h>
#include <sys/prctl.h>

extern "C" void scudo_malloc_disable_memory_tagging();
extern "C" void scudo_malloc_set_track_allocation_stacks(int);

extern "C" const char* __scudo_get_stack_depot_addr();
extern "C" const char* __scudo_get_ring_buffer_addr();
extern "C" size_t __scudo_get_ring_buffer_size();
extern "C" size_t __scudo_get_stack_depot_size();

// Protected by `g_heap_tagging_lock`.
static HeapTaggingLevel heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE;

void SetDefaultHeapTaggingLevel() {
#if defined(__aarch64__)
#if !__has_feature(hwaddress_sanitizer)
  heap_tagging_level = __libc_shared_globals()->initial_heap_tagging_level;
#endif

  __libc_memtag_stack_abi = __libc_shared_globals()->initial_memtag_stack_abi;

  __libc_globals.mutate([](libc_globals* globals) {
    switch (heap_tagging_level) {
      case M_HEAP_TAGGING_LEVEL_TBI:
        // Arrange for us to set pointer tags to POINTER_TAG, check tags on
        // deallocation and untag when passing pointers to the allocator.
        globals->heap_pointer_tag = (reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT) |
                                    (0xffull << CHECK_SHIFT) | (0xffull << UNTAG_SHIFT);
        break;
      case M_HEAP_TAGGING_LEVEL_SYNC:
      case M_HEAP_TAGGING_LEVEL_ASYNC:
        atomic_store(&globals->memtag, true);
        atomic_store(&__libc_memtag_stack, __libc_shared_globals()->initial_memtag_stack);
        break;
      default:
        break;
    };
  });

#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
  switch (heap_tagging_level) {
    case M_HEAP_TAGGING_LEVEL_TBI:
    case M_HEAP_TAGGING_LEVEL_NONE:
      scudo_malloc_disable_memory_tagging();
      break;
    case M_HEAP_TAGGING_LEVEL_SYNC:
      scudo_malloc_set_track_allocation_stacks(1);
      break;
    default:
      break;
  }
#endif  // USE_SCUDO
#endif  // aarch64
}

static bool set_tcf_on_all_threads(int tcf) {
  return android_run_on_all_threads(
      [](void* arg) {
        int tcf = *reinterpret_cast<int*>(arg);
        int tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
        if (tagged_addr_ctrl < 0) {
          return false;
        }

        tagged_addr_ctrl = (tagged_addr_ctrl & ~PR_MTE_TCF_MASK) | tcf;
        return prctl(PR_SET_TAGGED_ADDR_CTRL, tagged_addr_ctrl, 0, 0, 0) >= 0;
      },
      &tcf);
}
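
// Illustrative sketch (not part of this file): the same read-modify-write of the
// tagged-address control word, but for the calling thread only. The helper name is
// hypothetical; PR_GET/PR_SET_TAGGED_ADDR_CTRL and the PR_MTE_TCF_* bits are the
// same <sys/prctl.h> interface used above.
//
//   static bool set_tcf_on_current_thread(int tcf) {
//     int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
//     if (ctrl < 0) return false;               // kernel has no tagged-address ABI support
//     ctrl = (ctrl & ~PR_MTE_TCF_MASK) | tcf;   // keep the other bits, swap only the TCF mode
//     return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0) >= 0;
//   }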

pthread_mutex_t g_heap_tagging_lock = PTHREAD_MUTEX_INITIALIZER;

// Requires `g_heap_tagging_lock` to be held.
bool SetHeapTaggingLevel(HeapTaggingLevel tag_level) {
  if (tag_level == heap_tagging_level) {
    return true;
  }

  switch (tag_level) {
    case M_HEAP_TAGGING_LEVEL_NONE:
      __libc_globals.mutate([](libc_globals* globals) {
        if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
          // Preserve the untag mask (we still want to untag pointers when passing them to the
          // allocator), but clear the fixed tag and the check mask, so that pointers are no longer
          // tagged and checks no longer happen.
          globals->heap_pointer_tag = static_cast<uintptr_t>(0xffull << UNTAG_SHIFT);
        }
        atomic_store(&__libc_memtag_stack, false);
        atomic_store(&globals->memtag, false);
        atomic_store(&__libc_shared_globals()->memtag_currently_on, false);
      });

      if (heap_tagging_level != M_HEAP_TAGGING_LEVEL_TBI) {
        if (!set_tcf_on_all_threads(PR_MTE_TCF_NONE)) {
          error_log("SetHeapTaggingLevel: set_tcf_on_all_threads failed");
          return false;
        }
      }
#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
      scudo_malloc_disable_memory_tagging();
#endif
      break;
    case M_HEAP_TAGGING_LEVEL_TBI:
    case M_HEAP_TAGGING_LEVEL_ASYNC:
    case M_HEAP_TAGGING_LEVEL_SYNC:
      if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
#if !__has_feature(hwaddress_sanitizer)
        // Suppress the error message in HWASan builds: apps may try to enable TBI (or even MTE
        // modes) without being aware of HWASan, so fail them silently.
        error_log(
            "SetHeapTaggingLevel: re-enabling tagging after it was disabled is not supported");
#endif
        return false;
      } else if (tag_level == M_HEAP_TAGGING_LEVEL_TBI ||
                 heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
        error_log("SetHeapTaggingLevel: switching between TBI and ASYNC/SYNC is not supported");
        return false;
      }

      if (tag_level == M_HEAP_TAGGING_LEVEL_ASYNC) {
        // When entering ASYNC mode, specify that we want to allow upgrading to SYNC by OR'ing in
        // the SYNC flag. But if the kernel doesn't support specifying multiple TCF modes, fall back
        // to specifying a single mode.
        if (!set_tcf_on_all_threads(PR_MTE_TCF_ASYNC | PR_MTE_TCF_SYNC)) {
          set_tcf_on_all_threads(PR_MTE_TCF_ASYNC);
        }
#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
        scudo_malloc_set_track_allocation_stacks(0);
#endif
      } else if (tag_level == M_HEAP_TAGGING_LEVEL_SYNC) {
        set_tcf_on_all_threads(PR_MTE_TCF_SYNC);
#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
        scudo_malloc_set_track_allocation_stacks(1);
        __libc_shared_globals()->scudo_ring_buffer = __scudo_get_ring_buffer_addr();
        __libc_shared_globals()->scudo_ring_buffer_size = __scudo_get_ring_buffer_size();
        __libc_shared_globals()->scudo_stack_depot = __scudo_get_stack_depot_addr();
        __libc_shared_globals()->scudo_stack_depot_size = __scudo_get_stack_depot_size();
#endif
      }
      break;
    default:
      error_log("SetHeapTaggingLevel: unknown tagging level");
      return false;
  }

  heap_tagging_level = tag_level;
  info_log("SetHeapTaggingLevel: tag level set to %d", tag_level);

  return true;
}
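
// Illustrative sketch (not part of this file): SetHeapTaggingLevel() is normally reached
// from mallopt() with g_heap_tagging_lock held. Assuming the Android <malloc.h>
// M_BIONIC_SET_HEAP_TAGGING_LEVEL option, an app could step MTE checking down at runtime
// roughly like this; per the logic above, re-enabling after NONE is rejected, as is
// switching between TBI and the MTE modes.
//
//   #include <malloc.h>
//
//   void drop_to_async_then_off() {
//     mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_ASYNC);  // SYNC -> ASYNC is allowed
//     mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE);   // tagging now off for good
//   }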

#ifdef __aarch64__
static inline __attribute__((no_sanitize("memtag"))) void untag_memory(void* from, void* to) {
  if (from == to) {
    return;
  }
  __asm__ __volatile__(
      ".arch_extension mte\n"
      "1:\n"
      "stg %[Ptr], [%[Ptr]], #16\n"
      "cmp %[Ptr], %[End]\n"
      "b.lt 1b\n"
      : [Ptr] "+&r"(from)
      : [End] "r"(to)
      : "memory");
}
#endif
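
// Illustrative sketch (not part of this file): the STG loop above, written with the ACLE
// MTE intrinsic instead of inline assembly. This assumes a compiler configured with
// __ARM_FEATURE_MEMORY_TAGGING (e.g. -march=armv8.5-a+memtag), so <arm_acle.h> provides
// __arm_mte_set_tag(); each call sets the allocation tag of one 16-byte granule from the
// pointer's top-byte tag, which is what `stg` does above.
//
//   #include <arm_acle.h>
//
//   static void untag_memory_acle(void* from, void* to) {
//     for (char* p = static_cast<char*>(from); p < static_cast<char*>(to); p += 16) {
//       __arm_mte_set_tag(p);  // pointer carries tag 0 here, so this clears the granule's tag
//     }
//   }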

#ifdef __aarch64__
// 128MiB of stack should be enough for anybody.
static constexpr size_t kUntagLimit = 128 * 1024 * 1024;
#endif  // __aarch64__

extern "C" __LIBC_HIDDEN__ __attribute__((no_sanitize("memtag"))) void memtag_handle_longjmp(
    void* sp_dst __unused, void* sp_src __unused) {
  // A usual longjmp looks like this, where sp_dst is the SP that was saved by the call to
  // sigsetjmp (i.e. the SP of the frame that called sigsetjmp).
  // ┌─────────────────────┐                  │
  // │                     │                  │
  // ├─────────────────────┤◄──────── sp_dst  │ stack
  // │         ...         │                  │ grows
  // ├─────────────────────┤                  │ to lower
  // │         ...         │                  │ addresses
  // ├─────────────────────┤◄──────── sp_src  │
  // │siglongjmp           │                  │
  // ├─────────────────────┤                  │
  // │memtag_handle_longjmp│                  │
  // └─────────────────────┘                  ▼
#ifdef __aarch64__
  if (atomic_load(&__libc_memtag_stack)) {
    size_t distance = reinterpret_cast<uintptr_t>(sp_dst) - reinterpret_cast<uintptr_t>(sp_src);
    if (distance > kUntagLimit) {
      async_safe_fatal(
          "memtag_handle_longjmp: stack adjustment too large! %p -> %p, distance %zx > %zx\n",
          sp_src, sp_dst, distance, kUntagLimit);
    } else {
      untag_memory(sp_src, sp_dst);
    }
  }
#endif  // __aarch64__

  // We can use __has_feature here, rather than calling __hwasan_handle_longjmp through a
  // weak symbol, because this is part of libc, which is always sanitized in a
  // HWASan-enabled process.
#if __has_feature(hwaddress_sanitizer)
  __hwasan_handle_longjmp(sp_dst);
#endif  // __has_feature(hwaddress_sanitizer)
}
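
// Illustrative scenario (not part of this file): the path above runs when a siglongjmp
// unwinds past frames whose stack granules were tagged for MTE stack tagging. Without
// untag_memory(), those granules could keep stale tags and later frames reusing that
// stack range could fault on ordinary loads and stores.
//
//   #include <setjmp.h>
//
//   static sigjmp_buf env;
//   static void deep() { siglongjmp(env, 1); }   // sp_src: SP down in this call chain
//   void run() {
//     if (sigsetjmp(env, 1) == 0) deep();        // sp_dst: SP saved here by sigsetjmp
//   }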
245