1 // Copyright 2021 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "partition_alloc/tagging.h"
6
7 #include "build/build_config.h"
8 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
9 #include "partition_alloc/partition_alloc_base/cpu.h"
10 #include "partition_alloc/partition_alloc_check.h"
11 #include "partition_alloc/partition_alloc_config.h"
12
13 #if PA_CONFIG(HAS_MEMORY_TAGGING)
14 #include <arm_acle.h>
15 #include <asm/hwcap.h>
16 #include <sys/auxv.h>
17 #include <sys/ifunc.h>
18 #include <sys/prctl.h>
19 #define PR_SET_TAGGED_ADDR_CTRL 55
20 #define PR_GET_TAGGED_ADDR_CTRL 56
21 #define PR_TAGGED_ADDR_ENABLE (1UL << 0)
22
23 #if BUILDFLAG(IS_LINUX)
24 #include <linux/version.h>
25
26 // Linux headers already provide these since v5.10.
27 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
28 #define HAS_PR_MTE_MACROS
29 #endif
30 #endif
31
32 #ifndef HAS_PR_MTE_MACROS
33 #define PR_MTE_TCF_SHIFT 1
34 #define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
35 #define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
36 #define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
37 #define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
38 #define PR_MTE_TAG_SHIFT 3
39 #define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
40 #define HWCAP2_MTE (1 << 18)
41 #endif
42 #endif
43
44 #if BUILDFLAG(IS_ANDROID)
45 #include "partition_alloc/partition_alloc_base/files/file_path.h"
46 #include "partition_alloc/partition_alloc_base/native_library.h"
47 #endif // BUILDFLAG(IS_ANDROID)
48
49 namespace partition_alloc {
50
51 #if PA_CONFIG(HAS_MEMORY_TAGGING)
52 namespace {
ChangeMemoryTaggingModeInternal(unsigned prctl_mask)53 void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) {
54 if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) {
55 int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0);
56 PA_CHECK(status == 0);
57 }
58 }
59 } // namespace
60 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
61
ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode m)62 void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode m) {
63 #if PA_CONFIG(HAS_MEMORY_TAGGING)
64 if (m == TagViolationReportingMode::kSynchronous) {
65 ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
66 (0xfffe << PR_MTE_TAG_SHIFT));
67 } else if (m == TagViolationReportingMode::kAsynchronous) {
68 ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC |
69 (0xfffe << PR_MTE_TAG_SHIFT));
70 } else {
71 ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_NONE);
72 }
73 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
74 }
75
76 namespace internal {
77
78 #if BUILDFLAG(IS_ANDROID)
ChangeMemoryTaggingModeForAllThreadsPerProcess(TagViolationReportingMode m)79 void ChangeMemoryTaggingModeForAllThreadsPerProcess(
80 TagViolationReportingMode m) {
81 #if PA_CONFIG(HAS_MEMORY_TAGGING)
82 // In order to support Android NDK API level below 26, we need to call
83 // mallopt via dynamic linker.
84 // int mallopt(int param, int value);
85 using MalloptSignature = int (*)(int, int);
86
87 static MalloptSignature mallopt_fnptr = []() {
88 base::FilePath module_path;
89 base::NativeLibraryLoadError load_error;
90 base::FilePath library_path = module_path.Append("libc.so");
91 base::NativeLibrary library =
92 base::LoadNativeLibrary(library_path, &load_error);
93 PA_CHECK(library);
94 void* func_ptr =
95 base::GetFunctionPointerFromNativeLibrary(library, "mallopt");
96 PA_CHECK(func_ptr);
97 return reinterpret_cast<MalloptSignature>(func_ptr);
98 }();
99
100 int status = 0;
101 if (m == TagViolationReportingMode::kSynchronous) {
102 status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
103 M_HEAP_TAGGING_LEVEL_SYNC);
104 } else if (m == TagViolationReportingMode::kAsynchronous) {
105 status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
106 M_HEAP_TAGGING_LEVEL_ASYNC);
107 } else {
108 status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
109 M_HEAP_TAGGING_LEVEL_NONE);
110 }
111 PA_CHECK(status);
112 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
113 }
114 #endif // BUILDFLAG(IS_ANDROID)
115
116 namespace {
CheckTagRegionParameters(void * ptr,size_t sz)117 [[maybe_unused]] static bool CheckTagRegionParameters(void* ptr, size_t sz) {
118 // Check that ptr and size are correct for MTE
119 uintptr_t ptr_as_uint = reinterpret_cast<uintptr_t>(ptr);
120 bool ret = (ptr_as_uint % kMemTagGranuleSize == 0) &&
121 (sz % kMemTagGranuleSize == 0) && sz;
122 return ret;
123 }
124
125 #if PA_CONFIG(HAS_MEMORY_TAGGING)
// Assigns one hardware-generated random tag to the whole [ptr, ptr + sz)
// region and returns the correspondingly re-tagged pointer. |mask| is passed
// through to __arm_mte_create_random_tag, which per ACLE treats set bits as
// tags excluded from random selection. Returns nullptr if |ptr|/|sz| are not
// granule-aligned (or |sz| is zero).
void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
  // Randomly tag a region (MTE-enabled systems only). The first 16-byte
  // granule is randomly tagged, all other granules in the region are
  // then assigned that initial tag via __arm_mte_set_tag.
  if (!CheckTagRegionParameters(ptr, sz)) {
    return nullptr;
  }
  // __arm_mte_create_random_tag generates a randomly tagged pointer via the
  // hardware's random number generator, but does not apply it to the memory.
  char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Next, tag the first and all subsequent granules with the random tag.
    __arm_mte_set_tag(nptr +
                      i);  // Tag is taken from the top bits of the argument.
  }
  return nptr;
}
143
// Re-tags [ptr, ptr + sz) by incrementing the first granule's current tag by
// one and applying the new tag to every granule in the region. Returns the
// re-tagged pointer, or nullptr if |ptr|/|sz| are not granule-aligned (or
// |sz| is zero).
void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
  // Increment a region's tag (MTE-enabled systems only), using the tag of the
  // first granule.
  if (!CheckTagRegionParameters(ptr, sz)) {
    return nullptr;
  }
  // Increment ptr's tag. The intrinsic only changes the pointer's tag bits;
  // memory is updated by the __arm_mte_set_tag loop below.
  char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Apply the tag to the first granule, and all subsequent granules.
    __arm_mte_set_tag(nptr + i);
  }
  return nptr;
}
158
// Reads the allocation tag currently stored for |ptr|'s granule and returns
// |ptr| re-tagged with it. Null is returned unchanged, since the tag load
// would fault on a null address.
void* RemaskVoidPtrForMTE(void* ptr) {
  if (PA_LIKELY(ptr)) {
    // Can't look up the tag for a null ptr (segfaults).
    return __arm_mte_get_tag(ptr);
  }
  return nullptr;
}
166
// Fallback for TagRegionIncrementForMTE on hardware without MTE: there is no
// tag to adjust, so the pointer is handed back untouched.
void* TagRegionIncrementNoOp(void* ptr, size_t sz) {
  static_cast<void>(sz);  // Size only matters when tags are actually applied.
  return ptr;
}
172
// Fallback for TagRegionRandomlyForMTE on hardware without MTE: no random
// tag can be generated, so the pointer is returned as-is.
void* TagRegionRandomlyNoOp(void* ptr, size_t sz, uint64_t mask) {
  static_cast<void>(sz);
  static_cast<void>(mask);
  return ptr;
}
178
// Fallback for RemaskVoidPtrForMTE on hardware without MTE: pointers carry no
// tag, so the input is the output.
void* RemaskVoidPtrNoOp(void* ptr) {
  return ptr;
}
182 #endif
183
184 } // namespace
185
186 #if PA_CONFIG(HAS_MEMORY_TAGGING)
// Function-type aliases for the three tagging entry points; used below as the
// return types of the extern "C" ifunc resolvers.
using RemaskPtrInternalFn = void*(void* ptr);
using TagMemoryRangeIncrementInternalFn = void*(void* ptr, size_t size);

using TagMemoryRangeRandomlyInternalFn = void*(void* ptr,
                                               size_t size,
                                               uint64_t mask);
193
// ifunc resolver for TagMemoryRangeIncrementInternal: picks the MTE
// implementation when the kernel advertises MTE in HWCAP2, otherwise the
// no-op fallback. Per sys/ifunc.h, |hw| is only valid when |hwcap| has
// _IFUNC_ARG_HWCAP set.
extern "C" TagMemoryRangeIncrementInternalFn(
    *ResolveTagMemoryRangeIncrement(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
    return TagRegionIncrementForMTE;
  }
  return TagRegionIncrementNoOp;
}
201
// ifunc resolver for TagMemoryRangeRandomlyInternal: MTE implementation if
// HWCAP2 reports MTE, no-op otherwise. |hw| is only valid when |hwcap| has
// _IFUNC_ARG_HWCAP set.
extern "C" TagMemoryRangeRandomlyInternalFn(
    *ResolveTagMemoryRandomly(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
    return TagRegionRandomlyForMTE;
  }
  return TagRegionRandomlyNoOp;
}
209
// ifunc resolver for RemaskPointerInternal: MTE implementation if HWCAP2
// reports MTE, no-op otherwise. |hw| is only valid when |hwcap| has
// _IFUNC_ARG_HWCAP set.
extern "C" RemaskPtrInternalFn(
    *ResolveRemaskPointer(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
    return RemaskVoidPtrForMTE;
  }
  return RemaskVoidPtrNoOp;
}
217
// Public entry points. __attribute__((ifunc("..."))) binds each symbol to
// the named resolver above, so the choice between the MTE and no-op variants
// is made once by the dynamic loader rather than per call.
void* TagMemoryRangeIncrementInternal(void* ptr, size_t size)
    __attribute__((ifunc("ResolveTagMemoryRangeIncrement")));
void* TagMemoryRangeRandomlyInternal(void* ptr, size_t size, uint64_t mask)
    __attribute__((ifunc("ResolveTagMemoryRandomly")));
void* RemaskPointerInternal(void* ptr)
    __attribute__((ifunc("ResolveRemaskPointer")));
224 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
225
GetMemoryTaggingModeForCurrentThread()226 TagViolationReportingMode GetMemoryTaggingModeForCurrentThread() {
227 #if PA_CONFIG(HAS_MEMORY_TAGGING)
228 base::CPU cpu;
229 if (!cpu.has_mte()) {
230 return TagViolationReportingMode::kUndefined;
231 }
232 int status = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
233 PA_CHECK(status >= 0);
234 // Check for Asynchronous first because ASYNC on Android sets both
235 // PR_MTE_TCF_ASYNC and PR_MTE_TCF_SYNC bits.
236 if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_ASYNC)) {
237 return TagViolationReportingMode::kAsynchronous;
238 }
239 if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_SYNC)) {
240 return TagViolationReportingMode::kSynchronous;
241 }
242 return TagViolationReportingMode::kDisabled;
243 #else
244 return TagViolationReportingMode::kUndefined;
245 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
246 }
247
248 } // namespace internal
249
250 #if PA_CONFIG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
// When true, HandleCrash() downgrades MTE faults to a process-wide mode
// switch instead of letting the process crash. Written by SetEnabled(), read
// by HandleCrash().
bool PermissiveMte::enabled_ = false;

// static
void PermissiveMte::SetEnabled(bool enabled) {
  PermissiveMte::enabled_ = enabled;
}
257
258 // static
HandleCrash(int signo,siginfo_t * siginfo,ucontext_t * context)259 bool PermissiveMte::HandleCrash(int signo,
260 siginfo_t* siginfo,
261 ucontext_t* context) {
262 if (siginfo->si_signo == SIGSEGV &&
263 (siginfo->si_code == SEGV_MTESERR || siginfo->si_code == SEGV_MTEAERR) &&
264 PermissiveMte::enabled_) {
265 // In MTE permissive mode, do not crash the process. Instead, disable MTE
266 // and let the failing instruction be retried. The second time should
267 // succeed (except if there is another non-MTE fault).
268 internal::ChangeMemoryTaggingModeForAllThreadsPerProcess(
269 partition_alloc::TagViolationReportingMode::kDisabled);
270 return true;
271 }
272 return false;
273 }
274 #endif // PA_CONFIG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
275
276 } // namespace partition_alloc
277