// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/tagging.h"

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "build/build_config.h"

#if PA_CONFIG(HAS_MEMORY_TAGGING)
#include <arm_acle.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)

#if BUILDFLAG(IS_LINUX)
#include <linux/version.h>

// Linux headers already provide these since v5.10.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#define HAS_PR_MTE_MACROS
#endif
#endif

#ifndef HAS_PR_MTE_MACROS
#define PR_MTE_TCF_SHIFT 1
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TAG_SHIFT 3
#define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
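// With these constants, a full PR_SET_TAGGED_ADDR_CTRL argument composes as,
// e.g., PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
// (0xfffe << PR_MTE_TAG_SHIFT): bits [18:3] select which tag values the
// kernel may use for random tag generation, so 0xfffe allows tags 1-15 and
// reserves tag 0 for untagged memory.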
#endif
#endif

#if BUILDFLAG(IS_ANDROID)
// Provides M_BIONIC_SET_HEAP_TAGGING_LEVEL and the M_HEAP_TAGGING_LEVEL_*
// values used below (bionic).
#include <malloc.h>

#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
#endif  // BUILDFLAG(IS_ANDROID)
45
46 namespace partition_alloc {
47
48 #if PA_CONFIG(HAS_MEMORY_TAGGING)
49 namespace {
ChangeMemoryTaggingModeInternal(unsigned prctl_mask)50 void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) {
51 if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) {
52 int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0);
53 PA_CHECK(status == 0);
54 }
55 }
56 } // namespace
57 #endif // PA_CONFIG(HAS_MEMORY_TAGGING)

void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode m) {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
  if (m == TagViolationReportingMode::kSynchronous) {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                                    (0xfffe << PR_MTE_TAG_SHIFT));
  } else if (m == TagViolationReportingMode::kAsynchronous) {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC |
                                    (0xfffe << PR_MTE_TAG_SHIFT));
  } else {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_NONE);
  }
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
}
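
// For example, an embedder that wants synchronous tag checking on the calling
// thread would write (a usage sketch, not code from this file):
//
//   partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
//       partition_alloc::TagViolationReportingMode::kSynchronous);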

namespace internal {

#if BUILDFLAG(IS_ANDROID)
void ChangeMemoryTaggingModeForAllThreadsPerProcess(
    TagViolationReportingMode m) {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
  // In order to support Android NDK API levels below 26, we need to call
  // mallopt via the dynamic linker:
  //   int mallopt(int param, int value);
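  //
  // On devices where mallopt is exported (NDK API level >= 26), the effect is
  // the same as calling, e.g.:
  //   mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_SYNC);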
  using MalloptSignature = int (*)(int, int);

  static MalloptSignature mallopt_fnptr = []() {
    base::FilePath module_path;
    base::NativeLibraryLoadError load_error;
    base::FilePath library_path = module_path.Append("libc.so");
    base::NativeLibrary library =
        base::LoadNativeLibrary(library_path, &load_error);
    PA_CHECK(library);
    void* func_ptr =
        base::GetFunctionPointerFromNativeLibrary(library, "mallopt");
    PA_CHECK(func_ptr);
    return reinterpret_cast<MalloptSignature>(func_ptr);
  }();

  int status = 0;
  if (m == TagViolationReportingMode::kSynchronous) {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_SYNC);
  } else if (m == TagViolationReportingMode::kAsynchronous) {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_ASYNC);
  } else {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_NONE);
  }
  // Bionic's mallopt returns a nonzero value on success.
  PA_CHECK(status);
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
}
#endif  // BUILDFLAG(IS_ANDROID)

namespace {
[[maybe_unused]] static bool CheckTagRegionParameters(void* ptr, size_t sz) {
  // Check that ptr and size are correct for MTE: the pointer must be aligned
  // to, and the size a nonzero multiple of, the 16-byte tagging granule.
  uintptr_t ptr_as_uint = reinterpret_cast<uintptr_t>(ptr);
  bool ret = (ptr_as_uint % kMemTagGranuleSize == 0) &&
             (sz % kMemTagGranuleSize == 0) && sz;
  return ret;
}
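
// For instance, a 16-byte-aligned ptr with sz == 32 passes the check above,
// while sz == 24 fails because 24 is not a multiple of kMemTagGranuleSize.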

#if PA_CONFIG(HAS_MEMORY_TAGGING)
static bool HasCPUMemoryTaggingExtension() {
  return base::CPU::GetInstanceNoAllocation().has_mte();
}
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)

#if PA_CONFIG(HAS_MEMORY_TAGGING)
void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
  // Randomly tag a region (MTE-enabled systems only). The first 16-byte
  // granule is randomly tagged, and all other granules in the region are
  // then assigned that initial tag via __arm_mte_set_tag.
  if (!CheckTagRegionParameters(ptr, sz)) {
    return nullptr;
  }
  // __arm_mte_create_random_tag generates a randomly tagged pointer via the
  // hardware's random number generator, but does not apply it to the memory.
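  // Each bit set in `mask` excludes the corresponding tag value from random
  // generation, so e.g. a mask of 0b1 prevents tag 0 from being chosen.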
  char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Tag the first and all subsequent granules with the random tag, which
    // __arm_mte_set_tag takes from the top bits of its argument.
    __arm_mte_set_tag(nptr + i);
  }
  return nptr;
}

void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
  // Increment a region's tag (MTE-enabled systems only), using the tag of the
  // first granule.
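  // Retagging a region this way is what catches stale pointers: a pointer
  // created before the increment still carries the old tag, no longer matches
  // the memory's new tag, and is reported on its next checked access.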
  if (!CheckTagRegionParameters(ptr, sz)) {
    return nullptr;
  }
  // Increment ptr's tag.
  char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Apply the tag to the first granule, and all subsequent granules.
    __arm_mte_set_tag(nptr + i);
  }
  return nptr;
}

void* RemaskVoidPtrForMTE(void* ptr) {
  if (PA_LIKELY(ptr)) {
    // __arm_mte_get_tag reads the allocation tag stored for ptr's granule and
    // returns ptr with that tag merged into its top bits. Can't look up the
    // tag for a null ptr (segfaults).
    return __arm_mte_get_tag(ptr);
  }
  return nullptr;
}
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)

void* TagRegionIncrementNoOp(void* ptr, size_t sz) {
  // No-op fallback for systems without MTE: the region is left untouched and
  // the pointer is returned unchanged.
  return ptr;
}

void* TagRegionRandomlyNoOp(void* ptr, size_t sz, uint64_t mask) {
  // No-op fallback for systems without MTE: the region is left untouched and
  // the pointer is returned unchanged.
  return ptr;
}

void* RemaskVoidPtrNoOp(void* ptr) {
  return ptr;
}

}  // namespace

void InitializeMTESupportIfNeeded() {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
  if (HasCPUMemoryTaggingExtension()) {
    global_remask_void_ptr_fn = RemaskVoidPtrForMTE;
    global_tag_memory_range_increment_fn = TagRegionIncrementForMTE;
    global_tag_memory_range_randomly_fn = TagRegionRandomlyForMTE;
  }
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
}

RemaskPtrInternalFn* global_remask_void_ptr_fn = RemaskVoidPtrNoOp;
TagMemoryRangeIncrementInternalFn* global_tag_memory_range_increment_fn =
    TagRegionIncrementNoOp;
TagMemoryRangeRandomlyInternalFn* global_tag_memory_range_randomly_fn =
    TagRegionRandomlyNoOp;
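
// These globals start out pointing at the no-op implementations and are
// switched to the MTE-aware versions at most once, by
// InitializeMTESupportIfNeeded(). A sketch of the kind of inline wrapper the
// header is expected to route through them (illustrative, not code from this
// file):
//
//   void* TagMemoryRangeRandomly(void* ptr, size_t sz, uint64_t mask) {
//     return global_tag_memory_range_randomly_fn(ptr, sz, mask);
//   }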

TagViolationReportingMode GetMemoryTaggingModeForCurrentThread() {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
  base::CPU cpu;
  if (!cpu.has_mte()) {
    return TagViolationReportingMode::kUndefined;
  }
  int status = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  PA_CHECK(status >= 0);
  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_SYNC)) {
    return TagViolationReportingMode::kSynchronous;
  }
  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_ASYNC)) {
    return TagViolationReportingMode::kAsynchronous;
  }
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)
  return TagViolationReportingMode::kUndefined;
}
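
// For example, a test asserting that a mode change took effect might check
// (a usage sketch):
//
//   PA_CHECK(GetMemoryTaggingModeForCurrentThread() ==
//            TagViolationReportingMode::kSynchronous);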

}  // namespace internal

}  // namespace partition_alloc