1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <memory>
#include <mutex>
#include <string>

#include <android-base/strings.h>

#include <unwindstack/Elf.h>
#include <unwindstack/MapInfo.h>
#include <unwindstack/Maps.h>
#include <unwindstack/Memory.h>

#include "MemoryFileAtOffset.h"
#include "MemoryRange.h"
35
36 namespace unwindstack {
37
ElfFileNotReadable()38 bool MapInfo::ElfFileNotReadable() {
39 const std::string& map_name = name();
40 return memory_backed_elf() && !map_name.empty() && map_name[0] != '[' &&
41 !android::base::StartsWith(map_name, "/memfd:");
42 }
43
GetPrevRealMap()44 std::shared_ptr<MapInfo> MapInfo::GetPrevRealMap() {
45 if (name().empty()) {
46 return nullptr;
47 }
48
49 for (auto prev = prev_map(); prev != nullptr; prev = prev->prev_map()) {
50 if (!prev->IsBlank()) {
51 if (prev->name() == name()) {
52 return prev;
53 }
54 return nullptr;
55 }
56 }
57 return nullptr;
58 }
59
GetNextRealMap()60 std::shared_ptr<MapInfo> MapInfo::GetNextRealMap() {
61 if (name().empty()) {
62 return nullptr;
63 }
64
65 for (auto next = next_map(); next != nullptr; next = next->next_map()) {
66 if (!next->IsBlank()) {
67 if (next->name() == name()) {
68 return next;
69 }
70 return nullptr;
71 }
72 }
73 return nullptr;
74 }
75
// Tries to initialize |memory| from the file using the read-only map that
// precedes this one, for the case where the elf header lives in that
// preceding map instead of at this map's offset. On success, sets
// elf_offset/elf_start_offset relative to the previous map's file offset
// and returns true; on any failure returns false with |memory| in an
// unspecified init state.
bool MapInfo::InitFileMemoryFromPreviousReadOnlyMap(MemoryFileAtOffset* memory) {
  // One last attempt, see if the previous map is read-only with the
  // same name and stretches across this map.
  auto prev_real_map = GetPrevRealMap();
  if (prev_real_map == nullptr || prev_real_map->flags() != PROT_READ ||
      prev_real_map->offset() >= offset()) {
    return false;
  }

  // Size chosen so the file data read at the previous map's offset extends
  // through the end of this map.
  uint64_t map_size = end() - prev_real_map->end();
  if (!memory->Init(name(), prev_real_map->offset(), map_size)) {
    return false;
  }

  // Verify there really is an elf at the previous map's offset and that it
  // is large enough to stretch across this map.
  uint64_t max_size;
  if (!Elf::GetInfo(memory, &max_size) || max_size < map_size) {
    return false;
  }

  // Reinit using the full elf size reported by the elf header.
  if (!memory->Init(name(), prev_real_map->offset(), max_size)) {
    return false;
  }

  set_elf_offset(offset() - prev_real_map->offset());
  set_elf_start_offset(prev_real_map->offset());
  return true;
}
103
// Creates a Memory object backed by the file this map names, positioned so
// that it exposes the elf data associated with the map. Returns nullptr if
// no usable file memory can be created (e.g. device maps, unreadable file).
// May set elf_offset/elf_start_offset as a side effect.
std::shared_ptr<Memory> MapInfo::CreateFileMemory() {
  // Fail on device maps.
  if (flags() & MAPS_FLAGS_DEVICE_MAP) {
    return nullptr;
  }

  auto file_memory = std::make_shared<MemoryFileAtOffset>();
  if (offset() == 0) {
    // Zero offset: the elf, if any, starts at the beginning of the file.
    if (file_memory->Init(name(), 0)) {
      return file_memory;
    }
    return nullptr;
  }

  // These are the possibilities when the offset is non-zero.
  // - There is an elf file embedded in a file, and the offset is the
  //   start of the elf in the file.
  // - There is an elf file embedded in a file, and the offset is the
  //   start of the executable part of the file. The actual start
  //   of the elf is in the read-only segment preceding this map.
  // - The whole file is an elf file, and the offset needs to be saved.
  //
  // Map in just the part of the file for the map. If this is not
  // a valid elf, then reinit as if the whole file is an elf file.
  // If the offset is a valid elf, then determine the size of the map
  // and reinit to that size. This is needed because the dynamic linker
  // only maps in a portion of the original elf, and never the symbol
  // file data.
  //
  // For maps with MAPS_FLAGS_JIT_SYMFILE_MAP, the map range is for a JIT function,
  // which can be smaller than elf header size. So make sure map_size is large enough
  // to read elf header.
  uint64_t map_size = std::max<uint64_t>(end() - start(), sizeof(ElfTypes64::Ehdr));
  if (!file_memory->Init(name(), offset(), map_size)) {
    return nullptr;
  }

  // Check if the start of this map is an embedded elf.
  uint64_t max_size = 0;
  if (Elf::GetInfo(file_memory.get(), &max_size)) {
    set_elf_start_offset(offset());
    if (max_size > map_size) {
      // The elf is bigger than the map; try to expose the whole elf.
      if (file_memory->Init(name(), offset(), max_size)) {
        return file_memory;
      }
      // Try to reinit using the default map_size.
      if (file_memory->Init(name(), offset(), map_size)) {
        return file_memory;
      }
      // Both reinits failed; undo the start offset side effect.
      set_elf_start_offset(0);
      return nullptr;
    }
    return file_memory;
  }

  // No elf at offset, try to init as if the whole file is an elf.
  if (file_memory->Init(name(), 0) && Elf::IsValidElf(file_memory.get())) {
    set_elf_offset(offset());
    return file_memory;
  }

  // See if the map previous to this one contains a read-only map
  // that represents the real start of the elf data.
  if (InitFileMemoryFromPreviousReadOnlyMap(file_memory.get())) {
    return file_memory;
  }

  // Failed to find elf at start of file or at read-only map, return
  // file object from the current map.
  if (file_memory->Init(name(), offset(), map_size)) {
    return file_memory;
  }
  return nullptr;
}
178
// Creates the Memory object used to read this map's elf data. Prefers
// file-backed memory; otherwise reads the elf straight out of process
// memory, stitching adjacent maps together when the elf spans more than
// one map (the linker rosegment layout). Returns nullptr when no valid
// memory object can be built. Side effects: updates elf_offset,
// elf_start_offset and memory_backed_elf.
std::shared_ptr<Memory> MapInfo::CreateMemory(const std::shared_ptr<Memory>& process_memory) {
  if (end() <= start()) {
    return nullptr;
  }

  set_elf_offset(0);

  // Fail on device maps.
  if (flags() & MAPS_FLAGS_DEVICE_MAP) {
    return nullptr;
  }

  // First try and use the file associated with the info.
  if (!name().empty()) {
    auto memory = CreateFileMemory();
    if (memory != nullptr) {
      return memory;
    }
  }

  if (process_memory == nullptr) {
    return nullptr;
  }

  set_memory_backed_elf(true);

  // Need to verify that this elf is valid. It's possible that
  // only part of the elf file to be mapped into memory is in the executable
  // map. In this case, there will be another read-only map that includes the
  // first part of the elf file. This is done if the linker rosegment
  // option is used.
  std::shared_ptr<Memory> memory_range(
      new MemoryRange(process_memory, start(), end() - start(), 0));
  if (Elf::IsValidElf(memory_range.get())) {
    set_elf_start_offset(offset());

    auto next_real_map = GetNextRealMap();

    // Might need to peek at the next map to create a memory object that
    // includes that map too.
    if (offset() != 0 || next_real_map == nullptr || offset() >= next_real_map->offset()) {
      return memory_range;
    }

    // There is a possibility that the elf object has already been created
    // in the next map. Since this should be a very uncommon path, just
    // redo the work. If this happens, the elf for this map will eventually
    // be discarded.
    MemoryRanges* ranges = new MemoryRanges;
    std::shared_ptr<Memory> memory_ranges(ranges);
    ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), 0));
    ranges->Insert(new MemoryRange(process_memory, next_real_map->start(),
                                   next_real_map->end() - next_real_map->start(),
                                   next_real_map->offset() - offset()));

    return memory_ranges;
  }

  auto prev_real_map = GetPrevRealMap();

  // Find the read-only map by looking at the previous map. The linker
  // doesn't guarantee that this invariant will always be true. However,
  // if that changes, there is likely something else that will change and
  // break something.
  if (offset() == 0 || prev_real_map == nullptr || prev_real_map->offset() >= offset()) {
    set_memory_backed_elf(false);
    return nullptr;
  }

  // Make sure that relative pc values are corrected properly.
  set_elf_offset(offset() - prev_real_map->offset());
  // Use this as the elf start offset, otherwise, you always get offsets into
  // the r-x section, which is not quite the right information.
  set_elf_start_offset(prev_real_map->offset());

  // Stitch the previous read-only map and this map into one memory object:
  // the elf header comes from the previous map, the executable data from
  // this one.
  MemoryRanges* ranges = new MemoryRanges;
  std::shared_ptr<Memory> memory_ranges(ranges);
  if (!ranges->Insert(new MemoryRange(process_memory, prev_real_map->start(),
                                      prev_real_map->end() - prev_real_map->start(), 0))) {
    return nullptr;
  }
  if (!ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), elf_offset()))) {
    return nullptr;
  }
  return memory_ranges;
}
265
266 class ScopedElfCacheLock {
267 public:
ScopedElfCacheLock()268 ScopedElfCacheLock() {
269 if (Elf::CachingEnabled()) Elf::CacheLock();
270 }
~ScopedElfCacheLock()271 ~ScopedElfCacheLock() {
272 if (Elf::CachingEnabled()) Elf::CacheUnlock();
273 }
274 };
275
// Returns the elf object for this map, creating it on first use (and
// consulting/updating the global elf cache when caching is enabled).
// If initialization fails, an invalid (but non-null) Elf object is kept so
// the expensive init is not retried. Thread-safe: creation is serialized
// by this map's elf_mutex, and the previous real map's mutex may be taken
// while holding it (see lock-ordering comment below).
Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum expected_arch) {
  // Make sure no other thread is trying to add the elf to this map.
  std::lock_guard<std::mutex> guard(elf_mutex());

  if (elf().get() != nullptr) {
    return elf().get();
  }

  ScopedElfCacheLock elf_cache_lock;
  if (Elf::CachingEnabled() && !name().empty()) {
    // A cache hit populates elf() directly.
    if (Elf::CacheGet(this)) {
      return elf().get();
    }
  }

  auto elf_memory = CreateMemory(process_memory);
  elf().reset(new Elf(elf_memory));
  // If the init fails, keep the elf around as an invalid object so we
  // don't try to reinit the object.
  elf()->Init();
  if (elf()->valid() && expected_arch != elf()->arch()) {
    // Make the elf invalid, mismatch between arch and expected arch.
    elf()->Invalidate();
  }

  if (!elf()->valid()) {
    set_elf_start_offset(offset());
  } else if (auto prev_real_map = GetPrevRealMap(); prev_real_map != nullptr &&
                                                    prev_real_map->flags() == PROT_READ &&
                                                    prev_real_map->offset() < offset()) {
    // If there is a read-only map then a read-execute map that represents the
    // same elf object, make sure the previous map is using the same elf
    // object if it hasn't already been set. Locking this should not result
    // in a deadlock as long as the invariant that the code only ever tries
    // to lock the previous real map holds true.
    std::lock_guard<std::mutex> guard(prev_real_map->elf_mutex());
    if (prev_real_map->elf() == nullptr) {
      // Need to verify if the map is the previous read-only map.
      prev_real_map->set_elf(elf());
      prev_real_map->set_memory_backed_elf(memory_backed_elf());
      prev_real_map->set_elf_start_offset(elf_start_offset());
      prev_real_map->set_elf_offset(prev_real_map->offset() - elf_start_offset());
    } else if (prev_real_map->elf_start_offset() == elf_start_offset()) {
      // Discard this elf, and use the elf from the previous map instead.
      set_elf(prev_real_map->elf());
    }
  }

  // Cache the elf only after all of the above checks since we might
  // discard the original elf we created.
  if (Elf::CachingEnabled()) {
    Elf::CacheAdd(this);
  }
  return elf().get();
}
331
GetFunctionName(uint64_t addr,SharedString * name,uint64_t * func_offset)332 bool MapInfo::GetFunctionName(uint64_t addr, SharedString* name, uint64_t* func_offset) {
333 {
334 // Make sure no other thread is trying to update this elf object.
335 std::lock_guard<std::mutex> guard(elf_mutex());
336 if (elf() == nullptr) {
337 return false;
338 }
339 }
340 // No longer need the lock, once the elf object is created, it is not deleted
341 // until this object is deleted.
342 return elf()->GetFunctionName(addr, name, func_offset);
343 }
344
GetLoadBias()345 uint64_t MapInfo::GetLoadBias() {
346 uint64_t cur_load_bias = load_bias().load();
347 if (cur_load_bias != UINT64_MAX) {
348 return cur_load_bias;
349 }
350
351 Elf* elf_obj = GetElfObj();
352 if (elf_obj == nullptr) {
353 return UINT64_MAX;
354 }
355
356 if (elf_obj->valid()) {
357 cur_load_bias = elf_obj->GetLoadBias();
358 set_load_bias(cur_load_bias);
359 return cur_load_bias;
360 }
361
362 set_load_bias(0);
363 return 0;
364 }
365
GetLoadBias(const std::shared_ptr<Memory> & process_memory)366 uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
367 uint64_t cur_load_bias = GetLoadBias();
368 if (cur_load_bias != UINT64_MAX) {
369 return cur_load_bias;
370 }
371
372 // Call lightweight static function that will only read enough of the
373 // elf data to get the load bias.
374 auto memory = CreateMemory(process_memory);
375 cur_load_bias = Elf::GetLoadBias(memory.get());
376 set_load_bias(cur_load_bias);
377 return cur_load_bias;
378 }
379
~MapInfo()380 MapInfo::~MapInfo() {
381 ElfFields* elf_fields = elf_fields_.load();
382 if (elf_fields != nullptr) {
383 delete elf_fields->build_id_.load();
384 delete elf_fields;
385 }
386 }
387
GetFullName()388 std::string MapInfo::GetFullName() {
389 Elf* elf_obj = GetElfObj();
390 if (elf_obj == nullptr || elf_start_offset() == 0 || name().empty()) {
391 return name();
392 }
393
394 std::string soname = elf_obj->GetSoname();
395 if (soname.empty()) {
396 return name();
397 }
398
399 std::string full_name(name());
400 full_name += '!';
401 full_name += soname;
402 return full_name;
403 }
404
GetBuildID()405 SharedString MapInfo::GetBuildID() {
406 SharedString* id = build_id().load();
407 if (id != nullptr) {
408 return *id;
409 }
410
411 // No need to lock, at worst if multiple threads do this at the same
412 // time it should be detected and only one thread should win and
413 // save the data.
414
415 std::string result;
416 Elf* elf_obj = GetElfObj();
417 if (elf_obj != nullptr) {
418 result = elf_obj->GetBuildID();
419 } else {
420 // This will only work if we can get the file associated with this memory.
421 // If this is only available in memory, then the section name information
422 // is not present and we will not be able to find the build id info.
423 auto file_memory = CreateFileMemory();
424 if (file_memory != nullptr) {
425 result = Elf::GetBuildID(file_memory.get());
426 }
427 }
428 return SetBuildID(std::move(result));
429 }
430
SetBuildID(std::string && new_build_id)431 SharedString MapInfo::SetBuildID(std::string&& new_build_id) {
432 std::unique_ptr<SharedString> new_build_id_ptr(new SharedString(std::move(new_build_id)));
433 SharedString* expected_id = nullptr;
434 // Strong version since we need to reliably return the stored pointer.
435 if (build_id().compare_exchange_strong(expected_id, new_build_id_ptr.get())) {
436 // Value saved, so make sure the memory is not freed.
437 return *new_build_id_ptr.release();
438 } else {
439 // The expected value is set to the stored value on failure.
440 return *expected_id;
441 }
442 }
443
GetElfFields()444 MapInfo::ElfFields& MapInfo::GetElfFields() {
445 ElfFields* elf_fields = elf_fields_.load(std::memory_order_acquire);
446 if (elf_fields != nullptr) {
447 return *elf_fields;
448 }
449 // Allocate and initialize the field in thread-safe way.
450 std::unique_ptr<ElfFields> desired(new ElfFields());
451 ElfFields* expected = nullptr;
452 // Strong version is reliable. Weak version might randomly return false.
453 if (elf_fields_.compare_exchange_strong(expected, desired.get())) {
454 return *desired.release(); // Success: we transferred the pointer ownership to the field.
455 } else {
456 return *expected; // Failure: 'expected' is updated to the value set by the other thread.
457 }
458 }
459
GetPrintableBuildID()460 std::string MapInfo::GetPrintableBuildID() {
461 std::string raw_build_id = GetBuildID();
462 return Elf::GetPrintableBuildID(raw_build_id);
463 }
464
465 } // namespace unwindstack
466