1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <stdint.h>
18 #include <sys/mman.h>
19 #include <sys/types.h>
20 #include <unistd.h>
21
22 #include <memory>
23 #include <mutex>
24 #include <string>
25
26 #include <android-base/strings.h>
27
28 #include <unwindstack/Elf.h>
29 #include <unwindstack/MapInfo.h>
30 #include <unwindstack/Maps.h>
31
32 #include "MemoryFileAtOffset.h"
33 #include "MemoryRange.h"
34
35 namespace unwindstack {
36
ElfFileNotReadable()37 bool MapInfo::ElfFileNotReadable() {
38 const std::string& map_name = name();
39 return memory_backed_elf() && !map_name.empty() && map_name[0] != '[' &&
40 !android::base::StartsWith(map_name, "/memfd:");
41 }
42
GetPrevRealMap()43 std::shared_ptr<MapInfo> MapInfo::GetPrevRealMap() {
44 if (name().empty()) {
45 return nullptr;
46 }
47
48 for (auto prev = prev_map(); prev != nullptr; prev = prev->prev_map()) {
49 if (!prev->IsBlank()) {
50 if (prev->name() == name()) {
51 return prev;
52 }
53 return nullptr;
54 }
55 }
56 return nullptr;
57 }
58
GetNextRealMap()59 std::shared_ptr<MapInfo> MapInfo::GetNextRealMap() {
60 if (name().empty()) {
61 return nullptr;
62 }
63
64 for (auto next = next_map(); next != nullptr; next = next->next_map()) {
65 if (!next->IsBlank()) {
66 if (next->name() == name()) {
67 return next;
68 }
69 return nullptr;
70 }
71 }
72 return nullptr;
73 }
74
// Try to initialize |memory| from the file using the offset of a
// preceding read-only map with the same name (the linker "rosegment"
// style layout, where the elf header lives in an r-- map before this
// r-x map). On success, sets elf_offset/elf_start_offset relative to
// that previous map and returns true.
bool MapInfo::InitFileMemoryFromPreviousReadOnlyMap(MemoryFileAtOffset* memory) {
  // One last attempt, see if the previous map is read-only with the
  // same name and stretches across this map.
  auto prev_real_map = GetPrevRealMap();
  if (prev_real_map == nullptr || prev_real_map->flags() != PROT_READ ||
      prev_real_map->offset() >= offset()) {
    return false;
  }

  // Bytes from the end of the previous map through the end of this map.
  uint64_t map_size = end() - prev_real_map->end();
  if (!memory->Init(name(), prev_real_map->offset(), map_size)) {
    return false;
  }

  // The elf found at the previous map's offset must be at least as large
  // as the mapped region, otherwise it cannot be the elf for this map.
  uint64_t max_size;
  if (!Elf::GetInfo(memory, &max_size) || max_size < map_size) {
    return false;
  }

  // Reinit to the full elf size so data past the mapped portion (e.g.
  // symbol information) is also readable.
  if (!memory->Init(name(), prev_real_map->offset(), max_size)) {
    return false;
  }

  set_elf_offset(offset() - prev_real_map->offset());
  set_elf_start_offset(prev_real_map->offset());
  return true;
}
102
// Creates a Memory object backed by the file named by this map, trying a
// sequence of interpretations of the map offset (embedded elf at offset,
// whole-file elf, elf header in a preceding read-only map). Returns a
// heap-allocated Memory the caller owns, or nullptr on failure.
Memory* MapInfo::GetFileMemory() {
  // Fail on device maps.
  if (flags() & MAPS_FLAGS_DEVICE_MAP) {
    return nullptr;
  }

  std::unique_ptr<MemoryFileAtOffset> memory(new MemoryFileAtOffset);
  if (offset() == 0) {
    // Offset zero: the file itself starts at the elf (or is not an elf at
    // all; that is determined later by the caller).
    if (memory->Init(name(), 0)) {
      return memory.release();
    }
    return nullptr;
  }

  // These are the possibilities when the offset is non-zero.
  // - There is an elf file embedded in a file, and the offset is the
  //   the start of the elf in the file.
  // - There is an elf file embedded in a file, and the offset is the
  //   the start of the executable part of the file. The actual start
  //   of the elf is in the read-only segment preceeding this map.
  // - The whole file is an elf file, and the offset needs to be saved.
  //
  // Map in just the part of the file for the map. If this is not
  // a valid elf, then reinit as if the whole file is an elf file.
  // If the offset is a valid elf, then determine the size of the map
  // and reinit to that size. This is needed because the dynamic linker
  // only maps in a portion of the original elf, and never the symbol
  // file data.
  //
  // For maps with MAPS_FLAGS_JIT_SYMFILE_MAP, the map range is for a JIT function,
  // which can be smaller than elf header size. So make sure map_size is large enough
  // to read elf header.
  uint64_t map_size = std::max<uint64_t>(end() - start(), sizeof(ElfTypes64::Ehdr));
  if (!memory->Init(name(), offset(), map_size)) {
    return nullptr;
  }

  // Check if the start of this map is an embedded elf.
  uint64_t max_size = 0;
  if (Elf::GetInfo(memory.get(), &max_size)) {
    set_elf_start_offset(offset());
    if (max_size > map_size) {
      // The elf extends past the map; grow to the full elf size.
      if (memory->Init(name(), offset(), max_size)) {
        return memory.release();
      }
      // Try to reinit using the default map_size.
      if (memory->Init(name(), offset(), map_size)) {
        return memory.release();
      }
      // Both reinits failed; undo the start offset set above.
      set_elf_start_offset(0);
      return nullptr;
    }
    return memory.release();
  }

  // No elf at offset, try to init as if the whole file is an elf.
  if (memory->Init(name(), 0) && Elf::IsValidElf(memory.get())) {
    set_elf_offset(offset());
    return memory.release();
  }

  // See if the map previous to this one contains a read-only map
  // that represents the real start of the elf data.
  if (InitFileMemoryFromPreviousReadOnlyMap(memory.get())) {
    return memory.release();
  }

  // Failed to find elf at start of file or at read-only map, return
  // file object from the current map.
  if (memory->Init(name(), offset(), map_size)) {
    return memory.release();
  }
  return nullptr;
}
177
// Creates the Memory object used to parse this map's elf. Prefers the
// backing file (GetFileMemory); falls back to process memory, possibly
// stitching this map together with an adjacent map of the same file when
// the elf spans two maps. Returns a heap-allocated Memory the caller
// owns, or nullptr on failure.
Memory* MapInfo::CreateMemory(const std::shared_ptr<Memory>& process_memory) {
  if (end() <= start()) {
    return nullptr;
  }

  set_elf_offset(0);

  // Fail on device maps.
  if (flags() & MAPS_FLAGS_DEVICE_MAP) {
    return nullptr;
  }

  // First try and use the file associated with the info.
  if (!name().empty()) {
    Memory* memory = GetFileMemory();
    if (memory != nullptr) {
      return memory;
    }
  }

  if (process_memory == nullptr) {
    return nullptr;
  }

  // From here on the elf is only reachable through process memory.
  set_memory_backed_elf(true);

  // Need to verify that this elf is valid. It's possible that
  // only part of the elf file to be mapped into memory is in the executable
  // map. In this case, there will be another read-only map that includes the
  // first part of the elf file. This is done if the linker rosegment
  // option is used.
  std::unique_ptr<MemoryRange> memory(new MemoryRange(process_memory, start(), end() - start(), 0));
  if (Elf::IsValidElf(memory.get())) {
    set_elf_start_offset(offset());

    auto next_real_map = GetNextRealMap();

    // Might need to peek at the next map to create a memory object that
    // includes that map too.
    if (offset() != 0 || next_real_map == nullptr || offset() >= next_real_map->offset()) {
      return memory.release();
    }

    // There is a possibility that the elf object has already been created
    // in the next map. Since this should be a very uncommon path, just
    // redo the work. If this happens, the elf for this map will eventually
    // be discarded.
    MemoryRanges* ranges = new MemoryRanges;
    ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), 0));
    ranges->Insert(new MemoryRange(process_memory, next_real_map->start(),
                                   next_real_map->end() - next_real_map->start(),
                                   next_real_map->offset() - offset()));

    return ranges;
  }

  auto prev_real_map = GetPrevRealMap();

  // Find the read-only map by looking at the previous map. The linker
  // doesn't guarantee that this invariant will always be true. However,
  // if that changes, there is likely something else that will change and
  // break something.
  if (offset() == 0 || prev_real_map == nullptr || prev_real_map->offset() >= offset()) {
    set_memory_backed_elf(false);
    return nullptr;
  }

  // Make sure that relative pc values are corrected properly.
  set_elf_offset(offset() - prev_real_map->offset());
  // Use this as the elf start offset, otherwise, you always get offsets into
  // the r-x section, which is not quite the right information.
  set_elf_start_offset(prev_real_map->offset());

  // Stitch together the previous (read-only) map and this map into a
  // single logical range covering the elf.
  std::unique_ptr<MemoryRanges> ranges(new MemoryRanges);
  if (!ranges->Insert(new MemoryRange(process_memory, prev_real_map->start(),
                                      prev_real_map->end() - prev_real_map->start(), 0))) {
    return nullptr;
  }
  if (!ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), elf_offset()))) {
    return nullptr;
  }
  return ranges.release();
}
261
262 class ScopedElfCacheLock {
263 public:
ScopedElfCacheLock()264 ScopedElfCacheLock() {
265 if (Elf::CachingEnabled()) Elf::CacheLock();
266 }
~ScopedElfCacheLock()267 ~ScopedElfCacheLock() {
268 if (Elf::CachingEnabled()) Elf::CacheUnlock();
269 }
270 };
271
// Returns the Elf object for this map, creating it on first call (guarded
// by elf_mutex()). On failure an invalid Elf object is kept so the
// expensive initialization is never retried. When a preceding read-only
// map holds the start of the same elf file, the two maps are made to
// share one Elf object. The returned pointer is owned by this MapInfo.
Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum expected_arch) {
  // Make sure no other thread is trying to add the elf to this map.
  std::lock_guard<std::mutex> guard(elf_mutex());

  if (elf().get() != nullptr) {
    return elf().get();
  }

  // Hold the cache lock across lookup and (below) insertion so the two
  // stay consistent.
  ScopedElfCacheLock elf_cache_lock;
  if (Elf::CachingEnabled() && !name().empty()) {
    if (Elf::CacheGet(this)) {
      return elf().get();
    }
  }

  elf().reset(new Elf(CreateMemory(process_memory)));
  // If the init fails, keep the elf around as an invalid object so we
  // don't try to reinit the object.
  elf()->Init();
  if (elf()->valid() && expected_arch != elf()->arch()) {
    // Make the elf invalid, mismatch between arch and expected arch.
    elf()->Invalidate();
  }

  if (!elf()->valid()) {
    set_elf_start_offset(offset());
  } else if (auto prev_real_map = GetPrevRealMap(); prev_real_map != nullptr &&
                                                    prev_real_map->flags() == PROT_READ &&
                                                    prev_real_map->offset() < offset()) {
    // If there is a read-only map then a read-execute map that represents the
    // same elf object, make sure the previous map is using the same elf
    // object if it hasn't already been set. Locking this should not result
    // in a deadlock as long as the invariant that the code only ever tries
    // to lock the previous real map holds true.
    std::lock_guard<std::mutex> guard(prev_real_map->elf_mutex());
    if (prev_real_map->elf() == nullptr) {
      // Need to verify if the map is the previous read-only map.
      prev_real_map->set_elf(elf());
      prev_real_map->set_memory_backed_elf(memory_backed_elf());
      prev_real_map->set_elf_start_offset(elf_start_offset());
      prev_real_map->set_elf_offset(prev_real_map->offset() - elf_start_offset());
    } else if (prev_real_map->elf_start_offset() == elf_start_offset()) {
      // Discard this elf, and use the elf from the previous map instead.
      set_elf(prev_real_map->elf());
    }
  }

  // Cache the elf only after all of the above checks since we might
  // discard the original elf we created.
  if (Elf::CachingEnabled()) {
    Elf::CacheAdd(this);
  }
  return elf().get();
}
326
GetFunctionName(uint64_t addr,SharedString * name,uint64_t * func_offset)327 bool MapInfo::GetFunctionName(uint64_t addr, SharedString* name, uint64_t* func_offset) {
328 {
329 // Make sure no other thread is trying to update this elf object.
330 std::lock_guard<std::mutex> guard(elf_mutex());
331 if (elf() == nullptr) {
332 return false;
333 }
334 }
335 // No longer need the lock, once the elf object is created, it is not deleted
336 // until this object is deleted.
337 return elf()->GetFunctionName(addr, name, func_offset);
338 }
339
GetLoadBias()340 uint64_t MapInfo::GetLoadBias() {
341 uint64_t cur_load_bias = load_bias().load();
342 if (cur_load_bias != UINT64_MAX) {
343 return cur_load_bias;
344 }
345
346 Elf* elf_obj = GetElfObj();
347 if (elf_obj == nullptr) {
348 return UINT64_MAX;
349 }
350
351 if (elf_obj->valid()) {
352 cur_load_bias = elf_obj->GetLoadBias();
353 set_load_bias(cur_load_bias);
354 return cur_load_bias;
355 }
356
357 set_load_bias(0);
358 return 0;
359 }
360
GetLoadBias(const std::shared_ptr<Memory> & process_memory)361 uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
362 uint64_t cur_load_bias = GetLoadBias();
363 if (cur_load_bias != UINT64_MAX) {
364 return cur_load_bias;
365 }
366
367 // Call lightweight static function that will only read enough of the
368 // elf data to get the load bias.
369 std::unique_ptr<Memory> memory(CreateMemory(process_memory));
370 cur_load_bias = Elf::GetLoadBias(memory.get());
371 set_load_bias(cur_load_bias);
372 return cur_load_bias;
373 }
374
MapInfo::~MapInfo() {
  // ElfFields is lazily allocated (see GetElfFields); free it along with
  // the lazily-created build id string it may hold.
  ElfFields* elf_fields = elf_fields_.load();
  if (elf_fields != nullptr) {
    delete elf_fields->build_id_.load();
    delete elf_fields;
  }
}
382
GetFullName()383 std::string MapInfo::GetFullName() {
384 Elf* elf_obj = GetElfObj();
385 if (elf_obj == nullptr || elf_start_offset() == 0 || name().empty()) {
386 return name();
387 }
388
389 std::string soname = elf_obj->GetSoname();
390 if (soname.empty()) {
391 return name();
392 }
393
394 std::string full_name(name());
395 full_name += '!';
396 full_name += soname;
397 return full_name;
398 }
399
// Returns the build id for this map, computing and caching it on first
// use. Prefers the elf object when one exists; otherwise reads the build
// id directly from the backing file.
SharedString MapInfo::GetBuildID() {
  // Fast path: already computed and published.
  SharedString* id = build_id().load();
  if (id != nullptr) {
    return *id;
  }

  // No need to lock, at worst if multiple threads do this at the same
  // time it should be detected and only one thread should win and
  // save the data.

  std::string result;
  Elf* elf_obj = GetElfObj();
  if (elf_obj != nullptr) {
    result = elf_obj->GetBuildID();
  } else {
    // This will only work if we can get the file associated with this memory.
    // If this is only available in memory, then the section name information
    // is not present and we will not be able to find the build id info.
    std::unique_ptr<Memory> memory(GetFileMemory());
    if (memory != nullptr) {
      result = Elf::GetBuildID(memory.get());
    }
  }
  // SetBuildID resolves the race: whichever thread stores first wins.
  return SetBuildID(std::move(result));
}
425
// Atomically publishes |new_build_id| as this map's build id. If another
// thread published first, the new value is discarded and the stored one
// is returned instead.
SharedString MapInfo::SetBuildID(std::string&& new_build_id) {
  std::unique_ptr<SharedString> new_build_id_ptr(new SharedString(std::move(new_build_id)));
  SharedString* expected_id = nullptr;
  // Strong version since we need to reliably return the stored pointer.
  if (build_id().compare_exchange_strong(expected_id, new_build_id_ptr.get())) {
    // Value saved, so make sure the memory is not freed.
    return *new_build_id_ptr.release();
  } else {
    // The expected value is set to the stored value on failure.
    return *expected_id;
  }
}
438
// Returns the lazily-allocated ElfFields struct, creating it on first use
// via a lock-free compare-and-swap. The winning allocation is owned by
// elf_fields_ (freed in ~MapInfo); a losing allocation is discarded.
MapInfo::ElfFields& MapInfo::GetElfFields() {
  ElfFields* elf_fields = elf_fields_.load(std::memory_order_acquire);
  if (elf_fields != nullptr) {
    return *elf_fields;
  }
  // Allocate and initialize the field in thread-safe way.
  std::unique_ptr<ElfFields> desired(new ElfFields());
  ElfFields* expected = nullptr;
  // Strong version is reliable. Weak version might randomly return false.
  if (elf_fields_.compare_exchange_strong(expected, desired.get())) {
    return *desired.release();  // Success: we transferred the pointer ownership to the field.
  } else {
    return *expected;  // Failure: 'expected' is updated to the value set by the other thread.
  }
}
454
GetPrintableBuildID()455 std::string MapInfo::GetPrintableBuildID() {
456 std::string raw_build_id = GetBuildID();
457 return Elf::GetPrintableBuildID(raw_build_id);
458 }
459
460 } // namespace unwindstack
461