/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>
#include <mutex>
#include <string>

#include <android-base/stringprintf.h>

#include <unwindstack/Elf.h>
#include <unwindstack/MapInfo.h>
#include <unwindstack/Maps.h>

#include "MemoryFileAtOffset.h"
#include "MemoryRange.h"

namespace unwindstack {

bool MapInfo::InitFileMemoryFromPreviousReadOnlyMap(MemoryFileAtOffset* memory) {
  // One last attempt, see if the previous map is read-only with the
  // same name and stretches across this map.
  if (prev_real_map() == nullptr || prev_real_map()->flags() != PROT_READ) {
    return false;
  }

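  // Start by mapping in the file at the previous map's file offset.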
  uint64_t map_size = end() - prev_real_map()->end();
  if (!memory->Init(name(), prev_real_map()->offset(), map_size)) {
    return false;
  }

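  // Verify that a valid elf starts at the previous map's offset and that
  // the elf is large enough to stretch across this map.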
  uint64_t max_size;
  if (!Elf::GetInfo(memory, &max_size) || max_size < map_size) {
    return false;
  }

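  // Reinit the memory object so it covers the entire elf file, not just
  // the portion spanning this map.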
  if (!memory->Init(name(), prev_real_map()->offset(), max_size)) {
    return false;
  }

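  // elf_offset is this map's offset relative to the start of the elf in
  // the file, and elf_start_offset is the file offset where the elf starts.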
  set_elf_offset(offset() - prev_real_map()->offset());
  set_elf_start_offset(prev_real_map()->offset());
  return true;
}

Memory* MapInfo::GetFileMemory() {
  std::unique_ptr<MemoryFileAtOffset> memory(new MemoryFileAtOffset);
  if (offset() == 0) {
    if (memory->Init(name(), 0)) {
      return memory.release();
    }
    return nullptr;
  }

  // These are the possibilities when the offset is non-zero.
  // - There is an elf file embedded in a file, and the offset is the
  //   start of the elf in the file.
  // - There is an elf file embedded in a file, and the offset is the
  //   start of the executable part of the file. The actual start
  //   of the elf is in the read-only segment preceding this map.
  // - The whole file is an elf file, and the offset needs to be saved.
  //
  // Map in just the part of the file for this map. If this is not
  // a valid elf, then reinit as if the whole file is an elf file.
  // If the offset is a valid elf, then determine the size of the map
  // and reinit to that size. This is needed because the dynamic linker
  // only maps in a portion of the original elf, and never the symbol
  // file data.
  uint64_t map_size = end() - start();
  if (!memory->Init(name(), offset(), map_size)) {
    return nullptr;
  }

  // Check if the start of this map is an embedded elf.
  uint64_t max_size = 0;
  if (Elf::GetInfo(memory.get(), &max_size)) {
    set_elf_start_offset(offset());
    if (max_size > map_size) {
      if (memory->Init(name(), offset(), max_size)) {
        return memory.release();
      }
      // Try to reinit using the default map_size.
      if (memory->Init(name(), offset(), map_size)) {
        return memory.release();
      }
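      // Both init attempts failed, so reset the elf start offset.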
      set_elf_start_offset(0);
      return nullptr;
    }
    return memory.release();
  }

  // No elf at the offset, try to init as if the whole file is an elf.
  if (memory->Init(name(), 0) && Elf::IsValidElf(memory.get())) {
    set_elf_offset(offset());
    // Need to check how to set the elf start offset. If this map is not
    // the r-x map that follows a r-- map of the same file, then use the
    // real offset value. Otherwise, leave it as 0.
    if (prev_real_map() == nullptr || prev_real_map()->offset() != 0 ||
        prev_real_map()->flags() != PROT_READ || prev_real_map()->name() != name()) {
      set_elf_start_offset(offset());
    }
    return memory.release();
  }

  // See if the map previous to this one contains a read-only map
  // that represents the real start of the elf data.
  if (InitFileMemoryFromPreviousReadOnlyMap(memory.get())) {
    return memory.release();
  }

  // Failed to find an elf at the start of the file or in a read-only
  // map, so return a file object for the current map.
  if (memory->Init(name(), offset(), map_size)) {
    return memory.release();
  }
  return nullptr;
}

Memory* MapInfo::CreateMemory(const std::shared_ptr<Memory>& process_memory) {
  if (end() <= start()) {
    return nullptr;
  }

  set_elf_offset(0);

  // Fail on device maps.
  if (flags() & MAPS_FLAGS_DEVICE_MAP) {
    return nullptr;
  }

  // First try to use the file associated with the info.
  if (!name().empty()) {
    Memory* memory = GetFileMemory();
    if (memory != nullptr) {
      return memory;
    }
  }

  if (process_memory == nullptr) {
    return nullptr;
  }

  set_memory_backed_elf(true);

  // Need to verify that this elf is valid. It's possible that only part
  // of the elf file is mapped into memory in the executable map. In this
  // case, there will be another read-only map that includes the first
  // part of the elf file. This happens when the linker rosegment option
  // is used.
  std::unique_ptr<MemoryRange> memory(new MemoryRange(process_memory, start(), end() - start(), 0));
  if (Elf::IsValidElf(memory.get())) {
    // Might need to peek at the next map to create a memory object that
    // includes that map too.
    if (offset() != 0 || name().empty() || next_real_map() == nullptr ||
        offset() >= next_real_map()->offset() || next_real_map()->name() != name()) {
      return memory.release();
    }

    // There is a possibility that the elf object has already been created
    // in the next map. Since this should be a very uncommon path, just
    // redo the work. If this happens, the elf for this map will eventually
    // be discarded.
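    // Build a composite memory object: this map's data at offset zero,
    // and the next map's data at its file-offset delta from this map.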
    MemoryRanges* ranges = new MemoryRanges;
    ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), 0));
    ranges->Insert(new MemoryRange(process_memory, next_real_map()->start(),
                                   next_real_map()->end() - next_real_map()->start(),
                                   next_real_map()->offset() - offset()));

    return ranges;
  }

  // Find the read-only map by looking at the previous map. The linker
  // doesn't guarantee that this invariant will always be true. However,
  // if that changes, there is likely something else that will change and
  // break something.
  if (offset() == 0 || name().empty() || prev_real_map() == nullptr ||
      prev_real_map()->name() != name() || prev_real_map()->offset() >= offset()) {
    set_memory_backed_elf(false);
    return nullptr;
  }

  // Make sure that relative pc values are corrected properly.
  set_elf_offset(offset() - prev_real_map()->offset());
  // Use this as the elf start offset; otherwise, offsets would always
  // point into the r-x map, which is not quite the right information.
  set_elf_start_offset(prev_real_map()->offset());

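  // Build a composite memory object: the previous read-only map's data
  // at offset zero, and this map's data at elf_offset.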
  MemoryRanges* ranges = new MemoryRanges;
  ranges->Insert(new MemoryRange(process_memory, prev_real_map()->start(),
                                 prev_real_map()->end() - prev_real_map()->start(), 0));
  ranges->Insert(new MemoryRange(process_memory, start(), end() - start(), elf_offset()));

  return ranges;
}

Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, ArchEnum expected_arch) {
  {
    // Make sure no other thread is trying to add the elf to this map.
    std::lock_guard<std::mutex> guard(elf_mutex());

    if (elf().get() != nullptr) {
      return elf().get();
    }

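    // If caching is enabled, check whether an elf object for this file
    // has already been created by another map.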
    bool locked = false;
    if (Elf::CachingEnabled() && !name().empty()) {
      Elf::CacheLock();
      locked = true;
      if (Elf::CacheGet(this)) {
        Elf::CacheUnlock();
        return elf().get();
      }
    }

    Memory* memory = CreateMemory(process_memory);
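    // Check the cache again; CreateMemory may have set the elf start
    // offset, which the cache uses to distinguish elf objects.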
    if (locked) {
      if (Elf::CacheAfterCreateMemory(this)) {
        delete memory;
        Elf::CacheUnlock();
        return elf().get();
      }
    }
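    // The Elf object takes ownership of the memory object.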
    elf().reset(new Elf(memory));
    // If the init fails, keep the elf around as an invalid object so we
    // don't try to reinit the object.
    elf()->Init();
    if (elf()->valid() && expected_arch != elf()->arch()) {
      // Make the elf invalid, there is a mismatch between the elf's arch
      // and the expected arch.
      elf()->Invalidate();
    }

    if (locked) {
      Elf::CacheAdd(this);
      Elf::CacheUnlock();
    }
  }

  if (!elf()->valid()) {
    set_elf_start_offset(offset());
  } else if (prev_real_map() != nullptr && elf_start_offset() != offset() &&
             prev_real_map()->offset() == elf_start_offset() && prev_real_map()->name() == name()) {
    // If there is a read-only map followed by a read-execute map that
    // represent the same elf object, make sure the previous map uses the
    // same elf object if it hasn't already been set.
    std::lock_guard<std::mutex> guard(prev_real_map()->elf_mutex());
    if (prev_real_map()->elf().get() == nullptr) {
      prev_real_map()->set_elf(elf());
      prev_real_map()->set_memory_backed_elf(memory_backed_elf());
    } else {
      // Discard this elf, and use the elf from the previous map instead.
      set_elf(prev_real_map()->elf());
    }
  }
  return elf().get();
}

bool MapInfo::GetFunctionName(uint64_t addr, SharedString* name, uint64_t* func_offset) {
  {
    // Make sure no other thread is trying to update this elf object.
    std::lock_guard<std::mutex> guard(elf_mutex());
    if (elf() == nullptr) {
      return false;
    }
  }
  // The lock is no longer needed: once the elf object is created, it is
  // not deleted until this object is deleted.
  return elf()->GetFunctionName(addr, name, func_offset);
}

uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
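  // INT64_MAX is the sentinel value indicating the load bias has not
  // been computed yet.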
  int64_t cur_load_bias = load_bias().load();
  if (cur_load_bias != INT64_MAX) {
    return cur_load_bias;
  }

  {
    // Make sure no other thread is trying to add the elf to this map.
    std::lock_guard<std::mutex> guard(elf_mutex());
    if (elf() != nullptr) {
      if (elf()->valid()) {
        cur_load_bias = elf()->GetLoadBias();
        set_load_bias(cur_load_bias);
        return cur_load_bias;
      } else {
        set_load_bias(0);
        return 0;
      }
    }
  }

  // Call the lightweight static function that only reads enough of the
  // elf data to get the load bias.
  std::unique_ptr<Memory> memory(CreateMemory(process_memory));
  cur_load_bias = Elf::GetLoadBias(memory.get());
  set_load_bias(cur_load_bias);
  return cur_load_bias;
}

MapInfo::~MapInfo() {
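  // The build id and the lazily allocated ElfFields are stored as raw
  // pointers inside atomics, so they must be deleted manually.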
  ElfFields* elf_fields = elf_fields_.load();
  if (elf_fields != nullptr) {
    delete elf_fields->build_id_.load();
    delete elf_fields;
  }
}

SharedString MapInfo::GetBuildID() {
  SharedString* id = build_id().load();
  if (id != nullptr) {
    return *id;
  }

  // No need to lock; at worst, if multiple threads do this at the same
  // time, it will be detected and only one thread will win and save
  // the data.

  // Now need to see if the elf object exists.
  // Make sure no other thread is trying to add the elf to this map.
  Elf* elf_obj;
  {
    std::lock_guard<std::mutex> guard(elf_mutex());
    elf_obj = elf().get();
  }
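  // Once created, the elf object is not destroyed while this MapInfo is
  // alive, so it is safe to use outside the lock.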
  std::string result;
  if (elf_obj != nullptr) {
    result = elf_obj->GetBuildID();
  } else {
    // This will only work if we can get the file associated with this memory.
    // If this is only available in memory, then the section name information
    // is not present and we will not be able to find the build id info.
    std::unique_ptr<Memory> memory(GetFileMemory());
    if (memory != nullptr) {
      result = Elf::GetBuildID(memory.get());
    }
  }
  return SetBuildID(std::move(result));
}

SharedString MapInfo::SetBuildID(std::string&& new_build_id) {
  std::unique_ptr<SharedString> new_build_id_ptr(new SharedString(std::move(new_build_id)));
  SharedString* expected_id = nullptr;
  // Strong version since we need to reliably return the stored pointer.
  if (build_id().compare_exchange_strong(expected_id, new_build_id_ptr.get())) {
    // Value saved, so make sure the memory is not freed.
    return *new_build_id_ptr.release();
  } else {
    // The expected value is set to the stored value on failure.
    return *expected_id;
  }
}

MapInfo::ElfFields& MapInfo::GetElfFields() {
  ElfFields* elf_fields = elf_fields_.load(std::memory_order_acquire);
  if (elf_fields != nullptr) {
    return *elf_fields;
  }
  // Allocate and initialize the field in a thread-safe way.
  std::unique_ptr<ElfFields> desired(new ElfFields());
  ElfFields* expected = nullptr;
  // The strong version is reliable; the weak version might spuriously return false.
  if (elf_fields_.compare_exchange_strong(expected, desired.get())) {
    return *desired.release();  // Success: we transferred the pointer ownership to the field.
  } else {
    return *expected;  // Failure: 'expected' is updated to the value set by the other thread.
  }
}

std::string MapInfo::GetPrintableBuildID() {
  std::string raw_build_id = GetBuildID();
  if (raw_build_id.empty()) {
    return "";
  }
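  // Convert each byte to two hex characters, e.g. bytes {0x8d, 0xf1}
  // become "8df1".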
  std::string printable_build_id;
  for (const char& c : raw_build_id) {
    // Use %hhx to avoid sign extension on abis that have signed chars.
    printable_build_id += android::base::StringPrintf("%02hhx", c);
  }
  return printable_build_id;
}

}  // namespace unwindstack