• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#include <mutex>
#include <string>

#include <memory_trace/MemoryTrace.h>

#include "Config.h"
#include "DebugData.h"
#include "Nanotime.h"
#include "RecordData.h"
#include "debug_disable.h"
#include "debug_log.h"
49 
// Per-thread bookkeeping attached to the pthread key created in the
// RecordData constructor. Its presence in the key is what triggers
// ThreadKeyDelete at thread exit so a THREAD_DONE record can be emitted.
struct ThreadData {
  ThreadData(RecordData* record_data) : record_data(record_data) {}

  // Back-pointer to the owning RecordData; not owned by this struct.
  RecordData* record_data = nullptr;
  // Number of times ThreadKeyDelete has run for this thread (see the
  // destructor re-arm logic there).
  size_t count = 0;
};
56 
// pthread key destructor, run by the C library when a thread exits with a
// non-null value stored in the key. The value is deliberately re-stored below
// so the destructor is invoked again on each destructor iteration; `count`
// tracks how many passes have occurred.
void RecordData::ThreadKeyDelete(void* data) {
  ThreadData* thread_data = reinterpret_cast<ThreadData*>(data);

  thread_data->count++;

  // This should be the last time we are called.
  // NOTE(review): assumes key destructors run exactly 4 times
  // (PTHREAD_DESTRUCTOR_ITERATIONS on the target libc) -- confirm before
  // changing this constant.
  if (thread_data->count == 4) {
    ScopedDisableDebugCalls disable;

    // Emit a THREAD_DONE entry so the dump records this thread's end time.
    memory_trace::Entry* entry = thread_data->record_data->InternalReserveEntry();
    if (entry != nullptr) {
      *entry = memory_trace::Entry{
          .tid = gettid(), .type = memory_trace::THREAD_DONE, .end_ns = Nanotime()};
    }
    delete thread_data;
  } else {
    // Re-arm the key so this destructor runs again on the next iteration.
    pthread_setspecific(thread_data->record_data->key(), data);
  }
}
76 
// Singleton pointer used by the static signal handler below; set in
// Initialize().
RecordData* RecordData::record_obj_ = nullptr;

// Signal handler installed by Initialize() on the configured record-allocs
// signal; dumps all recorded entries to the configured file.
void RecordData::WriteData(int, siginfo_t*, void*) {
  // Dump from here, the function must not allocate so this is safe.
  record_obj_->WriteEntries();
}
83 
WriteEntriesOnExit()84 void RecordData::WriteEntriesOnExit() {
85   if (record_obj_ == nullptr) return;
86 
87   // Append the current pid to the file name to avoid multiple processes
88   // writing to the same file.
89   std::string file(record_obj_->file());
90   file += "." + std::to_string(getpid());
91   record_obj_->WriteEntries(file);
92 }
93 
// Dump the recorded entries to the file configured in Initialize().
void RecordData::WriteEntries() {
  WriteEntries(file_);
}
97 
// Write all reserved entries to `file`, then reset the buffer so recording
// can continue. Reached from both the dump signal handler (via WriteData)
// and the at-exit path.
void RecordData::WriteEntries(const std::string& file) {
  std::lock_guard<std::mutex> entries_lock(entries_lock_);
  if (cur_index_ == 0) {
    info_log("No alloc entries to write.");
    return;
  }

  // O_NOFOLLOW guards against a symlink planted at the dump path.
  int dump_fd = open(file.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC | O_NOFOLLOW, 0755);
  if (dump_fd == -1) {
    error_log("Cannot create record alloc file %s: %s", file.c_str(), strerror(errno));
    return;
  }

  for (size_t i = 0; i < cur_index_; i++) {
    if (entries_[i].type == memory_trace::UNKNOWN) {
      // This can happen if an entry was reserved but not filled in due to some
      // type of error during the operation.
      continue;
    }
    if (!memory_trace::WriteEntryToFd(dump_fd, entries_[i])) {
      error_log("Failed to write record alloc information: %s", strerror(errno));
      break;
    }
  }
  close(dump_fd);

  // Mark the entries dumped.
  cur_index_ = 0U;
}
127 
RecordData()128 RecordData::RecordData() {
129   pthread_key_create(&key_, ThreadKeyDelete);
130 }
131 
// Install the dump signal handler and set up the entry buffer and pagemap
// fd. Returns false when the signal handler or /proc/self/pagemap cannot be
// set up; note record_obj_ remains set even on failure.
bool RecordData::Initialize(const Config& config) {
  // Publish the singleton before the handler can fire.
  record_obj_ = this;
  struct sigaction64 dump_act = {};
  dump_act.sa_sigaction = RecordData::WriteData;
  dump_act.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
  if (sigaction64(config.record_allocs_signal(), &dump_act, nullptr) != 0) {
    error_log("Unable to set up record dump signal function: %s", strerror(errno));
    return false;
  }
  pthread_setspecific(key_, nullptr);

  if (config.options() & VERBOSE) {
    info_log("%s: Run: 'kill -%d %d' to dump the allocation records.", getprogname(),
             config.record_allocs_signal(), getpid());
  }

  // Fixed-capacity buffer: entries are handed out until it fills (see
  // InternalReserveEntry), after which new operations are dropped.
  entries_.resize(config.record_allocs_num_entries());
  cur_index_ = 0U;
  file_ = config.record_allocs_file();

  // Used by GetPresentBytes() to query page residency.
  pagemap_fd_ = TEMP_FAILURE_RETRY(open("/proc/self/pagemap", O_RDONLY | O_CLOEXEC));
  if (pagemap_fd_ == -1) {
    error_log("Unable to open /proc/self/pagemap: %s", strerror(errno));
    return false;
  }

  return true;
}
160 
~RecordData()161 RecordData::~RecordData() {
162   if (pagemap_fd_ != -1) {
163     close(pagemap_fd_);
164   }
165 
166   pthread_key_delete(key_);
167 }
168 
// Reserve the next slot in the entry buffer, or return nullptr when the
// buffer is full. The slot is pre-marked UNKNOWN so that an entry reserved
// but never filled in (because the traced operation failed) is skipped by
// WriteEntries().
memory_trace::Entry* RecordData::InternalReserveEntry() {
  std::lock_guard<std::mutex> entries_lock(entries_lock_);
  if (cur_index_ == entries_.size()) {
    return nullptr;
  }

  memory_trace::Entry* entry = &entries_[cur_index_];
  entry->type = memory_trace::UNKNOWN;
  // Log once, exactly when the last slot is handed out.
  if (++cur_index_ == entries_.size()) {
    info_log("Maximum number of records added, all new operations will be dropped.");
  }
  return entry;
}
182 
ReserveEntry()183 memory_trace::Entry* RecordData::ReserveEntry() {
184   void* data = pthread_getspecific(key_);
185   if (data == nullptr) {
186     ThreadData* thread_data = new ThreadData(this);
187     pthread_setspecific(key_, thread_data);
188   }
189 
190   return InternalReserveEntry();
191 }
192 
// A /proc/self/pagemap entry stores the "page present" flag in bit 63.
static inline bool IsPagePresent(uint64_t page_data) {
  return (page_data >> 63) != 0;
}
197 
// Return how many bytes of the allocation [ptr, ptr + alloc_size) are backed
// by resident (present) pages, reading residency from /proc/self/pagemap
// (one uint64_t per virtual page, present flag in bit 63). Returns -1 on a
// null ptr, zero size, or pagemap read failure.
int64_t RecordData::GetPresentBytes(void* ptr, size_t alloc_size) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  if (addr == 0 || alloc_size == 0) {
    return -1;
  }

  uintptr_t page_size = getpagesize();
  uintptr_t page_size_mask = page_size - 1;

  // Virtual page numbers of the first and last page of the allocation.
  size_t start_page = (addr & ~page_size_mask) / page_size;
  size_t last_page = ((addr + alloc_size - 1) & ~page_size_mask) / page_size;

  // Read pagemap entries in chunks to bound stack usage (8 KiB buffer).
  constexpr size_t kMaxReadPages = 1024;
  uint64_t page_data[kMaxReadPages];

  int64_t present_bytes = 0;
  size_t cur_page = start_page;
  while (cur_page <= last_page) {
    size_t num_pages = last_page - cur_page + 1;
    size_t last_page_index;
    if (num_pages > kMaxReadPages) {
      num_pages = kMaxReadPages;
      last_page_index = num_pages;
    } else {
      // Handle the last page differently, so do not handle it in the loop.
      last_page_index = num_pages - 1;
    }
    // Each page has one 8-byte entry at file offset page_number * 8.
    // NOTE(review): a short read (0 < bytes_read < requested) is treated as
    // success, so entries past bytes_read would be stale buffer contents --
    // confirm whether pagemap reads can be short.
    ssize_t bytes_read =
        pread64(pagemap_fd_, page_data, num_pages * sizeof(uint64_t), cur_page * sizeof(uint64_t));
    if (bytes_read <= 0) {
      error_log("Failed to read page data: %s", strerror(errno));
      return -1;
    }

    size_t page_index = 0;
    // Handling the first page is special, handle it separately.
    if (cur_page == start_page) {
      if (IsPagePresent(page_data[0])) {
        // Only the tail of the first page belongs to the allocation.
        present_bytes = page_size - (addr & page_size_mask);
        if (present_bytes >= alloc_size) {
          // The allocation fits on a single page and that page is present.
          return alloc_size;
        }
      } else if (start_page == last_page) {
        // Only one page that isn't present.
        return 0;
      }
      page_index = 1;
    }

    // Interior pages: each present page contributes a full page of bytes.
    for (; page_index < last_page_index; page_index++) {
      if (IsPagePresent(page_data[page_index])) {
        present_bytes += page_size;
      }
    }

    cur_page += last_page_index;

    // Check the last page in the allocation.
    if (cur_page == last_page) {
      if (IsPagePresent(page_data[num_pages - 1])) {
        // Only the head of the last page belongs to the allocation.
        present_bytes += ((addr + alloc_size - 1) & page_size_mask) + 1;
      }
      return present_bytes;
    }
  }

  return present_bytes;
}
267