• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
#include <cstring>
#include <algorithm>
#include <iterator>
#include <memory>
#include <sstream>
#include <fstream>
#include <functional>
#include <unordered_map>
#include "utils/logger.h"
#include "os/stacktrace.h"
#include "mem/alloc_tracker.h"
26 
namespace panda {

// Number of innermost frames dropped from each captured stacktrace
// (the tracker's own frame is not interesting to report).
static constexpr size_t NUM_SKIP_FRAMES = 1;
// Size in bytes of each bump-allocated arena that stores Alloc/Free records.
static constexpr size_t ARENA_SIZE = 4096;
// Every record begins with a 32-bit tag; a zero tag marks the end of the
// used part of an arena.
static constexpr size_t ENTRY_HDR_SIZE = sizeof(int32_t);
32 
// Returns the path the binary allocation dump is written to.
// Mobile targets use a writable temp directory; hosts use the working dir.
static const char *GetDumpFilePath()
{
#if defined(PANDA_TARGET_MOBILE)
    return "/data/local/tmp/memdump.bin";
#else
    return "memdump.bin";
#endif
}
41 
// Serializes a 4-byte value to the stream in host byte order.
static void Write(uint32_t val, std::ostream &out)
{
    const auto *raw = reinterpret_cast<const char *>(&val);
    out.write(raw, sizeof(val));
}

// Serializes a length-prefixed string: a 4-byte size followed by the raw
// (non-terminated) characters.
static void Write(const std::string &str, std::ostream &out)
{
    const auto len = static_cast<uint32_t>(str.size());
    Write(len, out);
    out.write(str.data(), static_cast<std::streamsize>(str.size()));
}
52 
// Computes a hash over a whole stacktrace for deduplication.
//
// The previous implementation OR-ed the per-address hashes together
// (`hash |= addr_hash(addr)`), which quickly saturates toward all-ones and
// is insensitive to frame order, so distinct stacktraces collided heavily
// in the deduplication map. Use a boost-style hash combine instead: it
// mixes each frame's hash with the accumulator in an order-sensitive way.
static size_t CalcHash(const std::vector<uintptr_t> &st)
{
    size_t hash = 0;
    std::hash<uintptr_t> addr_hash;
    for (uintptr_t addr : st) {
        // Standard hash_combine step (golden-ratio constant + shifts).
        hash ^= addr_hash(addr) + 0x9E3779B9U + (hash << 6U) + (hash >> 2U);
    }
    return hash;
}
62 
// On mobile targets capturing a stacktrace is expensive enough that an
// application can miss its launch deadline and be killed, so only every
// FREQUENCY-th allocation records a trace there. Host builds always record.
#if defined(PANDA_TARGET_MOBILE)
static bool SkipStacktrace(size_t num)
{
    static constexpr size_t FREQUENCY = 5;
    return (num % FREQUENCY) != 0;
}
#else
static bool SkipStacktrace([[maybe_unused]] size_t num)
{
    return false;
}
#endif
78 
/**
 * Records one allocation: captures a (possibly skipped) stacktrace and
 * appends an AllocInfo record to the current arena.
 *
 * @param addr  address returned by the allocator; nullptr is ignored
 * @param size  allocation size in bytes
 * @param space memory space the allocation belongs to
 */
void DetailAllocTracker::TrackAlloc(void *addr, size_t size, SpaceType space)
{
    if (addr == nullptr) {
        return;
    }
    // Capture outside the lock because unwinding is expensive; on mobile
    // most allocations get an empty trace (see SkipStacktrace).
    // NOTE(review): ++alloc_counter_ happens before mutex_ is taken — if
    // TrackAlloc can run concurrently this increment is unsynchronized;
    // confirm whether that is acceptable for the sampling heuristic.
    Stacktrace stacktrace = SkipStacktrace(++alloc_counter_) ? Stacktrace() : GetStacktrace();
    os::memory::LockHolder lock(mutex_);

    // A slot is appended even for skipped traces so record ids stay aligned
    // with indices into stacktraces_.
    uint32_t stacktrace_id = stacktraces_.size();
    if (stacktrace.size() > NUM_SKIP_FRAMES) {
        // Drop the tracker's own innermost frame(s).
        stacktraces_.emplace_back(stacktrace.begin() + NUM_SKIP_FRAMES, stacktrace.end());
    } else {
        stacktraces_.emplace_back(stacktrace);
    }
    if (cur_arena_.size() < sizeof(AllocInfo)) {
        AllocArena();
    }
    // Placement-new the record into the arena, then shrink the free span.
    auto info = new (cur_arena_.data()) AllocInfo(cur_id_++, size, static_cast<uint32_t>(space), stacktrace_id);
    cur_arena_ = cur_arena_.SubSpan(sizeof(AllocInfo));
    cur_allocs_.insert({addr, info});
}
100 
/**
 * Records the deallocation of a previously tracked address by appending a
 * FreeInfo record (referencing the allocation id) to the current arena.
 *
 * @param addr address being freed; nullptr is ignored
 */
void DetailAllocTracker::TrackFree(void *addr)
{
    if (addr == nullptr) {
        return;
    }
    os::memory::LockHolder lock(mutex_);
    auto it = cur_allocs_.find(addr);
    // NOTE(review): in builds where ASSERT compiles to a no-op, an address
    // that was never tracked makes the dereference below read end() — UB.
    // Confirm every freed pointer passes through TrackAlloc first.
    ASSERT(it != cur_allocs_.end());
    AllocInfo *alloc = it->second;
    cur_allocs_.erase(it);
    if (cur_arena_.size() < sizeof(FreeInfo)) {
        AllocArena();
    }
    new (cur_arena_.data()) FreeInfo(alloc->GetId());
    cur_arena_ = cur_arena_.SubSpan(sizeof(FreeInfo));
}
117 
AllocArena()118 void DetailAllocTracker::AllocArena()
119 {
120     if (cur_arena_.size() >= ENTRY_HDR_SIZE) {
121         *reinterpret_cast<uint32_t *>(cur_arena_.data()) = 0;
122     }
123     arenas_.emplace_back(new uint8_t[ARENA_SIZE]);
124     cur_arena_ = Span<uint8_t>(arenas_.back().get(), arenas_.back().get() + ARENA_SIZE);
125 }
126 
Dump()127 void DetailAllocTracker::Dump()
128 {
129     LOG(ERROR, RUNTIME) << "DetailAllocTracker::Dump to " << GetDumpFilePath();
130     std::ofstream out(GetDumpFilePath(), std::ios::out | std::ios::binary | std::ios::trunc);
131     if (!out) {
132         LOG(ERROR, RUNTIME) << "DetailAllocTracker: Cannot open " << GetDumpFilePath()
133                             << " for writing: " << strerror(errno) << "."
134                             << "\nCheck the directory has write permissions or"
135                             << " selinux is disabled.";
136     }
137     Dump(out);
138     LOG(ERROR, RUNTIME) << "DetailAllocTracker: dump file has been written";
139 }
140 
/**
 * Serializes the whole allocation log to the stream.
 *
 * Binary layout:
 *   [u32 num_items][u32 num_stacks]
 *   [num_stacks length-prefixed stacktrace strings (deduplicated)]
 *   [num_items AllocInfo / FreeInfo records]
 * The two leading counters are back-patched via seekp at the end, so the
 * stream must be seekable.
 *
 * @param out seekable binary output stream
 */
void DetailAllocTracker::Dump(std::ostream &out)
{
    os::memory::LockHolder lock(mutex_);

    Write(0, out);  // number of items, will be updated later
    Write(0, out);  // number of stacktraces, will be updated later

    // Maps raw stacktrace ids to the deduplicated ids used in the file.
    std::map<uint32_t, uint32_t> id_map;
    uint32_t num_stacks = WriteStacks(out, &id_map);

    // Write end marker to the current arena
    if (cur_arena_.size() >= ENTRY_HDR_SIZE) {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        *reinterpret_cast<uint32_t *>(cur_arena_.data()) = 0;
    }
    uint32_t num_items = 0;
    for (auto &arena : arenas_) {
        uint8_t *ptr = arena.get();
        size_t pos = 0;
        // Records are laid out back to back; a zero tag terminates the
        // used part of an arena.
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        while (pos < ARENA_SIZE && *reinterpret_cast<uint32_t *>(ptr + pos) != 0) {
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            uint32_t tag = *reinterpret_cast<uint32_t *>(ptr + pos);
            if (tag == ALLOC_TAG) {
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                auto alloc = reinterpret_cast<AllocInfo *>(ptr + pos);
                Write(alloc->GetTag(), out);
                Write(alloc->GetId(), out);
                Write(alloc->GetSize(), out);
                Write(alloc->GetSpace(), out);
                // Translate to the deduplicated stacktrace id.
                Write(id_map[alloc->GetStacktraceId()], out);
                pos += sizeof(AllocInfo);
            } else if (tag == FREE_TAG) {
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                auto info = reinterpret_cast<FreeInfo *>(ptr + pos);
                Write(info->GetTag(), out);
                Write(info->GetAllocId(), out);
                pos += sizeof(FreeInfo);
            } else {
                UNREACHABLE();
            }
            ++num_items;
        }
    }

    // Back-patch the counters written as placeholders above.
    out.seekp(0);
    Write(num_items, out);
    Write(num_stacks, out);
}
190 
DumpMemLeaks(std::ostream & out)191 void DetailAllocTracker::DumpMemLeaks(std::ostream &out)
192 {
193     static constexpr size_t MAX_ENTRIES_TO_REPORT = 10;
194 
195     os::memory::LockHolder lock(mutex_);
196     size_t num = 0;
197     out << "found " << cur_allocs_.size() << " leaks\n";
198     for (auto &entry : cur_allocs_) {
199         out << "Allocation of " << entry.second->GetSize() << " is allocated at\n";
200         uint32_t stacktrace_id = entry.second->GetStacktraceId();
201         auto it = stacktraces_.begin();
202         std::advance(it, stacktrace_id);
203         PrintStack(*it, out);
204         if (++num > MAX_ENTRIES_TO_REPORT) {
205             break;
206         }
207     }
208 }
209 
/**
 * Writes all recorded stacktraces to the stream, deduplicating identical
 * traces, and fills id_map with the raw-id -> deduplicated-id mapping.
 *
 * @param out    binary output stream; each unique trace is written as a
 *               length-prefixed printed stacktrace string
 * @param id_map out-parameter mapping every index in stacktraces_ to the
 *               id of the unique trace written to the file
 * @return number of unique stacktraces written
 */
uint32_t DetailAllocTracker::WriteStacks(std::ostream &out, std::map<uint32_t, uint32_t> *id_map)
{
    // Lightweight key over a stacktrace stored in stacktraces_; holds a
    // pointer (stacktraces_ is not modified while the map is alive) and a
    // precomputed hash to avoid rehashing on every comparison.
    class Key {
    public:
        explicit Key(const Stacktrace *stacktrace) : stacktrace_(stacktrace), hash_(CalcHash(*stacktrace)) {}

        DEFAULT_COPY_SEMANTIC(Key);
        DEFAULT_NOEXCEPT_MOVE_SEMANTIC(Key);

        ~Key() = default;

        // Equality compares the full frame sequences, not the hashes.
        bool operator==(const Key &k) const
        {
            return *stacktrace_ == *k.stacktrace_;
        }

        size_t GetHash() const
        {
            return hash_;
        }

    private:
        const Stacktrace *stacktrace_ = nullptr;
        size_t hash_ = 0U;
    };

    struct KeyHash {
        size_t operator()(const Key &k) const
        {
            return k.GetHash();
        }
    };

    // Maps each unique stacktrace to the deduplicated id it was written under.
    std::unordered_map<Key, uint32_t, KeyHash> alloc_stacks;
    uint32_t stacktrace_id = 0;
    uint32_t deduplicated_id = 0;
    for (Stacktrace &stacktrace : stacktraces_) {
        Key akey(&stacktrace);
        auto res = alloc_stacks.insert({akey, deduplicated_id});
        if (res.second) {
            // First time this trace is seen: print and write it.
            std::stringstream str;
            PrintStack(stacktrace, str);
            Write(str.str(), out);
            id_map->insert({stacktrace_id, deduplicated_id});
            ++deduplicated_id;
        } else {
            // Duplicate: map this raw id to the previously assigned id.
            uint32_t id = res.first->second;
            id_map->insert({stacktrace_id, id});
        }
        ++stacktrace_id;
    }
    return deduplicated_id;
}
263 
264 }  // namespace panda
265