/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_PROFILING_DATA_H
#define PANDA_PROFILING_DATA_H

#include "macros.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <numeric>

#include <cstdint>
#include <vector>

#include "runtime/include/mem/panda_containers.h"
namespace panda {

class Class;

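// Inline cache for a single virtual call site: remembers up to CLASSES_COUNT distinct receiver
// classes observed at the site. Once more classes are seen, the first slot is overwritten with
// MEGAMORPHIC_FLAG and devirtualization is disabled for this site.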
class CallSiteInlineCache {
public:
    static constexpr size_t CLASSES_COUNT = 4;
    static constexpr uintptr_t MEGAMORPHIC_FLAG = static_cast<uintptr_t>(-1);

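    // Lays out one CallSiteInlineCache per virtual call site over the caller-provided memory and
    // initializes each cache with its call site's bytecode pc. `mem` is expected to be large
    // enough and suitably aligned for vcalls.size() entries.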
    static Span<CallSiteInlineCache> From(void *mem, PandaVector<uint32_t> vcalls)
    {
        auto inlineCaches = reinterpret_cast<CallSiteInlineCache *>(mem);
        auto ics = Span<CallSiteInlineCache>(inlineCaches, vcalls.size());
        for (size_t i = 0; i < vcalls.size(); i++) {
            ics[i].Init(vcalls[i]);
        }
        return ics;
    }

    void Init(uintptr_t pc)
    {
        SetBytecodePc(pc);
        std::fill(classes_.begin(), classes_.end(), nullptr);
    }

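    // Records the receiver class `cls` into the first free slot with a lock-free CAS. If `cls` is
    // already recorded or the site is already megamorphic, nothing changes; if all slots hold
    // other classes, the site is marked megamorphic.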
    void UpdateInlineCaches(Class *cls)
    {
        for (uint32_t i = 0; i < classes_.size();) {
            auto *classAtomic = reinterpret_cast<std::atomic<Class *> *>(&(classes_[i]));
            // Atomic with acquire order reason: data race with classes_ with dependencies on reads after the load
            // which should become visible
            auto storedClass = classAtomic->load(std::memory_order_acquire);
            // Check that the call is already megamorphic
            if (i == 0 && storedClass == reinterpret_cast<Class *>(MEGAMORPHIC_FLAG)) {
                return;
            }
            if (storedClass == cls) {
                return;
            }
            if (storedClass == nullptr) {
                // Retry the same slot if the CAS fails: another thread may have just filled it.
                if (!classAtomic->compare_exchange_weak(storedClass, cls, std::memory_order_acq_rel)) {
                    continue;
                }
                return;
            }
            i++;
        }
        // Megamorphic call, disable devirtualization for this call site.
        auto *classAtomic = reinterpret_cast<std::atomic<Class *> *>(&(classes_[0]));
        // Atomic with release order reason: data race with classes_ with dependencies on writes before the store
        // which should become visible to threads that load with acquire order
        classAtomic->store(reinterpret_cast<Class *>(MEGAMORPHIC_FLAG), std::memory_order_release);
    }

    auto GetBytecodePc() const
    {
        // Atomic with acquire order reason: data race with bytecode_pc_ with dependencies on reads after the load
        // which should become visible
        return bytecodePc_.load(std::memory_order_acquire);
    }

    void SetBytecodePc(uintptr_t pc)
    {
        // Atomic with release order reason: data race with bytecode_pc_ with dependencies on writes before the store
        // which should become visible to threads that load with acquire order
        bytecodePc_.store(pc, std::memory_order_release);
    }

    std::vector<Class *> GetClassesCopy()
    {
        std::vector<Class *> result;
        for (uint32_t i = 0; i < classes_.size(); i++) {
            auto *classAtomic = reinterpret_cast<std::atomic<Class *> const *>(&(classes_[i]));
            // Atomic with acquire order reason: data race with classes_ with dependencies on reads after the load
            // which should become visible
            auto storedClass = classAtomic->load(std::memory_order_acquire);
            if (storedClass != nullptr) {
                result.push_back(storedClass);
            }
        }
        return result;
    }

    size_t GetClassesCount() const
    {
        size_t classesCount = 0;
        for (uint32_t i = 0; i < classes_.size(); i++) {
            auto *classAtomic = reinterpret_cast<std::atomic<Class *> const *>(&(classes_[i]));
            // Atomic with acquire order reason: data race with classes_ with dependencies on reads after the load
            // which should become visible
            auto storedClass = classAtomic->load(std::memory_order_acquire);
            if (storedClass != nullptr) {
                classesCount++;
            }
        }
        return classesCount;
    }

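    // Returns true if `cls` (a value read from an inline cache slot) is the MEGAMORPHIC_FLAG
    // sentinel rather than a real class pointer.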
    static bool IsMegamorphic(Class *cls)
    {
        auto *classAtomic = reinterpret_cast<std::atomic<Class *> *>(&cls);
        // Atomic with acquire order reason: data race with classes_ with dependencies on reads after the load
        // which should become visible
        return classAtomic->load(std::memory_order_acquire) == reinterpret_cast<Class *>(MEGAMORPHIC_FLAG);
    }

private:
    std::atomic_uintptr_t bytecodePc_;
    std::array<Class *, CLASSES_COUNT> classes_ {};
};

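// Per-branch profile counters: for each conditional branch pc, the number of times the branch was
// taken and not taken. Increments use relaxed atomics, which keeps the hot path cheap; no ordering
// with other memory is required for profiling.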
class BranchData {
public:
    static Span<BranchData> From(void *mem, PandaVector<uint32_t> branches)
    {
        auto branchData = reinterpret_cast<BranchData *>(mem);
        auto span = Span<BranchData>(branchData, branches.size());
        for (size_t i = 0; i < branches.size(); i++) {
            span[i].Init(branches[i]);
        }
        return span;
    }

    void Init(uintptr_t pc)
    {
        // Atomic with relaxed order reason: data race with pc_
        pc_.store(pc, std::memory_order_relaxed);
        // Atomic with relaxed order reason: data race with taken_counter_
        takenCounter_.store(0, std::memory_order_relaxed);
        // Atomic with relaxed order reason: data race with not_taken_counter_
        notTakenCounter_.store(0, std::memory_order_relaxed);
    }

    uintptr_t GetPc() const
    {
        // Atomic with relaxed order reason: data race with pc_
        return pc_.load(std::memory_order_relaxed);
    }

    int64_t GetTakenCounter() const
    {
        // Atomic with relaxed order reason: data race with taken_counter_
        return takenCounter_.load(std::memory_order_relaxed);
    }

    int64_t GetNotTakenCounter() const
    {
        // Atomic with relaxed order reason: data race with not_taken_counter_
        return notTakenCounter_.load(std::memory_order_relaxed);
    }

    void IncrementTaken()
    {
        // Atomic with relaxed order reason: data race with taken_counter_
        takenCounter_.fetch_add(1, std::memory_order_relaxed);
    }

    void IncrementNotTaken()
    {
        // Atomic with relaxed order reason: data race with not_taken_counter_
        notTakenCounter_.fetch_add(1, std::memory_order_relaxed);
    }

private:
    std::atomic_uintptr_t pc_;
    std::atomic_llong takenCounter_;
    std::atomic_llong notTakenCounter_;
};

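// Per-throw-site profile counter: for each throw pc, the number of times an exception was
// actually thrown there.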
class ThrowData {
public:
    static Span<ThrowData> From(void *mem, PandaVector<uint32_t> throws)
    {
        auto throwData = reinterpret_cast<ThrowData *>(mem);
        auto span = Span<ThrowData>(throwData, throws.size());
        for (size_t i = 0; i < throws.size(); i++) {
            span[i].Init(throws[i]);
        }
        return span;
    }

    void Init(uintptr_t pc)
    {
        // Atomic with relaxed order reason: data race with pc_
        pc_.store(pc, std::memory_order_relaxed);
        // Atomic with relaxed order reason: data race with taken_counter_
        takenCounter_.store(0, std::memory_order_relaxed);
    }

    uintptr_t GetPc() const
    {
        // Atomic with relaxed order reason: data race with pc_
        return pc_.load(std::memory_order_relaxed);
    }

    int64_t GetTakenCounter() const
    {
        // Atomic with relaxed order reason: data race with taken_counter_
        return takenCounter_.load(std::memory_order_relaxed);
    }

    void IncrementTaken()
    {
        // Atomic with relaxed order reason: data race with taken_counter_
        takenCounter_.fetch_add(1, std::memory_order_relaxed);
    }

private:
    std::atomic_uintptr_t pc_;
    std::atomic_llong takenCounter_;
};

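// Per-method aggregate of the three profiles above. Lookups use std::lower_bound, so each span is
// expected to be sorted by bytecode pc in ascending order.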
class ProfilingData {
public:
    explicit ProfilingData(Span<CallSiteInlineCache> inlineCaches, Span<BranchData> branchData,
                           Span<ThrowData> throwData)
        : inlineCaches_(inlineCaches), branchData_(branchData), throwData_(throwData)
    {
    }

    Span<CallSiteInlineCache> GetInlineCaches()
    {
        return inlineCaches_;
    }

    CallSiteInlineCache *FindInlineCache(uintptr_t pc)
    {
        auto ics = GetInlineCaches();
        auto ic = std::lower_bound(ics.begin(), ics.end(), pc,
                                   [](const auto &a, uintptr_t counter) { return a.GetBytecodePc() < counter; });
        return (ic == ics.end() || ic->GetBytecodePc() != pc) ? nullptr : &*ic;
    }

    void UpdateInlineCaches(uintptr_t pc, Class *cls)
    {
        auto ic = FindInlineCache(pc);
        ASSERT(ic != nullptr);
        if (ic != nullptr) {
            ic->UpdateInlineCaches(cls);
        }
    }

    void UpdateBranchTaken(uintptr_t pc)
    {
        auto branch = FindBranchData(pc);
        ASSERT(branch != nullptr);
        branch->IncrementTaken();
    }

    void UpdateBranchNotTaken(uintptr_t pc)
    {
        auto branch = FindBranchData(pc);
        ASSERT(branch != nullptr);
        branch->IncrementNotTaken();
    }

    int64_t GetBranchTakenCounter(uintptr_t pc)
    {
        auto branch = FindBranchData(pc);
        ASSERT(branch != nullptr);
        return branch->GetTakenCounter();
    }

    int64_t GetBranchNotTakenCounter(uintptr_t pc)
    {
        auto branch = FindBranchData(pc);
        ASSERT(branch != nullptr);
        return branch->GetNotTakenCounter();
    }

    void UpdateThrowTaken(uintptr_t pc)
    {
        auto throwData = FindThrowData(pc);
        ASSERT(throwData != nullptr);
        throwData->IncrementTaken();
    }

    int64_t GetThrowTakenCounter(uintptr_t pc)
    {
        auto throwData = FindThrowData(pc);
        ASSERT(throwData != nullptr);
        return throwData->GetTakenCounter();
    }

private:
    BranchData *FindBranchData(uintptr_t fromPc)
    {
        auto it = std::lower_bound(branchData_.begin(), branchData_.end(), fromPc,
                                   [](const auto &a, uintptr_t counter) { return a.GetPc() < counter; });
        if (it == branchData_.end() || it->GetPc() != fromPc) {
            return nullptr;
        }

        return &*it;
    }

    ThrowData *FindThrowData(uintptr_t fromPc)
    {
        if (throwData_.empty()) {
            return nullptr;
        }
        auto it = std::lower_bound(throwData_.begin(), throwData_.end(), fromPc,
                                   [](const auto &a, uintptr_t counter) { return a.GetPc() < counter; });
        if (it == throwData_.end() || it->GetPc() != fromPc) {
            return nullptr;
        }

        return &*it;
    }

    Span<CallSiteInlineCache> inlineCaches_;
    Span<BranchData> branchData_;
    Span<ThrowData> throwData_;
};
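
// Illustrative usage sketch (names such as icMem, vcallPcs, branchPcs, throwPcs, pc, and cls are
// hypothetical; the caller owns the backing memory and keeps the pc vectors sorted):
//
//   auto ics = CallSiteInlineCache::From(icMem, vcallPcs);
//   auto branches = BranchData::From(branchMem, branchPcs);
//   auto throws = ThrowData::From(throwMem, throwPcs);
//   ProfilingData profile(ics, branches, throws);
//   profile.UpdateBranchTaken(pc);        // record a taken branch on the hot path
//   profile.UpdateInlineCaches(pc, cls);  // record a receiver class at a call site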

}  // namespace panda

#endif  // PANDA_PROFILING_DATA_H