/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base_mem_stats.h"

#include "os/mutex.h"
#include "utils/logger.h"
#include "utils/type_helpers.h"

#include <numeric>

namespace panda {

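// Implementation note: allocated_ and freed_ (declared in base_mem_stats.h) are per-SpaceType byte
// counters, presumably arrays of std::atomic<uint64_t> with one slot per SPACE_TYPE_SIZE entry;
// everything below indexes them via helpers::ToUnderlying(SpaceType).
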
void BaseMemStats::RecordAllocateRaw(size_t size, SpaceType type_mem)
{
    ASSERT(!IsHeapSpace(type_mem));
    RecordAllocate(size, type_mem);
}

void BaseMemStats::RecordAllocate(size_t size, SpaceType type_mem)
{
    auto index = helpers::ToUnderlying(type_mem);
    // Atomic with acq_rel order reason: data race with allocated_ with dependencies on reads after the load and on
    // writes before the store
    allocated_[index].fetch_add(size, std::memory_order_acq_rel);
}

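// Note: a move is accounted by decrementing allocated_ for the given space (presumably the space the
// bytes were moved out of) rather than by incrementing freed_, so moved memory does not appear in the
// freed statistics.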
void BaseMemStats::RecordMoved(size_t size, SpaceType type_mem)
{
    auto index = helpers::ToUnderlying(type_mem);
    // Atomic with acq_rel order reason: data race with allocated_ with dependencies on reads after the load and on
    // writes before the store
    uint64_t old_value = allocated_[index].fetch_sub(size, std::memory_order_acq_rel);
    (void)old_value;
    ASSERT(old_value >= size);
}

void BaseMemStats::RecordFreeRaw(size_t size, SpaceType type_mem)
{
    ASSERT(!IsHeapSpace(type_mem));
    RecordFree(size, type_mem);
}

void BaseMemStats::RecordFree(size_t size, SpaceType type_mem)
{
    auto index = helpers::ToUnderlying(type_mem);
    // Atomic with acq_rel order reason: data race with freed_ with dependencies on reads after the load and on
    // writes before the store
    freed_[index].fetch_add(size, std::memory_order_acq_rel);
}

uint64_t BaseMemStats::GetAllocated(SpaceType type_mem) const
{
    // Atomic with acquire order reason: data race with allocated_ with dependencies on reads after the load which
    // should become visible
    return allocated_[helpers::ToUnderlying(type_mem)].load(std::memory_order_acquire);
}

uint64_t BaseMemStats::GetFreed(SpaceType type_mem) const
{
    // Atomic with acquire order reason: data race with freed_ with dependencies on reads after the load which
    // should become visible
    return freed_[helpers::ToUnderlying(type_mem)].load(std::memory_order_acquire);
}

uint64_t BaseMemStats::GetAllocatedHeap() const
{
    uint64_t result = 0;
    for (size_t index = 0; index < SPACE_TYPE_SIZE; index++) {
        SpaceType type = ToSpaceType(index);
        if (IsHeapSpace(type)) {
            // Atomic with acquire order reason: data race with allocated_ with dependencies on reads after the load
            // which should become visible
            result += allocated_[index].load(std::memory_order_acquire);
        }
    }
    return result;
}

uint64_t BaseMemStats::GetFreedHeap() const
{
    uint64_t result = 0;
    for (size_t index = 0; index < SPACE_TYPE_SIZE; index++) {
        SpaceType type = ToSpaceType(index);
        if (IsHeapSpace(type)) {
            // Atomic with acquire order reason: data race with freed_ with dependencies on reads after the load
            // which should become visible
            result += freed_[index].load(std::memory_order_acquire);
        }
    }
    return result;
}

uint64_t BaseMemStats::GetFootprintHeap() const
{
    return helpers::UnsignedDifferenceUint64(GetAllocatedHeap(), GetFreedHeap());
}

uint64_t BaseMemStats::GetFootprint(SpaceType type_mem) const
{
    auto index = helpers::ToUnderlying(type_mem);
    // Atomic with acquire order reason: data race with allocated_ and freed_ with dependencies on reads after the
    // load which should become visible
    LOG_IF(allocated_[index].load(std::memory_order_acquire) < freed_[index].load(std::memory_order_acquire), FATAL, GC)
        << "Allocated < Freed (mem type = " << std::dec
        << static_cast<size_t>(index)
        // Atomic with acquire order reason: data race with allocated_ with dependencies on reads after the load which
        // should become visible
        << "): " << allocated_[index].load(std::memory_order_acquire)
        << " < "
        // Atomic with acquire order reason: data race with freed_ with dependencies on reads after the load which
        // should become visible
        << freed_[index].load(std::memory_order_acquire);
    // Atomic with acquire order reason: data race with allocated_ and freed_ with dependencies on reads after the
    // load which should become visible
    return allocated_[index].load(std::memory_order_acquire) - freed_[index].load(std::memory_order_acquire);
}

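// std::accumulate reads each counter through std::atomic's implicit conversion, i.e. a sequentially
// consistent load per element; the accumulation type follows the 0UL initializer.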
uint64_t BaseMemStats::GetTotalFootprint() const
{
    return std::accumulate(begin(allocated_), end(allocated_), 0UL) - std::accumulate(begin(freed_), end(freed_), 0UL);
}

}  // namespace panda
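
// A minimal usage sketch (illustrative only; assumes BaseMemStats is instantiable here and that
// SpaceType::SPACE_TYPE_INTERNAL is one of the non-heap space enumerators):
//
//     panda::BaseMemStats stats;
//     stats.RecordAllocateRaw(1024U, panda::SpaceType::SPACE_TYPE_INTERNAL);
//     stats.RecordFreeRaw(256U, panda::SpaceType::SPACE_TYPE_INTERNAL);
//     uint64_t live = stats.GetFootprint(panda::SpaceType::SPACE_TYPE_INTERNAL);  // 1024 - 256 = 768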