• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "inner_event.h"
17 
18 #include <condition_variable>
19 #include <mutex>
20 #include <vector>
21 
22 #include "event_handler_utils.h"
23 #include "singleton.h"
24 
25 DEFINE_HILOG_LABEL("InnerEvent");
26 
27 namespace OHOS {
28 namespace AppExecFwk {
29 namespace {
30 static constexpr int DATETIME_STRING_LENGTH = 80;
31 static constexpr int MAX_MS_LENGTH = 3;
32 static constexpr int MS_PER_SECOND = 1000;
33 
34 class WaiterImp final : public InnerEvent::Waiter {
35 public:
WaiterImp()36     WaiterImp(){};
~WaiterImp()37     ~WaiterImp() override{};
38     DISALLOW_COPY_AND_MOVE(WaiterImp);
39 
Wait()40     void Wait() final
41     {
42         std::unique_lock<std::mutex> lock(mutex_);
43         while (!finished_) {
44             ++waitingCount_;
45             condition_.wait(lock);
46             --waitingCount_;
47         }
48     }
49 
Notify()50     void Notify() final
51     {
52         std::lock_guard<std::mutex> lock(mutex_);
53         finished_ = true;
54         if (waitingCount_ > 0) {
55             condition_.notify_all();
56         }
57     }
58 
59 private:
60     std::mutex mutex_;
61     std::condition_variable condition_;
62     uint32_t waitingCount_ {0};
63     bool finished_ {false};
64 };
65 }  // unnamed namespace
66 
// Implementation for event pool.
//
// Process-wide recycling pool for InnerEvent objects: Get() hands out a
// pooled (or newly allocated) event whose custom deleter, Drop(), returns
// the event to the pool instead of freeing it. All pool state is guarded
// by poolLock_.
class InnerEventPool : public DelayedRefSingleton<InnerEventPool> {
    DECLARE_DELAYED_REF_SINGLETON(InnerEventPool);

public:
    DISALLOW_COPY_AND_MOVE(InnerEventPool);

    // Acquire an event: reuse a pooled instance when available, otherwise
    // allocate a fresh one. Either way the returned Pointer's deleter is
    // Drop(), so releasing the Pointer recycles the event back here.
    InnerEvent::Pointer Get()
    {
        size_t newPeakUsingCount = 0;

        {
            // Check whether pool is empty.
            std::lock_guard<std::mutex> lock(poolLock_);
            ++usingCount_;
            if (!events_.empty()) {
                auto event = std::move(events_.back());
                events_.pop_back();
                // Re-wrap the raw pointer with Drop() as its deleter.
                return InnerEvent::Pointer(event.release(), Drop);
            }

            // Update peak using events count.
            // Raise the reporting threshold in MAX_BUFFER_POOL_SIZE steps,
            // clamping at UINT32_MAX to avoid overflow.
            if (usingCount_ >= nextPeakUsingCount_) {
                if (UINT32_MAX - nextPeakUsingCount_ > MAX_BUFFER_POOL_SIZE) {
                    nextPeakUsingCount_ += MAX_BUFFER_POOL_SIZE;
                } else {
                    nextPeakUsingCount_ = UINT32_MAX;
                }

                newPeakUsingCount = usingCount_;
            }
        }

        // Print the new peak using count of inner events.
        // Logged outside the lock to keep the critical section short.
        if (newPeakUsingCount > 0) {
            HILOGD("Peak using count of inner events is up to %{public}zu", newPeakUsingCount);
        }

        // Allocate new memory, while pool is empty.
        return InnerEvent::Pointer(new InnerEvent, Drop);
    }

private:
    // Deleter installed on every Pointer handed out by Get(): clears the
    // event's content and offers it back to the pool.
    static void Drop(InnerEvent *event)
    {
        if (event == nullptr) {
            return;
        }

        // Deleter for the temporary Pointer passed to Put(); it actually
        // frees the event if Put() discards it (pool already full).
        auto destructor = [](InnerEvent *event) {
            if (event != nullptr) {
                delete event;
            }
        };

        // Clear content of the event
        event->ClearEvent();
        // Put event into event buffer pool
        GetInstance().Put(InnerEvent::Pointer(event, destructor));
    }

    // Return an event to the pool, or let the Pointer's deleter free it
    // when the pool is already at capacity.
    void Put(InnerEvent::Pointer &&event)
    {
        // Check whether pool is full.
        std::lock_guard<std::mutex> lock(poolLock_);
        --usingCount_;
        if (events_.size() < MAX_BUFFER_POOL_SIZE) {
            events_.push_back(std::move(event));
        }
    }

    static const size_t MAX_BUFFER_POOL_SIZE = 64;

    std::mutex poolLock_;
    std::vector<InnerEvent::Pointer> events_;

    // Used to statistical peak value of count of using inner events.
    size_t usingCount_ {0};
    size_t nextPeakUsingCount_ {MAX_BUFFER_POOL_SIZE};
};
147 
InnerEventPool()148 InnerEventPool::InnerEventPool() : poolLock_(), events_()
149 {
150     // Reserve enough memory
151     std::lock_guard<std::mutex> lock(poolLock_);
152     events_.reserve(MAX_BUFFER_POOL_SIZE);
153 }
154 
// Destroy the pool, releasing every cached event (each Pointer's deleter
// frees the underlying memory).
InnerEventPool::~InnerEventPool()
{
    // Release all memory in the pool
    std::lock_guard<std::mutex> lock(poolLock_);
    events_.clear();
}
161 
Get()162 InnerEvent::Pointer InnerEvent::Get()
163 {
164     auto event = InnerEventPool::GetInstance().Get();
165     return event;
166 }
167 
Get(uint32_t innerEventId,int64_t param)168 InnerEvent::Pointer InnerEvent::Get(uint32_t innerEventId, int64_t param)
169 {
170     auto event = InnerEventPool::GetInstance().Get();
171     if (event != nullptr) {
172         event->innerEventId_ = innerEventId;
173         event->param_ = param;
174     }
175     return event;
176 }
177 
Get(const Callback & callback,const std::string & name)178 InnerEvent::Pointer InnerEvent::Get(const Callback &callback, const std::string &name)
179 {
180     // Returns nullptr while callback is invalid.
181     if (!callback) {
182         HILOGW("Failed to create inner event with an invalid callback");
183         return InnerEvent::Pointer(nullptr, nullptr);
184     }
185 
186     auto event = InnerEventPool::GetInstance().Get();
187     if (event != nullptr) {
188         event->taskCallback_ = callback;
189         event->taskName_ = name;
190     }
191     return event;
192 }
193 
ClearEvent()194 void InnerEvent::ClearEvent()
195 {
196     // Wake up all waiting threads.
197     if (waiter_) {
198         waiter_->Notify();
199         waiter_.reset();
200     }
201 
202     if (HasTask()) {
203         // Clear members for task
204         taskCallback_ = nullptr;
205         taskName_.clear();
206     } else {
207         // Clear members for event
208         if (smartPtrDtor_) {
209             smartPtrDtor_(smartPtr_);
210             smartPtrDtor_ = nullptr;
211             smartPtr_ = nullptr;
212             smartPtrTypeId_ = 0;
213         }
214     }
215 
216     if (hiTraceId_) {
217         hiTraceId_.reset();
218     }
219 
220     // Clear owner
221     owner_.reset();
222 }
223 
// Log an error reporting that a stored smart pointer was accessed with a
// mismatched type. NOTE(review): presumably invoked by the typed accessor
// templates declared in the header — confirm against inner_event.h.
void InnerEvent::WarnSmartPtrCastMismatch()
{
    HILOGE("Type of the shared_ptr, weak_ptr or unique_ptr mismatched");
}
228 
CreateWaiter()229 const std::shared_ptr<InnerEvent::Waiter> &InnerEvent::CreateWaiter()
230 {
231     waiter_ = std::make_shared<WaiterImp>();
232     return waiter_;
233 }
234 
HasWaiter() const235 bool InnerEvent::HasWaiter() const
236 {
237     return (waiter_ != nullptr);
238 }
239 
GetOrCreateTraceId()240 const std::shared_ptr<HiTraceId> InnerEvent::GetOrCreateTraceId()
241 {
242     if (hiTraceId_) {
243         return hiTraceId_;
244     }
245 
246     auto traceId = HiTraceChain::GetId();
247     if (!traceId.IsValid()) {
248         return nullptr;
249     }
250 
251     hiTraceId_ = std::make_shared<HiTraceId>(HiTraceChain::CreateSpan());
252     return hiTraceId_;
253 }
254 
// Return the hitrace id attached to this event, or nullptr when none has
// been created (see GetOrCreateTraceId()).
const std::shared_ptr<HiTraceId> InnerEvent::GetTraceId()
{
    return hiTraceId_;
}
259 
DumpTimeToString(const std::chrono::system_clock::time_point & time)260 std::string InnerEvent::DumpTimeToString(const std::chrono::system_clock::time_point &time)
261 {
262     auto tp = std::chrono::time_point_cast<std::chrono::milliseconds>(time);
263     auto tt = std::chrono::system_clock::to_time_t(time);
264     auto ms = tp.time_since_epoch().count() % MS_PER_SECOND;
265     auto msString = std::to_string(ms);
266     if (msString.length() < MAX_MS_LENGTH) {
267         msString = std::string(MAX_MS_LENGTH - msString.length(), '0') + msString;
268     }
269     struct tm curTime = {0};
270     localtime_r(&tt, &curTime);
271     char sysTime[DATETIME_STRING_LENGTH];
272     std::strftime(sysTime, sizeof(char) * DATETIME_STRING_LENGTH, "%Y-%m-%d %I:%M:%S.", &curTime);
273     return std::string(sysTime) + msString;
274 }
275 
DumpTimeToString(const TimePoint & time)276 std::string InnerEvent::DumpTimeToString(const TimePoint &time)
277 {
278     auto tp = std::chrono::system_clock::now() +
279         std::chrono::duration_cast<std::chrono::milliseconds>(time - std::chrono::steady_clock::now());
280     return DumpTimeToString(tp);
281 }
282 
Dump()283 std::string InnerEvent::Dump()
284 {
285     std::string content;
286 
287     content.append("Event { ");
288     if (!owner_.expired()) {
289         content.append("send thread = " + std::to_string(senderKernelThreadId_));
290         content.append(", send time = " + DumpTimeToString(sendTime_));
291         content.append(", handle time = " + DumpTimeToString(handleTime_));
292         if (HasTask()) {
293             content.append(", task name = " + taskName_);
294         } else {
295             content.append(", id = " + std::to_string(innerEventId_));
296         }
297         if (param_ != 0) {
298             content.append(", param = " + std::to_string(param_));
299         }
300     } else {
301         content.append("No handler");
302     }
303     content.append(" }" + LINE_SEPARATOR);
304 
305     return content;
306 }
307 }  // namespace AppExecFwk
308 }  // namespace OHOS
309