/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "inner_event.h"

#include <condition_variable>
#include <mutex>
#include <vector>

#include "event_handler_utils.h"
#include "singleton.h"

DEFINE_HILOG_LABEL("InnerEvent");

namespace OHOS {
namespace AppExecFwk {
namespace {
class WaiterImp final : public InnerEvent::Waiter {
public:
    WaiterImp() = default;
    ~WaiterImp() override = default;
    DISALLOW_COPY_AND_MOVE(WaiterImp);

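    // Wait() loops on finished_ so that a spurious wakeup of the condition
    // variable does not release the caller before Notify() has been called.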
    void Wait() final
    {
        std::unique_lock<std::mutex> lock(mutex_);
        while (!finished_) {
            ++waitingCount_;
            condition_.wait(lock);
            --waitingCount_;
        }
    }

    void Notify() final
    {
        std::lock_guard<std::mutex> lock(mutex_);
        finished_ = true;
        if (waitingCount_ > 0) {
            condition_.notify_all();
        }
    }

private:
    std::mutex mutex_;
    std::condition_variable condition_;
    uint32_t waitingCount_ {0};
    bool finished_ {false};
};
}  // unnamed namespace

// Implementation of the event pool.
class InnerEventPool : public DelayedRefSingleton<InnerEventPool> {
    DECLARE_DELAYED_REF_SINGLETON(InnerEventPool);

public:
    DISALLOW_COPY_AND_MOVE(InnerEventPool);

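    // Hand out an event, reusing one from the pool when possible. The returned
    // Pointer carries Drop() as its deleter, so the event is recycled into the
    // pool automatically when the Pointer is destroyed.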
    InnerEvent::Pointer Get()
    {
        size_t newPeakUsingCount = 0;

        {
            // Check whether the pool is empty.
            std::lock_guard<std::mutex> lock(poolLock_);
            ++usingCount_;
            if (!events_.empty()) {
                auto event = std::move(events_.back());
                events_.pop_back();
                return InnerEvent::Pointer(event.release(), Drop);
            }

            // Update the peak count of in-use events.
            if (usingCount_ >= nextPeakUsingCount_) {
                if (UINT32_MAX - nextPeakUsingCount_ > MAX_BUFFER_POOL_SIZE) {
                    nextPeakUsingCount_ += MAX_BUFFER_POOL_SIZE;
                } else {
                    nextPeakUsingCount_ = UINT32_MAX;
                }

                newPeakUsingCount = usingCount_;
            }
        }

        // Log the new peak count of in-use inner events.
        if (newPeakUsingCount > 0) {
            HILOGD("Peak using count of inner events is up to %{public}zu", newPeakUsingCount);
        }

        // The pool is empty, so allocate a new event.
        return InnerEvent::Pointer(new InnerEvent, Drop);
    }

private:
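    // Deleter attached to every Pointer handed out by Get(): instead of freeing
    // the event, it clears the content and returns the event to the pool.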
    static void Drop(InnerEvent *event)
    {
        if (event == nullptr) {
            return;
        }

        auto destructor = [](InnerEvent *event) {
            if (event != nullptr) {
                delete event;
            }
        };

        // Clear the content of the event
        event->ClearEvent();
        // Put the event back into the buffer pool
        GetInstance().Put(InnerEvent::Pointer(event, destructor));
    }

    void Put(InnerEvent::Pointer &&event)
    {
        // Check whether the pool is full.
        std::lock_guard<std::mutex> lock(poolLock_);
        --usingCount_;
        if (events_.size() < MAX_BUFFER_POOL_SIZE) {
            events_.push_back(std::move(event));
        }
    }

    static const size_t MAX_BUFFER_POOL_SIZE = 64;

    std::mutex poolLock_;
    std::vector<InnerEvent::Pointer> events_;

    // Used to track the peak count of inner events in use.
    size_t usingCount_ {0};
    size_t nextPeakUsingCount_ {MAX_BUFFER_POOL_SIZE};
};

InnerEventPool::InnerEventPool() : poolLock_(), events_()
{
    // Reserve enough memory
    std::lock_guard<std::mutex> lock(poolLock_);
    events_.reserve(MAX_BUFFER_POOL_SIZE);
}

InnerEventPool::~InnerEventPool()
{
    // Release all memory in the pool
    std::lock_guard<std::mutex> lock(poolLock_);
    events_.clear();
}

InnerEvent::Pointer InnerEvent::Get()
{
    auto event = InnerEventPool::GetInstance().Get();
    return event;
}

InnerEvent::Pointer InnerEvent::Get(uint32_t innerEventId, int64_t param)
{
    auto event = InnerEventPool::GetInstance().Get();
    event->innerEventId_ = innerEventId;
    event->param_ = param;
    return event;
}

InnerEvent::Pointer InnerEvent::Get(const Callback &callback, const std::string &name)
{
    // Return nullptr if the callback is invalid.
    if (!callback) {
        HILOGW("Failed to create inner event with an invalid callback");
        return InnerEvent::Pointer(nullptr, nullptr);
    }

    auto event = InnerEventPool::GetInstance().Get();
    event->taskCallback_ = callback;
    event->taskName_ = name;
    return event;
}
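
// Illustrative usage of the factory overloads above (EVENT_ID and the task
// body are placeholders):
//   auto idEvent   = InnerEvent::Get(EVENT_ID, 0);               // event carrying an id and a parameter
//   auto taskEvent = InnerEvent::Get([] { /* work */ }, "work");  // event carrying a callable task
// Both pointers return their event to InnerEventPool when destroyed.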

void InnerEvent::ClearEvent()
{
    // Wake up all waiting threads.
    if (waiter_) {
        waiter_->Notify();
        waiter_.reset();
    }

    if (HasTask()) {
        // Clear members for task
        taskCallback_ = nullptr;
        taskName_.clear();
    } else {
        // Clear members for event
        if (smartPtrDtor_) {
            smartPtrDtor_(smartPtr_);
            smartPtrDtor_ = nullptr;
            smartPtr_ = nullptr;
            smartPtrTypeId_ = 0;
        }
    }

    if (hiTraceId_) {
        hiTraceId_.reset();
    }

    // Clear owner
    owner_.reset();
}

void InnerEvent::WarnSmartPtrCastMismatch()
{
    HILOGE("Type of the shared_ptr, weak_ptr or unique_ptr mismatched");
}

const std::shared_ptr<InnerEvent::Waiter> &InnerEvent::CreateWaiter()
{
    waiter_ = std::make_shared<WaiterImp>();
    return waiter_;
}

bool InnerEvent::HasWaiter() const
{
    return (waiter_ != nullptr);
}

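// Return the existing HiTrace id if one is attached; otherwise create a child
// span from the current thread's trace id, or return nullptr when no valid
// trace id is active.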
const std::shared_ptr<HiTraceId> InnerEvent::GetOrCreateTraceId()
{
    if (hiTraceId_) {
        return hiTraceId_;
    }

    auto traceId = HiTrace::GetId();
    if (!traceId.IsValid()) {
        return nullptr;
    }

    hiTraceId_ = std::make_shared<HiTraceId>(HiTrace::CreateSpan());
    return hiTraceId_;
}

const std::shared_ptr<HiTraceId> InnerEvent::GetTraceId()
{
    return hiTraceId_;
}

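// Produce a one-line summary of the event, e.g. (illustrative)
// "Event { id = 1, param = 2 }" followed by LINE_SEPARATOR.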
std::string InnerEvent::Dump()
{
    std::string content;

    content.append("Event { ");
    if (!owner_.expired()) {
        if (HasTask()) {
            content.append("task name = " + taskName_);
        } else {
            content.append("id = " + std::to_string(innerEventId_));
        }
        if (param_ != 0) {
            content.append(", param = " + std::to_string(param_));
        }
    } else {
        content = "No handler";
    }
    content.append(" }" + LINE_SEPARATOR);

    return content;
}
}  // namespace AppExecFwk
}  // namespace OHOS