1 /*
2 * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "transaction/rs_unmarshal_thread.h"
17
18 #include "app_mgr_client.h"
19 #include "ffrt_inner.h"
20 #include "hisysevent.h"
21 #include "pipeline/render_thread/rs_base_render_util.h"
22 #include "pipeline/main_thread/rs_main_thread.h"
23 #include "pipeline/rs_unmarshal_task_manager.h"
24 #include "platform/common/rs_log.h"
25 #include "platform/common/rs_system_properties.h"
26 #include "transaction/rs_transaction_data.h"
27 #include "res_sched_client.h"
28 #include "res_type.h"
29 #include "rs_frame_report.h"
30 #include "rs_profiler.h"
31 #include "command/rs_node_command.h"
32 #include "command/rs_canvas_node_command.h"
33 #include "recording/draw_cmd_list.h"
34 #include "rs_trace.h"
35 #include "platform/common/rs_hisysevent.h"
36
37 #ifdef RES_SCHED_ENABLE
38 #include "qos.h"
39 #endif
40
41 namespace OHOS::Rosen {
42 namespace {
43 constexpr size_t TRANSACTION_DATA_ALARM_COUNT = 10000;
44 constexpr size_t TRANSACTION_DATA_KILL_COUNT = 20000;
45 const char* TRANSACTION_REPORT_NAME = "IPC_DATA_OVER_ERROR";
46
GetAppMgrClient()47 const std::shared_ptr<AppExecFwk::AppMgrClient> GetAppMgrClient()
48 {
49 static std::shared_ptr<AppExecFwk::AppMgrClient> appMgrClient =
50 std::make_shared<AppExecFwk::AppMgrClient>();
51 return appMgrClient;
52 }
53 }
54
Instance()55 RSUnmarshalThread& RSUnmarshalThread::Instance()
56 {
57 static RSUnmarshalThread instance;
58 return instance;
59 }
60
Start()61 void RSUnmarshalThread::Start()
62 {
63 queue_ = std::make_shared<ffrt::queue>(
64 static_cast<ffrt::queue_type>(ffrt_inner_queue_type_t::ffrt_queue_eventhandler_adapter), "RSUnmarshalThread",
65 ffrt::queue_attr().qos(ffrt::qos_user_interactive));
66 }
67
PostTask(const std::function<void ()> & task,const std::string & name)68 void RSUnmarshalThread::PostTask(const std::function<void()>& task, const std::string& name)
69 {
70 if (queue_) {
71 queue_->submit(
72 std::move(task), ffrt::task_attr()
73 .name(name.c_str())
74 .delay(0)
75 .priority(static_cast<ffrt_queue_priority_t>(ffrt_inner_queue_priority_immediate)));
76 }
77 }
78
// Cancels any queued (not yet started) tasks that were posted with the given
// name. No-op when the queue has not been created.
void RSUnmarshalThread::RemoveTask(const std::string& name)
{
    if (queue_) {
        // NOTE(review): this reinterpret_cast assumes ffrt::queue's first
        // member is the underlying ffrt_queue_t handle, matching the ffrt
        // C-wrapper layout — verify against the ffrt_inner.h contract if the
        // ffrt dependency is upgraded.
        ffrt_queue_t* queue = reinterpret_cast<ffrt_queue_t*>(queue_.get());
        ffrt_queue_cancel_by_name(*queue, name.c_str());
    }
}
86
// Receives a marshalled transaction parcel from IPC and schedules its
// unmarshalling on the FFRT queue. Parsed transaction data is appended to
// cachedTransactionDataMap_ (keyed by sending pid) for RSMainThread to drain
// via GetCachedTransactionData().
// @param parcel                the IPC parcel carrying transaction data
// @param isNonSystemAppCalling true when the caller is a non-system app;
//                              enables pid validation and statistics checks
// @param callingPid            pid of the IPC caller
// @param ashmemFdWorker        owns ashmem fds for the parcel, if any
// @param ashmemFlowControlUnit flow control token; released when the task
//                              lambda (and thus this capture) is destroyed
// @param parcelNumber          sequence number forwarded to the parser
void RSUnmarshalThread::RecvParcel(std::shared_ptr<MessageParcel>& parcel, bool isNonSystemAppCalling, pid_t callingPid,
    std::unique_ptr<AshmemFdWorker> ashmemFdWorker, std::shared_ptr<AshmemFlowControlUnit> ashmemFlowControlUnit,
    uint32_t parcelNumber)
{
    if (!queue_ || !parcel) {
        RS_LOGE("RSUnmarshalThread::RecvParcel has nullptr, queue_: %{public}d, parcel: %{public}d",
            (!queue_), (!parcel));
        return;
    }
    // Large parcels defer the vsync request until unmarshalling completes
    // (see the isPendingUnmarshal branches below).
    bool isPendingUnmarshal = (parcel->GetDataSize() > MIN_PENDING_REQUEST_SYNC_DATA_SIZE);
    // The unique_ptr worker is wrapped in a shared_ptr so the lambda stays
    // copyable (RSTask is std::function-compatible).
    RSTaskMessage::RSTask task = [this, parcel = parcel, isPendingUnmarshal, isNonSystemAppCalling, callingPid,
        ashmemFdWorker = std::shared_ptr(std::move(ashmemFdWorker)), ashmemFlowControlUnit, parcelNumber]() {
        RSMarshallingHelper::SetCallingPid(callingPid);
        AshmemFdContainer::SetIsUnmarshalThread(true);
        if (ashmemFdWorker) {
            ashmemFdWorker->PushFdsToContainer();
        }
        // Report parcel size before parsing, then 0 after to mark completion.
        RsFrameReport::GetInstance().ReportUnmarshalData(unmarshalTid_, parcel->GetDataSize());
        auto transData = RSBaseRenderUtil::ParseTransactionData(*parcel, parcelNumber);
        RsFrameReport::GetInstance().ReportUnmarshalData(unmarshalTid_, 0);
        if (ashmemFdWorker) {
            // ashmem parcel fds will be closed in ~AshmemFdWorker() instead of ~MessageParcel()
            parcel->FlushBuffer();
            ashmemFdWorker->EnableManualCloseFds();
        }
        if (!transData) {
            return;
        }
        if (isNonSystemAppCalling) {
            const auto& nodeMap = RSMainThread::Instance()->GetContext().GetNodeMap();
            // Validation failure is logged but not fatal; dropping is decided
            // solely by the statistics check below.
            if (!transData->IsCallingPidValid(callingPid, nodeMap)) {
                RS_LOGE("RSUnmarshalThread::RecvParcel IsCallingPidValid check failed");
            }
            bool shouldDrop = ReportTransactionDataStatistics(callingPid, transData.get(), isNonSystemAppCalling);
            if (shouldDrop) {
                RS_LOGW("RSUnmarshalThread::RecvParcel data droped");
                return;
            }
        }
        RS_PROFILER_ON_PARCEL_RECEIVE(parcel.get(), transData.get());
        // transData is guaranteed non-null here (early-returned above); the
        // null checks below are defensive only.
        int64_t time = transData == nullptr ? 0 : static_cast<int64_t>(transData->GetTimestamp());
        if (transData && transData->GetDVSyncUpdate()) {
            RSMainThread::Instance()->DVSyncUpdate(transData->GetDVSyncTime(), transData->GetTimestamp());
        }
        {
            std::lock_guard<std::mutex> lock(transactionDataMutex_);
            cachedTransactionDataMap_[transData->GetSendingPid()].emplace_back(std::move(transData));
        }
        if (isPendingUnmarshal) {
            RSMainThread::Instance()->RequestNextVSync("UI", time);
        } else {
            // For small parcels, only request a vsync from here when the
            // transaction is already older than one frame period.
            const auto &now = std::chrono::steady_clock::now().time_since_epoch();
            int64_t currentTime = std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
            constexpr int64_t ONE_PERIOD = 8333333; // one 120 Hz frame in ns
            if (currentTime - time > ONE_PERIOD && time != 0) {
                RSMainThread::Instance()->RequestNextVSync("UI", time);
            }
        }
        RSMainThread::Instance()->NotifyUnmarshalTask(time);
        // ashmem parcel flow control ends in the destructor of ashmemFlowControlUnit
    };
    {
        // NOTE(review): PostTask is called without a name, so the name
        // parameter presumably has a default in the header — confirm.
        PostTask(task);
        /* a task has been posted, it means cachedTransactionDataMap_ will not been empty.
         * so set willHaveCachedData_ to true
         */
        std::lock_guard<std::mutex> lock(transactionDataMutex_);
        willHaveCachedData_ = true;
    }
    if (!isPendingUnmarshal) {
        RSMainThread::Instance()->RequestNextVSync();
    }
}
160
GetCachedTransactionData()161 TransactionDataMap RSUnmarshalThread::GetCachedTransactionData()
162 {
163 TransactionDataMap transactionData;
164 {
165 std::lock_guard<std::mutex> lock(transactionDataMutex_);
166 std::swap(transactionData, cachedTransactionDataMap_);
167 willHaveCachedData_ = false;
168 }
169 return transactionData;
170 }
171
CachedTransactionDataEmpty()172 bool RSUnmarshalThread::CachedTransactionDataEmpty()
173 {
174 std::lock_guard<std::mutex> lock(transactionDataMutex_);
175 /* we need consider both whether cachedTransactionDataMap_ is empty now
176 * and whether cachedTransactionDataMap_ will be empty later
177 */
178 return cachedTransactionDataMap_.empty() && !willHaveCachedData_;
179 }
180
IsHaveCmdList(const std::unique_ptr<RSCommand> & cmd) const181 bool RSUnmarshalThread::IsHaveCmdList(const std::unique_ptr<RSCommand>& cmd) const
182 {
183 if (!cmd) {
184 return false;
185 }
186 bool haveCmdList = false;
187 switch (cmd->GetType()) {
188 case RSCommandType::RS_NODE:
189 if (cmd->GetSubType() == RSNodeCommandType::UPDATE_MODIFIER_DRAW_CMD_LIST_NG ||
190 cmd->GetSubType() == RSNodeCommandType::ADD_MODIFIER_NG) {
191 haveCmdList = true;
192 }
193 break;
194 case RSCommandType::CANVAS_NODE:
195 if (cmd->GetSubType() == RSCanvasNodeCommandType::CANVAS_NODE_UPDATE_RECORDING) {
196 haveCmdList = true;
197 }
198 break;
199 default:
200 break;
201 }
202 return haveCmdList;
203 }
204
// Accumulates per-pid command/op counts, reports to HiSysEvent once the
// alarm threshold is exceeded, and (when termination is enabled) kills the
// offending non-system app once its total crosses the kill threshold.
// Counters accumulate until ClearTransactionDataStatistics() is called.
// @return true if the caller should drop this transaction (app was killed)
bool RSUnmarshalThread::ReportTransactionDataStatistics(pid_t pid,
    RSTransactionData* transactionData,
    bool isNonSystemAppCalling)
{
    size_t preCount = 0;
    size_t totalCount = 0;
    size_t opCount = 0;
    if (!transactionData) {
        return false;
    }
    opCount = transactionData->GetCommandCount();
    // Draw-cmd-list commands contribute their op-item count on top of the
    // plain command count.
    auto& payload_temp = transactionData->GetPayload();
    for (auto& item_temp : payload_temp) {
        // NOTE(review): tuple element 2 is presumably the RSCommand pointer —
        // verify against the RSTransactionData payload definition.
        auto& cmd = std::get<2>(item_temp);
        if (!cmd) {
            continue;
        }
        if (IsHaveCmdList(cmd)) {
            auto drawCmdList = cmd->GetDrawCmdList();
            if (drawCmdList) {
                opCount += drawCmdList->GetOpItemSize();
            }
        }
    }
    {
        std::unique_lock<std::mutex> lock(statisticsMutex_);
        preCount = transactionDataStatistics_[pid];
        totalCount = preCount + opCount;
        transactionDataStatistics_[pid] = totalCount;

        // Below the alarm threshold: nothing to report, never drop.
        if (totalCount < TRANSACTION_DATA_ALARM_COUNT) {
            return false;
        }
    }
    const auto appMgrClient = GetAppMgrClient();
    if (!appMgrClient) {
        RS_LOGW("Get global variable AppMgrClient failed");
        return false;
    }
    int32_t uid = 0;
    std::string bundleName;
    appMgrClient->GetBundleNameByPid(pid, bundleName, uid);

    // The trace/event/log below fire on every transaction once the pid is
    // past the alarm threshold (counters only reset via
    // ClearTransactionDataStatistics()).
    RS_TRACE_NAME_FMT("RSUnmarshalThread::ReportTransactionDataStatistics HiSysEventWrite pid[%d] uid[%d]"
        " bundleName[%s] opCount[%zu] exceeded[%d]",
        pid, uid, bundleName.c_str(), totalCount, totalCount > TRANSACTION_DATA_KILL_COUNT);
    RSHiSysEvent::EventWrite(RSEventName::IPC_DATA_OVER_ERROR, RSEventType::RS_STATISTIC, "PID", pid, "UID", uid,
        "BUNDLE_NAME", bundleName, "TRANSACTION_DATA_SIZE", totalCount);
    RS_LOGW("TransactionDataStatistics pid[%{public}d] uid[%{public}d]"
        " bundleName[%{public}s] opCount[%{public}zu] exceeded[%{public}d]",
        pid, uid, bundleName.c_str(), totalCount, totalCount > TRANSACTION_DATA_KILL_COUNT);

    bool terminateEnabled = RSSystemProperties::GetTransactionTerminateEnabled();
    if (!isNonSystemAppCalling || !terminateEnabled) {
        return false;
    }
    // Kill at most once per threshold crossing: only when this transaction
    // pushed the total over the kill count (preCount was still at or below).
    if (totalCount > TRANSACTION_DATA_KILL_COUNT && preCount <= TRANSACTION_DATA_KILL_COUNT) {
        int res = appMgrClient->KillApplicationByUid(bundleName, uid);
        return res == AppExecFwk::RESULT_OK;
    }
    return false;
}
267
ClearTransactionDataStatistics()268 void RSUnmarshalThread::ClearTransactionDataStatistics()
269 {
270 std::unique_lock<std::mutex> lock(statisticsMutex_);
271 transactionDataStatistics_.clear();
272 }
273 }
274