/*
 * Copyright (C) 2019, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <aidl/android/os/BnPullAtomCallback.h>
#include <aidl/android/os/IPullAtomResultReceiver.h>
#include <aidl/android/os/IStatsd.h>
#include <aidl/android/util/StatsEventParcel.h>
#include <android/binder_auto_utils.h>
#include <android/binder_ibinder.h>
#include <android/binder_manager.h>
#include <stats_event.h>
#include <stats_pull_atom_callback.h>

#include <algorithm>
#include <condition_variable>
#include <map>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

using Status = ::ndk::ScopedAStatus;
using aidl::android::os::BnPullAtomCallback;
using aidl::android::os::IPullAtomResultReceiver;
using aidl::android::os::IStatsd;
using aidl::android::util::StatsEventParcel;
using ::ndk::SharedRefBase;

struct AStatsEventList {
    std::vector<AStatsEvent*> data;
};

AStatsEvent* AStatsEventList_addStatsEvent(AStatsEventList* pull_data) {
    AStatsEvent* event = AStatsEvent_obtain();
    pull_data->data.push_back(event);
    return event;
}
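
// Illustrative sketch (not part of the library): a pull callback that appends
// one event to the list. Assumes the AStatsEvent writer API from
// stats_event.h (AStatsEvent_setAtomId/_writeInt64/_build); the tag and value
// below are placeholders.
//
//     AStatsManager_PullAtomCallbackReturn examplePull(int32_t atom_tag,
//                                                      AStatsEventList* data,
//                                                      void* /*cookie*/) {
//         AStatsEvent* event = AStatsEventList_addStatsEvent(data);
//         AStatsEvent_setAtomId(event, atom_tag);
//         AStatsEvent_writeInt64(event, 42);  // placeholder payload
//         AStatsEvent_build(event);
//         return AStatsManager_PULL_SUCCESS;
//     }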

constexpr int64_t DEFAULT_COOL_DOWN_MILLIS = 1000LL;  // 1 second.
constexpr int64_t DEFAULT_TIMEOUT_MILLIS = 1500LL;    // 1.5 seconds.

struct AStatsManager_PullAtomMetadata {
    int64_t cool_down_millis;
    int64_t timeout_millis;
    std::vector<int32_t> additive_fields;
};

AStatsManager_PullAtomMetadata* AStatsManager_PullAtomMetadata_obtain() {
    AStatsManager_PullAtomMetadata* metadata = new AStatsManager_PullAtomMetadata();
    metadata->cool_down_millis = DEFAULT_COOL_DOWN_MILLIS;
    metadata->timeout_millis = DEFAULT_TIMEOUT_MILLIS;
    metadata->additive_fields = std::vector<int32_t>();
    return metadata;
}

void AStatsManager_PullAtomMetadata_release(AStatsManager_PullAtomMetadata* metadata) {
    delete metadata;
}

void AStatsManager_PullAtomMetadata_setCoolDownMillis(AStatsManager_PullAtomMetadata* metadata,
                                                      int64_t cool_down_millis) {
    metadata->cool_down_millis = cool_down_millis;
}

int64_t AStatsManager_PullAtomMetadata_getCoolDownMillis(AStatsManager_PullAtomMetadata* metadata) {
    return metadata->cool_down_millis;
}

void AStatsManager_PullAtomMetadata_setTimeoutMillis(AStatsManager_PullAtomMetadata* metadata,
                                                     int64_t timeout_millis) {
    metadata->timeout_millis = timeout_millis;
}

int64_t AStatsManager_PullAtomMetadata_getTimeoutMillis(AStatsManager_PullAtomMetadata* metadata) {
    return metadata->timeout_millis;
}

void AStatsManager_PullAtomMetadata_setAdditiveFields(AStatsManager_PullAtomMetadata* metadata,
                                                      int32_t* additive_fields,
                                                      int32_t num_fields) {
    metadata->additive_fields.assign(additive_fields, additive_fields + num_fields);
}

int32_t AStatsManager_PullAtomMetadata_getNumAdditiveFields(
        AStatsManager_PullAtomMetadata* metadata) {
    return metadata->additive_fields.size();
}

void AStatsManager_PullAtomMetadata_getAdditiveFields(AStatsManager_PullAtomMetadata* metadata,
                                                      int32_t* fields) {
    std::copy(metadata->additive_fields.begin(), metadata->additive_fields.end(), fields);
}
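
// Illustrative sketch (not part of the library): configuring pull metadata
// before registration. The additive-field index below is a placeholder;
// valid indices are atom-specific.
//
//     AStatsManager_PullAtomMetadata* metadata = AStatsManager_PullAtomMetadata_obtain();
//     AStatsManager_PullAtomMetadata_setCoolDownMillis(metadata, 2000);
//     AStatsManager_PullAtomMetadata_setTimeoutMillis(metadata, 1000);
//     int32_t additiveFields[] = {3};  // placeholder field index
//     AStatsManager_PullAtomMetadata_setAdditiveFields(metadata, additiveFields, 1);
//     // ... pass metadata to AStatsManager_setPullAtomCallback(), then:
//     AStatsManager_PullAtomMetadata_release(metadata);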

class StatsPullAtomCallbackInternal : public BnPullAtomCallback {
public:
    StatsPullAtomCallbackInternal(const AStatsManager_PullAtomCallback callback, void* cookie,
                                  const int64_t coolDownMillis, const int64_t timeoutMillis,
                                  const std::vector<int32_t> additiveFields)
        : mCallback(callback),
          mCookie(cookie),
          mCoolDownMillis(coolDownMillis),
          mTimeoutMillis(timeoutMillis),
          mAdditiveFields(additiveFields) {}

    Status onPullAtom(int32_t atomTag,
                      const std::shared_ptr<IPullAtomResultReceiver>& resultReceiver) override {
        AStatsEventList statsEventList;
        int successInt = mCallback(atomTag, &statsEventList, mCookie);
        bool success = successInt == AStatsManager_PULL_SUCCESS;

        // Convert stats_events into StatsEventParcels.
        std::vector<StatsEventParcel> parcels;

        // Resolves fuzz build failure in b/161575591.
#if defined(__ANDROID_APEX__) || defined(LIB_STATS_PULL_TESTS_FLAG)
        for (size_t i = 0; i < statsEventList.data.size(); i++) {
            size_t size;
            uint8_t* buffer = AStatsEvent_getBuffer(statsEventList.data[i], &size);

            StatsEventParcel p;
            // vector.assign() creates a copy, but this is inevitable unless
            // stats_event.h/c uses a vector as opposed to a buffer.
            p.buffer.assign(buffer, buffer + size);
            parcels.push_back(std::move(p));
        }
#endif

        Status status = resultReceiver->pullFinished(atomTag, success, parcels);
        if (!status.isOk()) {
            std::vector<StatsEventParcel> emptyParcels;
            resultReceiver->pullFinished(atomTag, /*success=*/false, emptyParcels);
        }
        for (size_t i = 0; i < statsEventList.data.size(); i++) {
            AStatsEvent_release(statsEventList.data[i]);
        }
        return Status::ok();
    }

    int64_t getCoolDownMillis() const { return mCoolDownMillis; }
    int64_t getTimeoutMillis() const { return mTimeoutMillis; }
    const std::vector<int32_t>& getAdditiveFields() const { return mAdditiveFields; }

private:
    const AStatsManager_PullAtomCallback mCallback;
    void* mCookie;
    const int64_t mCoolDownMillis;
    const int64_t mTimeoutMillis;
    const std::vector<int32_t> mAdditiveFields;
};

/**
 * @brief pullersMutex is used to guard simultaneous access to pullers from the threads below:
 * Main thread:
 *     - AStatsManager_setPullAtomCallback()
 *     - AStatsManager_clearPullAtomCallback()
 * Binder thread:
 *     - StatsdProvider::binderDied()
 */
static std::mutex pullersMutex;
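
// All registered pullers, keyed by atom tag. Entries are kept even while
// statsd is unavailable so they can be re-registered from binderDied().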
static std::map<int32_t, std::shared_ptr<StatsPullAtomCallbackInternal>> pullers;

class StatsdProvider {
public:
    StatsdProvider() : mDeathRecipient(AIBinder_DeathRecipient_new(binderDied)) {
    }

    ~StatsdProvider() {
        resetStatsService();
    }

    std::shared_ptr<IStatsd> getStatsService() {
        // Host unit tests also use libstatspull. Since statsd does not exist
        // on host, getStatsService() is a no-op there and returns nullptr.
#ifdef __ANDROID__
        std::lock_guard<std::mutex> lock(mStatsdMutex);
        if (!mStatsd) {
            // Fetch statsd.
            ::ndk::SpAIBinder binder(AServiceManager_getService("stats"));
            mStatsd = IStatsd::fromBinder(binder);
            if (mStatsd) {
                AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), this);
            }
        }
#endif  // __ANDROID__
        return mStatsd;
    }

    void resetStatsService() {
        std::lock_guard<std::mutex> lock(mStatsdMutex);
        mStatsd = nullptr;
    }

    static void binderDied(void* cookie) {
        StatsdProvider* statsProvider = static_cast<StatsdProvider*>(cookie);
        statsProvider->resetStatsService();

        std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
        if (statsService == nullptr) {
            return;
        }

        // Since we do not want to make an IPC call with the lock held, we first
        // create a copy of the map under the lock, then iterate over the copy.
        std::map<int32_t, std::shared_ptr<StatsPullAtomCallbackInternal>> pullersCopy;
        {
            std::lock_guard<std::mutex> lock(pullersMutex);
            pullersCopy = pullers;
        }
        for (const auto& it : pullersCopy) {
            statsService->registerNativePullAtomCallback(it.first, it.second->getCoolDownMillis(),
                                                         it.second->getTimeoutMillis(),
                                                         it.second->getAdditiveFields(), it.second);
        }
    }

private:
    /**
     * @brief mStatsdMutex is used to guard simultaneous access to mStatsd from the threads below:
     * Work thread:
     *     - registerStatsPullAtomCallbackBlocking()
     *     - unregisterStatsPullAtomCallbackBlocking()
     * Binder thread:
     *     - StatsdProvider::binderDied()
     */
    std::mutex mStatsdMutex;
    std::shared_ptr<IStatsd> mStatsd;
    ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
};
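
// Process-wide provider of the IStatsd binder. Worker threads receive their
// own shared_ptr copy, so the provider outlives any in-flight command.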
static std::shared_ptr<StatsdProvider> statsProvider = std::make_shared<StatsdProvider>();

void registerStatsPullAtomCallbackBlocking(int32_t atomTag,
                                           std::shared_ptr<StatsdProvider> statsProvider,
                                           std::shared_ptr<StatsPullAtomCallbackInternal> cb) {
    const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
    if (statsService == nullptr) {
        // Statsd not available.
        return;
    }

    statsService->registerNativePullAtomCallback(
            atomTag, cb->getCoolDownMillis(), cb->getTimeoutMillis(), cb->getAdditiveFields(), cb);
}

void unregisterStatsPullAtomCallbackBlocking(int32_t atomTag,
                                             std::shared_ptr<StatsdProvider> statsProvider) {
    const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
    if (statsService == nullptr) {
        // Statsd not available.
        return;
    }

    statsService->unregisterNativePullAtomCallback(atomTag);
}
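
/**
 * @brief CallbackOperationsHandler serializes register/unregister commands.
 * Each command is queued and consumed by a dedicated short-lived worker
 * thread, which first blocks until the stats service becomes available, so
 * the public API never blocks the caller on statsd readiness.
 */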
class CallbackOperationsHandler {
    struct Cmd {
        enum Type { CMD_REGISTER, CMD_UNREGISTER };

        Type type;
        int atomTag;
        std::shared_ptr<StatsPullAtomCallbackInternal> callback;
    };

public:
    ~CallbackOperationsHandler() {
        for (auto& workThread : mWorkThreads) {
            if (workThread.joinable()) {
                mCondition.notify_one();
                workThread.join();
            }
        }
    }

    static CallbackOperationsHandler& getInstance() {
        static CallbackOperationsHandler handler;
        return handler;
    }

    void registerCallback(int atomTag, std::shared_ptr<StatsPullAtomCallbackInternal> callback) {
        auto registerCmd = std::make_unique<Cmd>();
        registerCmd->type = Cmd::CMD_REGISTER;
        registerCmd->atomTag = atomTag;
        registerCmd->callback = std::move(callback);
        pushToQueue(std::move(registerCmd));

        std::thread registerThread(&CallbackOperationsHandler::processCommands, this,
                                   statsProvider);
        mWorkThreads.push_back(std::move(registerThread));
    }

    void unregisterCallback(int atomTag) {
        auto unregisterCmd = std::make_unique<Cmd>();
        unregisterCmd->type = Cmd::CMD_UNREGISTER;
        unregisterCmd->atomTag = atomTag;
        pushToQueue(std::move(unregisterCmd));

        std::thread unregisterThread(&CallbackOperationsHandler::processCommands, this,
                                     statsProvider);
        mWorkThreads.push_back(std::move(unregisterThread));
    }

private:
    std::vector<std::thread> mWorkThreads;

    std::condition_variable mCondition;
    std::mutex mMutex;
    std::queue<std::unique_ptr<Cmd>> mCmdQueue;

    CallbackOperationsHandler() {
    }

    void pushToQueue(std::unique_ptr<Cmd> cmd) {
        {
            std::unique_lock<std::mutex> lock(mMutex);
            mCmdQueue.push(std::move(cmd));
        }
        mCondition.notify_one();
    }

    void processCommands(std::shared_ptr<StatsdProvider> statsProvider) {
        /**
         * First, obtain the stats service instance. This is a blocking call
         * that waits for service readiness.
         */
        const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();

        /**
         * Lock the queue mutex to guarantee sequential command processing.
         */
        std::unique_lock<std::mutex> lock(mMutex);
        /**
         * This wait should never really block in practice, since the command was
         * already queued by registerCallback() or unregisterCallback() before this
         * worker thread was created. Each worker thread pops a single command from
         * the queue and terminates after processing it, so producers and consumers
         * match 1:1.
         */
        if (mCmdQueue.empty()) {
            mCondition.wait(lock, [this] { return !this->mCmdQueue.empty(); });
        }

        std::unique_ptr<Cmd> cmd = std::move(mCmdQueue.front());
        mCmdQueue.pop();

        if (!statsService) {
            // Statsd not available - drop the command request.
            return;
        }

        switch (cmd->type) {
            case Cmd::CMD_REGISTER: {
                registerStatsPullAtomCallbackBlocking(cmd->atomTag, statsProvider, cmd->callback);
                break;
            }
            case Cmd::CMD_UNREGISTER: {
                unregisterStatsPullAtomCallbackBlocking(cmd->atomTag, statsProvider);
                break;
            }
        }
    }
};

void AStatsManager_setPullAtomCallback(int32_t atom_tag, AStatsManager_PullAtomMetadata* metadata,
                                       AStatsManager_PullAtomCallback callback, void* cookie) {
    int64_t coolDownMillis =
            metadata == nullptr ? DEFAULT_COOL_DOWN_MILLIS : metadata->cool_down_millis;
    int64_t timeoutMillis = metadata == nullptr ? DEFAULT_TIMEOUT_MILLIS : metadata->timeout_millis;

    std::vector<int32_t> additiveFields;
    if (metadata != nullptr) {
        additiveFields = metadata->additive_fields;
    }

    std::shared_ptr<StatsPullAtomCallbackInternal> callbackBinder =
            SharedRefBase::make<StatsPullAtomCallbackInternal>(callback, cookie, coolDownMillis,
                                                               timeoutMillis, additiveFields);

    {
        std::lock_guard<std::mutex> lock(pullersMutex);
        // Always add to the map. If statsd is dead, the puller will be
        // re-registered when it comes back.
        pullers[atom_tag] = callbackBinder;
    }

    CallbackOperationsHandler::getInstance().registerCallback(atom_tag, callbackBinder);
}

void AStatsManager_clearPullAtomCallback(int32_t atom_tag) {
    {
        std::lock_guard<std::mutex> lock(pullersMutex);
        // Always remove the puller from our map.
        // If statsd is down, we will not re-register it when it comes back.
        pullers.erase(atom_tag);
    }

    CallbackOperationsHandler::getInstance().unregisterCallback(atom_tag);
}
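
// Illustrative sketch (not part of the library): end-to-end registration of a
// puller. kExampleAtomTag and examplePull are hypothetical placeholders.
//
//     constexpr int32_t kExampleAtomTag = 10064;  // placeholder tag
//     AStatsManager_setPullAtomCallback(kExampleAtomTag, /*metadata=*/nullptr,
//                                       examplePull, /*cookie=*/nullptr);
//     // ... later, when pulls are no longer needed:
//     AStatsManager_clearPullAtomCallback(kExampleAtomTag);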