1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "chre/core/event_loop.h"
18 #include <cinttypes>
19 #include <cstdint>
20
21 #include "chre/core/event.h"
22 #include "chre/core/event_loop_manager.h"
23 #include "chre/core/nanoapp.h"
24 #include "chre/platform/assert.h"
25 #include "chre/platform/context.h"
26 #include "chre/platform/fatal_error.h"
27 #include "chre/platform/system_time.h"
28 #include "chre/util/conditional_lock_guard.h"
29 #include "chre/util/lock_guard.h"
30 #include "chre/util/system/debug_dump.h"
31 #include "chre/util/system/event_callbacks.h"
32 #include "chre/util/system/stats_container.h"
33 #include "chre/util/time.h"
34 #include "chre_api/chre/version.h"
35
36 namespace chre {
37
// Out of line declaration required for nonintegral static types
constexpr Nanoseconds EventLoop::kIntervalWakeupBucket;

namespace {

#ifndef CHRE_STATIC_EVENT_LOOP
// Pool type used when the event loop may expand its event storage at runtime;
// block size/count come from the platform build configuration.
using DynamicMemoryPool =
    SynchronizedExpandableMemoryPool<Event, CHRE_EVENT_PER_BLOCK,
                                     CHRE_MAX_EVENT_BLOCKS>;
#endif
// TODO(b/264108686): Make this a compile time parameter.
// How many low priority event to remove if the event queue is full
// and a new event needs to be pushed.
constexpr size_t targetLowPriorityEventRemove = 4;
52
53 /**
54 * Populates a chreNanoappInfo structure using info from the given Nanoapp
55 * instance.
56 *
57 * @param app A potentially null pointer to the Nanoapp to read from
58 * @param info The structure to populate - should not be null, but this function
59 * will handle that input
60 *
61 * @return true if neither app nor info were null, and info was populated
62 */
populateNanoappInfo(const Nanoapp * app,struct chreNanoappInfo * info)63 bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) {
64 bool success = false;
65
66 if (app != nullptr && info != nullptr) {
67 info->appId = app->getAppId();
68 info->version = app->getAppVersion();
69 info->instanceId = app->getInstanceId();
70 if (app->getTargetApiVersion() >= CHRE_API_VERSION_1_8) {
71 CHRE_ASSERT(app->getRpcServices().size() <= Nanoapp::kMaxRpcServices);
72 info->rpcServiceCount =
73 static_cast<uint8_t>(app->getRpcServices().size());
74 info->rpcServices = app->getRpcServices().data();
75 memset(&info->reserved, 0, sizeof(info->reserved));
76 }
77 success = true;
78 }
79
80 return success;
81 }
82
83 #ifndef CHRE_STATIC_EVENT_LOOP
84 /**
85 * @return true if a event is a low priority event.
86 * Note: data and extraData are needed here to match the
87 * matching function signature. Both are not used here, but
88 * are used in other applications of
89 * SegmentedQueue::removeMatchedFromBack.
90 */
isLowPriorityEvent(Event * event,void *,void *)91 bool isLowPriorityEvent(Event *event, void * /* data */,
92 void * /* extraData */) {
93 CHRE_ASSERT_NOT_NULL(event);
94 return event->isLowPriority;
95 }
96
// Adapter passed to SegmentedQueue::removeMatchedFromBack: returns a removed
// Event's storage to the expandable pool supplied through the opaque
// memoryPool pointer (see EventLoop::removeLowPriorityEventsFromBack).
void deallocateFromMemoryPool(Event *event, void *memoryPool) {
  static_cast<DynamicMemoryPool *>(memoryPool)->deallocate(event);
}
100 #endif
101
102 } // anonymous namespace
103
findNanoappInstanceIdByAppId(uint64_t appId,uint16_t * instanceId) const104 bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId,
105 uint16_t *instanceId) const {
106 CHRE_ASSERT(instanceId != nullptr);
107 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
108
109 bool found = false;
110 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
111 if (app->getAppId() == appId) {
112 *instanceId = app->getInstanceId();
113 found = true;
114 break;
115 }
116 }
117
118 return found;
119 }
120
// Invokes callback once for every loaded nanoapp, forwarding the caller's
// data pointer. Locks mNanoappsLock only when invoked from outside the event
// loop thread (the list is only mutated on that thread).
void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());

  for (const UniquePtr<Nanoapp> &nanoapp : mNanoapps) {
    callback(nanoapp.get(), data);
  }
}
128
invokeMessageFreeFunction(uint64_t appId,chreMessageFreeFunction * freeFunction,void * message,size_t messageSize)129 void EventLoop::invokeMessageFreeFunction(uint64_t appId,
130 chreMessageFreeFunction *freeFunction,
131 void *message, size_t messageSize) {
132 Nanoapp *nanoapp = lookupAppByAppId(appId);
133 if (nanoapp == nullptr) {
134 LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId);
135 } else {
136 auto prevCurrentApp = mCurrentApp;
137 mCurrentApp = nanoapp;
138 freeFunction(message, messageSize);
139 mCurrentApp = prevCurrentApp;
140 }
141 }
142
/**
 * Main dispatch loop of the CHRE event loop thread. Blocks on the inbound
 * event queue and distributes each event to interested nanoapps until
 * onStopComplete() clears mRunning, then drains any remaining events and
 * unloads all nanoapps before returning.
 */
void EventLoop::run() {
  LOGI("EventLoop start");

  while (mRunning) {
    // Events are delivered in a single stage: they arrive in the inbound event
    // queue mEvents (potentially posted from another thread), then within
    // this context these events are distributed to all interested Nanoapps,
    // with their free callback invoked after distribution.
    mEventPoolUsage.addValue(static_cast<uint32_t>(mEvents.size()));

    // mEvents.pop() will be a blocking call if mEvents.empty()
    Event *event = mEvents.pop();
    // Need size() + 1 since the to-be-processed event has already been removed.
    mPowerControlManager.preEventLoopProcess(mEvents.size() + 1);
    distributeEvent(event);

    mPowerControlManager.postEventLoopProcess(mEvents.size());
  }

  // Purge the main queue of events pending distribution. All nanoapps should be
  // prevented from sending events or messages at this point via
  // currentNanoappIsStopping() returning true.
  while (!mEvents.empty()) {
    freeEvent(mEvents.pop());
  }

  // Unload all running nanoapps (from the back so erase() cost stays low)
  while (!mNanoapps.empty()) {
    unloadNanoappAtIndex(mNanoapps.size() - 1);
  }

  LOGI("Exiting EventLoop");
}
176
/**
 * Registers a nanoapp with this event loop and invokes its start() entry
 * point with mCurrentApp set. On success, ownership of the nanoapp has been
 * transferred into mNanoapps and CHRE_EVENT_NANOAPP_STARTED is posted; if
 * start() fails, the nanoapp is unloaded again (without calling nanoappEnd,
 * since it never started).
 *
 * @param nanoapp Owning pointer to the app; becomes null once ownership is
 *        transferred into mNanoapps
 * @return true if the nanoapp was added and its start() returned true
 */
bool EventLoop::startNanoapp(UniquePtr<Nanoapp> &nanoapp) {
  CHRE_ASSERT(!nanoapp.isNull());
  bool success = false;
  auto *eventLoopManager = EventLoopManagerSingleton::get();
  EventLoop &eventLoop = eventLoopManager->getEventLoop();
  uint16_t existingInstanceId;

  if (nanoapp.isNull()) {
    // no-op, invalid argument (defensive re-check of the assert above for
    // builds where CHRE_ASSERT compiles out)
  } else if (nanoapp->getTargetApiVersion() <
             CHRE_FIRST_SUPPORTED_API_VERSION) {
    LOGE("Incompatible nanoapp (target ver 0x%" PRIx32
         ", first supported ver 0x%" PRIx32 ")",
         nanoapp->getTargetApiVersion(),
         static_cast<uint32_t>(CHRE_FIRST_SUPPORTED_API_VERSION));
  } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(),
                                                    &existingInstanceId)) {
    // Reject duplicate app IDs; existingInstanceId is only valid here
    LOGE("App with ID 0x%016" PRIx64 " already exists as instance ID %" PRIu16,
         nanoapp->getAppId(), existingInstanceId);
  } else {
    Nanoapp *newNanoapp = nanoapp.get();
    {
      LockGuard<Mutex> lock(mNanoappsLock);
      success = mNanoapps.push_back(std::move(nanoapp));
      // After this point, nanoapp is null as we've transferred ownership into
      // mNanoapps.back() - use newNanoapp to reference it
    }
    if (!success) {
      LOG_OOM();
    } else {
      // Run nanoappStart() in the app's own context
      mCurrentApp = newNanoapp;
      success = newNanoapp->start();
      mCurrentApp = nullptr;
      if (!success) {
        LOGE("Nanoapp %" PRIu16 " failed to start",
             newNanoapp->getInstanceId());
        unloadNanoapp(newNanoapp->getInstanceId(),
                      /*allowSystemNanoappUnload=*/true,
                      /*nanoappStarted=*/false);
      } else {
        notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp);
      }
    }
  }

  return success;
}
224
/**
 * Stops and unloads the nanoapp with the given instance ID, flushing its
 * outstanding host messages and (if it started) its pending events first so
 * nothing can reference the app's memory after it is destroyed.
 *
 * @param instanceId Instance ID of the nanoapp to unload
 * @param allowSystemNanoappUnload If false, refuses to unload a system app
 * @param nanoappStarted False when the app's start() failed, in which case
 *        the event flush and STOPPED notification are skipped
 * @return true if the nanoapp was found and unloaded
 */
bool EventLoop::unloadNanoapp(uint16_t instanceId,
                              bool allowSystemNanoappUnload,
                              bool nanoappStarted) {
  bool unloaded = false;

  for (size_t i = 0; i < mNanoapps.size(); i++) {
    if (instanceId == mNanoapps[i]->getInstanceId()) {
      if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) {
        LOGE("Refusing to unload system nanoapp");
      } else {
        // Make sure all messages sent by this nanoapp at least have their
        // associated free callback processing pending in the event queue (i.e.
        // there are no messages pending delivery to the host)
        EventLoopManagerSingleton::get()
            ->getHostCommsManager()
            .flushNanoappMessagesAndTransactions(mNanoapps[i]->getAppId());

        // Mark that this nanoapp is stopping early, so it can't send events or
        // messages during the nanoapp event queue flush
        mStoppingNanoapp = mNanoapps[i].get();

        if (nanoappStarted) {
          // Distribute all inbound events we have at this time - here we're
          // interested in handling any message free callbacks generated by
          // flushInboundEventQueue()
          flushInboundEventQueue();

          // Post the unload event now (so we can reference the Nanoapp instance
          // directly), but nanoapps won't get it until after the unload
          // completes. No need to notify status change if nanoapps failed to
          // start.
          notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp);
        }

        // Finally, we are at a point where there should not be any pending
        // events or messages sent by the app that could potentially reference
        // the nanoapp's memory, so we are safe to unload it
        unloadNanoappAtIndex(i, nanoappStarted);
        mStoppingNanoapp = nullptr;

        LOGD("Unloaded nanoapp with instanceId %" PRIu16, instanceId);
        unloaded = true;
      }
      break;
    }
  }

  return unloaded;
}
274
removeLowPriorityEventsFromBack(size_t removeNum)275 bool EventLoop::removeLowPriorityEventsFromBack([[maybe_unused]] size_t removeNum) {
276 #ifdef CHRE_STATIC_EVENT_LOOP
277 return false;
278 #else
279 if (removeNum == 0) {
280 return true;
281 }
282
283 size_t numRemovedEvent =
284 mEvents.removeMatchedFromBack(isLowPriorityEvent, /* data= */ nullptr,
285 /* extraData= */ nullptr, removeNum,
286 deallocateFromMemoryPool, &mEventPool);
287 if (numRemovedEvent == 0 || numRemovedEvent == SIZE_MAX) {
288 LOGW("Cannot remove any low priority event");
289 } else {
290 mNumDroppedLowPriEvents += numRemovedEvent;
291 }
292 return numRemovedEvent > 0;
293 #endif
294 }
295
hasNoSpaceForHighPriorityEvent()296 bool EventLoop::hasNoSpaceForHighPriorityEvent() {
297 return mEventPool.full() &&
298 !removeLowPriorityEventsFromBack(targetLowPriorityEventRemove);
299 }
300
deliverEventSync(uint16_t nanoappInstanceId,uint16_t eventType,void * eventData)301 bool EventLoop::deliverEventSync(uint16_t nanoappInstanceId,
302 uint16_t eventType,
303 void *eventData) {
304 Event event(eventType, eventData,
305 /* freeCallback= */ nullptr,
306 /* isLowPriority= */ false,
307 /* senderInstanceId= */ kSystemInstanceId,
308 /* targetInstanceId= */ nanoappInstanceId,
309 kDefaultTargetGroupMask);
310 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
311 if (app->getInstanceId() == nanoappInstanceId) {
312 deliverNextEvent(app, &event);
313 return true;
314 }
315 }
316
317 return false;
318 }
319
320 // TODO(b/264108686): Refactor this function and postSystemEvent
postEventOrDie(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint16_t targetInstanceId,uint16_t targetGroupMask)321 void EventLoop::postEventOrDie(uint16_t eventType, void *eventData,
322 chreEventCompleteFunction *freeCallback,
323 uint16_t targetInstanceId,
324 uint16_t targetGroupMask) {
325 if (mRunning) {
326 if (hasNoSpaceForHighPriorityEvent() ||
327 !allocateAndPostEvent(eventType, eventData, freeCallback,
328 /* isLowPriority= */ false, kSystemInstanceId,
329 targetInstanceId, targetGroupMask)) {
330 FATAL_ERROR("Failed to post critical system event 0x%" PRIx16, eventType);
331 }
332 } else if (freeCallback != nullptr) {
333 freeCallback(eventType, eventData);
334 }
335 }
336
/**
 * Posts a system event that invokes a callback on the event loop thread
 * rather than being delivered to nanoapps. System events are always high
 * priority and any failure to enqueue one is fatal.
 *
 * @return false only when the event loop is no longer running
 */
bool EventLoop::postSystemEvent(uint16_t eventType, void *eventData,
                                SystemEventCallbackFunction *callback,
                                void *extraData) {
  if (!mRunning) {
    return false;
  }

  // Try to free space by evicting low priority events before giving up
  if (hasNoSpaceForHighPriorityEvent()) {
    FATAL_ERROR("Failed to post critical system event 0x%" PRIx16
                ": Full of high priority "
                "events",
                eventType);
  }

  Event *event = mEventPool.allocate(eventType, eventData, callback, extraData);
  if (event == nullptr || !mEvents.push(event)) {
    FATAL_ERROR("Failed to post critical system event 0x%" PRIx16
                ": out of memory",
                eventType);
  }

  return true;
}
360
postLowPriorityEventOrFree(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint16_t senderInstanceId,uint16_t targetInstanceId,uint16_t targetGroupMask)361 bool EventLoop::postLowPriorityEventOrFree(
362 uint16_t eventType, void *eventData,
363 chreEventCompleteFunction *freeCallback, uint16_t senderInstanceId,
364 uint16_t targetInstanceId, uint16_t targetGroupMask) {
365 bool eventPosted = false;
366
367 if (mRunning) {
368 eventPosted =
369 allocateAndPostEvent(eventType, eventData, freeCallback,
370 /* isLowPriority= */ true, senderInstanceId,
371 targetInstanceId, targetGroupMask);
372 if (!eventPosted) {
373 LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu16,
374 eventType, targetInstanceId);
375 ++mNumDroppedLowPriEvents;
376 }
377 }
378
379 if (!eventPosted && freeCallback != nullptr) {
380 freeCallback(eventType, eventData);
381 }
382
383 return eventPosted;
384 }
385
stop()386 void EventLoop::stop() {
387 auto callback = [](uint16_t /*type*/, void *data, void * /*extraData*/) {
388 auto *obj = static_cast<EventLoop *>(data);
389 obj->onStopComplete();
390 };
391
392 // Stop accepting new events and tell the main loop to finish
393 postSystemEvent(static_cast<uint16_t>(SystemCallbackType::Shutdown),
394 /*eventData=*/this, callback, /*extraData=*/nullptr);
395 }
396
// Runs on the event loop thread (via the system event posted by stop()) and
// clears mRunning so run() exits its main loop.
void EventLoop::onStopComplete() {
  mRunning = false;
}
400
// Thread-safe wrapper around lookupAppByInstanceId: takes the nanoapp list
// lock when called from outside the event loop thread. Returns nullptr if no
// nanoapp has the given instance ID.
Nanoapp *EventLoop::findNanoappByInstanceId(uint16_t instanceId) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  return lookupAppByInstanceId(instanceId);
}
405
// Fills info for the nanoapp with the given 64-bit app ID, locking the
// nanoapp list when called from outside the event loop thread.
// Returns false if no such app is loaded or info is null.
bool EventLoop::populateNanoappInfoForAppId(
    uint64_t appId, struct chreNanoappInfo *info) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  Nanoapp *app = lookupAppByAppId(appId);
  return populateNanoappInfo(app, info);
}
412
// Fills info for the nanoapp with the given instance ID, locking the nanoapp
// list when called from outside the event loop thread.
// Returns false if no such app is loaded or info is null.
bool EventLoop::populateNanoappInfoForInstanceId(
    uint16_t instanceId, struct chreNanoappInfo *info) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  Nanoapp *app = lookupAppByInstanceId(instanceId);
  return populateNanoappInfo(app, info);
}
419
currentNanoappIsStopping() const420 bool EventLoop::currentNanoappIsStopping() const {
421 return (mCurrentApp == mStoppingNanoapp || !mRunning);
422 }
423
/**
 * Appends event loop statistics (pool usage, dropped events, wakeup bucket
 * timing) and per-nanoapp state/memory/message-history tables to the debug
 * dump buffer.
 */
void EventLoop::logStateToBuffer(DebugDumpWrapper &debugDump) const {
  debugDump.print("\nEvent Loop:\n");
  debugDump.print("  Max event pool usage: %" PRIu32 "/%zu\n",
                  mEventPoolUsage.getMax(), kMaxEventCount);
  debugDump.print("  Number of low priority events dropped: %" PRIu32 "\n",
                  mNumDroppedLowPriEvents);
  debugDump.print("  Mean event pool usage: %" PRIu32 "/%zu\n",
                  mEventPoolUsage.getMean(), kMaxEventCount);

  Nanoseconds timeSince =
      SystemTime::getMonotonicTime() - mTimeLastWakeupBucketCycled;
  uint64_t timeSinceMins =
      timeSince.toRawNanoseconds() / kOneMinuteInNanoseconds;
  uint64_t durationMins =
      kIntervalWakeupBucket.toRawNanoseconds() / kOneMinuteInNanoseconds;
  debugDump.print("  Nanoapp host wakeup tracking: cycled %" PRIu64
                  " mins ago, bucketDuration=%" PRIu64 "mins\n",
                  timeSinceMins, durationMins);

  debugDump.print("\nNanoapps:\n");

  if (mNanoapps.size()) {
    // Per-app state, then memory table, then message history table; the
    // header rows are emitted once via the first app.
    for (const UniquePtr<Nanoapp> &app : mNanoapps) {
      app->logStateToBuffer(debugDump);
    }

    mNanoapps[0]->logMemAndComputeHeader(debugDump);
    for (const UniquePtr<Nanoapp> &app : mNanoapps) {
      app->logMemAndComputeEntry(debugDump);
    }

    mNanoapps[0]->logMessageHistoryHeader(debugDump);
    for (const UniquePtr<Nanoapp> &app : mNanoapps) {
      app->logMessageHistoryEntry(debugDump);
    }
  }
}
461
allocateAndPostEvent(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,bool isLowPriority,uint16_t senderInstanceId,uint16_t targetInstanceId,uint16_t targetGroupMask)462 bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData,
463 chreEventCompleteFunction *freeCallback,
464 bool isLowPriority,
465 uint16_t senderInstanceId,
466 uint16_t targetInstanceId,
467 uint16_t targetGroupMask) {
468 bool success = false;
469
470 Event *event =
471 mEventPool.allocate(eventType, eventData, freeCallback, isLowPriority,
472 senderInstanceId, targetInstanceId, targetGroupMask);
473 if (event != nullptr) {
474 success = mEvents.push(event);
475 }
476 if (!success) {
477 LOG_OOM();
478 }
479
480 return success;
481 }
482
// Dispatches a single event to one nanoapp, setting mCurrentApp around the
// call so CHRE APIs invoked from within the nanoapp's handler are attributed
// to it.
void EventLoop::deliverNextEvent(const UniquePtr<Nanoapp> &app, Event *event) {
  // TODO: cleaner way to set/clear this? RAII-style?
  mCurrentApp = app.get();
  app->processEvent(event);
  mCurrentApp = nullptr;
}
489
distributeEvent(Event * event)490 void EventLoop::distributeEvent(Event *event) {
491 bool eventDelivered = false;
492 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
493 if ((event->targetInstanceId == chre::kBroadcastInstanceId &&
494 app->isRegisteredForBroadcastEvent(event)) ||
495 event->targetInstanceId == app->getInstanceId()) {
496 eventDelivered = true;
497 deliverNextEvent(app, event);
498 }
499 }
500 // Log if an event unicast to a nanoapp isn't delivered, as this is could be
501 // a bug (e.g. something isn't properly keeping track of when nanoapps are
502 // unloaded), though it could just be a harmless transient issue (e.g. race
503 // condition with nanoapp unload, where we post an event to a nanoapp just
504 // after queues are flushed while it's unloading)
505 if (!eventDelivered && event->targetInstanceId != kBroadcastInstanceId &&
506 event->targetInstanceId != kSystemInstanceId) {
507 LOGW("Dropping event 0x%" PRIx16 " from instanceId %" PRIu16 "->%" PRIu16,
508 event->eventType, event->senderInstanceId, event->targetInstanceId);
509 }
510 CHRE_ASSERT(event->isUnreferenced());
511 freeEvent(event);
512 }
513
flushInboundEventQueue()514 void EventLoop::flushInboundEventQueue() {
515 while (!mEvents.empty()) {
516 distributeEvent(mEvents.pop());
517 }
518 }
519
// Returns an event to the pool after distribution, first invoking its free
// callback (if any) with mCurrentApp set to the original sender so the
// callback runs in the event creator's context.
void EventLoop::freeEvent(Event *event) {
  if (event->hasFreeCallback()) {
    // TODO: find a better way to set the context to the creator of the event
    mCurrentApp = lookupAppByInstanceId(event->senderInstanceId);
    event->invokeFreeCallback();
    mCurrentApp = nullptr;
  }

  mEventPool.deallocate(event);
}
530
lookupAppByAppId(uint64_t appId) const531 Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const {
532 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
533 if (app->getAppId() == appId) {
534 return app.get();
535 }
536 }
537
538 return nullptr;
539 }
540
lookupAppByInstanceId(uint16_t instanceId) const541 Nanoapp *EventLoop::lookupAppByInstanceId(uint16_t instanceId) const {
542 // The system instance ID always has nullptr as its Nanoapp pointer, so can
543 // skip iterating through the nanoapp list for that case
544 if (instanceId != kSystemInstanceId) {
545 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
546 if (app->getInstanceId() == instanceId) {
547 return app.get();
548 }
549 }
550 }
551
552 return nullptr;
553 }
554
notifyAppStatusChange(uint16_t eventType,const Nanoapp & nanoapp)555 void EventLoop::notifyAppStatusChange(uint16_t eventType,
556 const Nanoapp &nanoapp) {
557 auto *info = memoryAlloc<chreNanoappInfo>();
558 if (info == nullptr) {
559 LOG_OOM();
560 } else {
561 info->appId = nanoapp.getAppId();
562 info->version = nanoapp.getAppVersion();
563 info->instanceId = nanoapp.getInstanceId();
564
565 postEventOrDie(eventType, info, freeEventDataCallback);
566 }
567 }
568
/**
 * Tears down the nanoapp at the given index in mNanoapps: invokes its end()
 * entry point (only if it previously started), releases every resource type
 * it still holds (subsystem subscriptions, timers, heap blocks) with a log
 * for each non-zero leak, then erases it from the list.
 *
 * Runs with mCurrentApp set to the app for the duration so teardown work is
 * attributed to it.
 *
 * @param index Position in mNanoapps; must be valid
 * @param nanoappStarted False when nanoappStart() failed, in which case
 *        nanoappEnd() is skipped per the CHRE API
 */
void EventLoop::unloadNanoappAtIndex(size_t index, bool nanoappStarted) {
  const UniquePtr<Nanoapp> &nanoapp = mNanoapps[index];

  // Lock here to prevent the nanoapp instance from being accessed between the
  // time it is ended and fully erased
  LockGuard<Mutex> lock(mNanoappsLock);

  // Let the app know it's going away
  mCurrentApp = nanoapp.get();

  // nanoappEnd() is not invoked for nanoapps that return false in
  // nanoappStart(), per CHRE API
  if (nanoappStarted) {
    nanoapp->end();
  }

  // Cleanup resources.
#ifdef CHRE_WIFI_SUPPORT_ENABLED
  const uint32_t numDisabledWifiSubscriptions =
      EventLoopManagerSingleton::get()
          ->getWifiRequestManager()
          .disableAllSubscriptions(nanoapp.get());
  logDanglingResources("WIFI subscriptions", numDisabledWifiSubscriptions);
#endif  // CHRE_WIFI_SUPPORT_ENABLED

#ifdef CHRE_GNSS_SUPPORT_ENABLED
  const uint32_t numDisabledGnssSubscriptions =
      EventLoopManagerSingleton::get()
          ->getGnssManager()
          .disableAllSubscriptions(nanoapp.get());
  logDanglingResources("GNSS subscriptions", numDisabledGnssSubscriptions);
#endif  // CHRE_GNSS_SUPPORT_ENABLED

#ifdef CHRE_SENSORS_SUPPORT_ENABLED
  const uint32_t numDisabledSensorSubscriptions =
      EventLoopManagerSingleton::get()
          ->getSensorRequestManager()
          .disableAllSubscriptions(nanoapp.get());
  logDanglingResources("Sensor subscriptions", numDisabledSensorSubscriptions);
#endif  // CHRE_SENSORS_SUPPORT_ENABLED

#ifdef CHRE_AUDIO_SUPPORT_ENABLED
  const uint32_t numDisabledAudioRequests =
      EventLoopManagerSingleton::get()
          ->getAudioRequestManager()
          .disableAllAudioRequests(nanoapp.get());
  logDanglingResources("Audio requests", numDisabledAudioRequests);
#endif  // CHRE_AUDIO_SUPPORT_ENABLED

#ifdef CHRE_BLE_SUPPORT_ENABLED
  const uint32_t numDisabledBleScans = EventLoopManagerSingleton::get()
                                           ->getBleRequestManager()
                                           .disableActiveScan(nanoapp.get());
  logDanglingResources("BLE scan", numDisabledBleScans);
#endif  // CHRE_BLE_SUPPORT_ENABLED

  const uint32_t numCancelledTimers =
      getTimerPool().cancelAllNanoappTimers(nanoapp.get());
  logDanglingResources("timers", numCancelledTimers);

  // Heap cleanup last so the resource managers above can still touch
  // app-owned allocations safely.
  const uint32_t numFreedBlocks =
      EventLoopManagerSingleton::get()->getMemoryManager().nanoappFreeAll(
          nanoapp.get());
  logDanglingResources("heap blocks", numFreedBlocks);

  // Destroy the Nanoapp instance
  mNanoapps.erase(index);

  mCurrentApp = nullptr;
}
639
handleNanoappWakeupBuckets()640 void EventLoop::handleNanoappWakeupBuckets() {
641 Nanoseconds now = SystemTime::getMonotonicTime();
642 Nanoseconds duration = now - mTimeLastWakeupBucketCycled;
643 if (duration > kIntervalWakeupBucket) {
644 mTimeLastWakeupBucketCycled = now;
645 for (auto &nanoapp : mNanoapps) {
646 nanoapp->cycleWakeupBuckets(now);
647 }
648 }
649 }
650
logDanglingResources(const char * name,uint32_t count)651 void EventLoop::logDanglingResources(const char *name, uint32_t count) {
652 if (count > 0) {
653 LOGE("App 0x%016" PRIx64 " had %" PRIu32 " remaining %s at unload",
654 mCurrentApp->getAppId(), count, name);
655 }
656 }
657
658 } // namespace chre
659