1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "chre/core/event_loop.h"
18 #include <cinttypes>
19 #include <cstdint>
20
21 #include "chre/core/event.h"
22 #include "chre/core/event_loop_manager.h"
23 #include "chre/core/nanoapp.h"
24 #include "chre/platform/assert.h"
25 #include "chre/platform/context.h"
26 #include "chre/platform/fatal_error.h"
27 #include "chre/platform/log.h"
28 #include "chre/platform/system_time.h"
29 #include "chre/util/conditional_lock_guard.h"
30 #include "chre/util/lock_guard.h"
31 #include "chre/util/system/debug_dump.h"
32 #include "chre/util/system/event_callbacks.h"
33 #include "chre/util/system/stats_container.h"
34 #include "chre/util/time.h"
35 #include "chre_api/chre/version.h"
36
37 namespace chre {
38
// Out-of-line definition required for non-integral static constexpr members
// (needed when the member is ODR-used, pre-C++17 inline-variable semantics).
constexpr Nanoseconds EventLoop::kIntervalWakeupBucket;
41
42 namespace {
43
#ifndef CHRE_STATIC_EVENT_LOOP
// Pool type used when the event loop is built with dynamic allocation: it
// expands in blocks of CHRE_EVENT_PER_BLOCK events, up to a maximum of
// CHRE_MAX_EVENT_BLOCKS blocks.
using DynamicMemoryPool =
    SynchronizedExpandableMemoryPool<Event, CHRE_EVENT_PER_BLOCK,
                                     CHRE_MAX_EVENT_BLOCKS>;
#endif
// TODO(b/264108686): Make this a compile time parameter.
// How many low priority event to remove if the event queue is full
// and a new event needs to be pushed.
constexpr size_t targetLowPriorityEventRemove = 4;
53
54 /**
55 * Populates a chreNanoappInfo structure using info from the given Nanoapp
56 * instance.
57 *
58 * @param app A potentially null pointer to the Nanoapp to read from
59 * @param info The structure to populate - should not be null, but this function
60 * will handle that input
61 *
62 * @return true if neither app nor info were null, and info was populated
63 */
populateNanoappInfo(const Nanoapp * app,struct chreNanoappInfo * info)64 bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) {
65 bool success = false;
66
67 if (app != nullptr && info != nullptr) {
68 info->appId = app->getAppId();
69 info->version = app->getAppVersion();
70 info->instanceId = app->getInstanceId();
71 if (app->getTargetApiVersion() >= CHRE_API_VERSION_1_8) {
72 CHRE_ASSERT(app->getRpcServices().size() <= Nanoapp::kMaxRpcServices);
73 info->rpcServiceCount =
74 static_cast<uint8_t>(app->getRpcServices().size());
75 info->rpcServices = app->getRpcServices().data();
76 memset(&info->reserved, 0, sizeof(info->reserved));
77 }
78 success = true;
79 }
80
81 return success;
82 }
83
84 #ifndef CHRE_STATIC_EVENT_LOOP
85 /**
86 * @return true if a event is a low priority event.
87 */
isLowPriorityEvent(Event * event)88 bool isLowPriorityEvent(Event *event) {
89 CHRE_ASSERT_NOT_NULL(event);
90 return event->isLowPriority;
91 }
92
deallocateFromMemoryPool(Event * event,void * memoryPool)93 void deallocateFromMemoryPool(Event *event, void *memoryPool) {
94 static_cast<DynamicMemoryPool *>(memoryPool)->deallocate(event);
95 }
96 #endif
97
98 } // anonymous namespace
99
findNanoappInstanceIdByAppId(uint64_t appId,uint16_t * instanceId) const100 bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId,
101 uint16_t *instanceId) const {
102 CHRE_ASSERT(instanceId != nullptr);
103 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
104
105 bool found = false;
106 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
107 if (app->getAppId() == appId) {
108 *instanceId = app->getInstanceId();
109 found = true;
110 break;
111 }
112 }
113
114 return found;
115 }
116
forEachNanoapp(NanoappCallbackFunction * callback,void * data)117 void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) {
118 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
119
120 for (const UniquePtr<Nanoapp> &nanoapp : mNanoapps) {
121 callback(nanoapp.get(), data);
122 }
123 }
124
invokeMessageFreeFunction(uint64_t appId,chreMessageFreeFunction * freeFunction,void * message,size_t messageSize)125 void EventLoop::invokeMessageFreeFunction(uint64_t appId,
126 chreMessageFreeFunction *freeFunction,
127 void *message, size_t messageSize) {
128 Nanoapp *nanoapp = lookupAppByAppId(appId);
129 if (nanoapp == nullptr) {
130 LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId);
131 } else {
132 auto prevCurrentApp = mCurrentApp;
133 mCurrentApp = nanoapp;
134 freeFunction(message, messageSize);
135 mCurrentApp = prevCurrentApp;
136 }
137 }
138
run()139 void EventLoop::run() {
140 LOGI("EventLoop start");
141
142 while (mRunning) {
143 // Events are delivered in a single stage: they arrive in the inbound event
144 // queue mEvents (potentially posted from another thread), then within
145 // this context these events are distributed to all interested Nanoapps,
146 // with their free callback invoked after distribution.
147 mEventPoolUsage.addValue(static_cast<uint32_t>(mEvents.size()));
148
149 // mEvents.pop() will be a blocking call if mEvents.empty()
150 Event *event = mEvents.pop();
151 // Need size() + 1 since the to-be-processed event has already been removed.
152 mPowerControlManager.preEventLoopProcess(mEvents.size() + 1);
153 distributeEvent(event);
154
155 mPowerControlManager.postEventLoopProcess(mEvents.size());
156 }
157
158 // Purge the main queue of events pending distribution. All nanoapps should be
159 // prevented from sending events or messages at this point via
160 // currentNanoappIsStopping() returning true.
161 while (!mEvents.empty()) {
162 freeEvent(mEvents.pop());
163 }
164
165 // Unload all running nanoapps
166 while (!mNanoapps.empty()) {
167 unloadNanoappAtIndex(mNanoapps.size() - 1);
168 }
169
170 LOGI("Exiting EventLoop");
171 }
172
/**
 * Starts a nanoapp: validates it, assigns an instance ID, transfers ownership
 * into mNanoapps, and invokes its start() entry point with mCurrentApp set.
 *
 * @param nanoapp Owning pointer to the app; on the success path ownership is
 *        transferred into mNanoapps (nanoapp becomes null afterwards)
 * @return true if the nanoapp was accepted and its start() returned success
 */
bool EventLoop::startNanoapp(UniquePtr<Nanoapp> &nanoapp) {
  CHRE_ASSERT(!nanoapp.isNull());
  bool success = false;
  auto *eventLoopManager = EventLoopManagerSingleton::get();
  EventLoop &eventLoop = eventLoopManager->getEventLoop();
  uint16_t existingInstanceId;

  if (nanoapp.isNull()) {
    // no-op, invalid argument
  } else if (nanoapp->getTargetApiVersion() <
             CHRE_FIRST_SUPPORTED_API_VERSION) {
    // Reject nanoapps compiled against an API version this build can't serve
    LOGE("Incompatible nanoapp (target ver 0x%" PRIx32
         ", first supported ver 0x%" PRIx32 ")",
         nanoapp->getTargetApiVersion(),
         static_cast<uint32_t>(CHRE_FIRST_SUPPORTED_API_VERSION));
  } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(),
                                                    &existingInstanceId)) {
    // Each app ID may only be loaded once at a time
    LOGE("App with ID 0x%016" PRIx64 " already exists as instance ID %" PRIu16,
         nanoapp->getAppId(), existingInstanceId);
  } else if (!mNanoapps.prepareForPush()) {
    LOG_OOM();
  } else {
    nanoapp->setInstanceId(eventLoopManager->getNextInstanceId());
    LOGD("Instance ID %" PRIu16 " assigned to app ID 0x%016" PRIx64,
         nanoapp->getInstanceId(), nanoapp->getAppId());

    Nanoapp *newNanoapp = nanoapp.get();
    {
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.push_back(std::move(nanoapp));
      // After this point, nanoapp is null as we've transferred ownership into
      // mNanoapps.back() - use newNanoapp to reference it
    }

    // Run the nanoapp's start() entry point with it set as the current app
    mCurrentApp = newNanoapp;
    success = newNanoapp->start();
    mCurrentApp = nullptr;
    if (!success) {
      // TODO: to be fully safe, need to purge/flush any events and messages
      // sent by the nanoapp here (but don't call nanoappEnd). For now, we just
      // destroy the Nanoapp instance.
      LOGE("Nanoapp %" PRIu16 " failed to start", newNanoapp->getInstanceId());

      // Note that this lock protects against concurrent read and modification
      // of mNanoapps, but we are assured that no new nanoapps were added since
      // we pushed the new nanoapp
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.pop_back();
    } else {
      notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp);
    }
  }

  return success;
}
228
/**
 * Stops and unloads the nanoapp with the given instance ID, after flushing
 * its outbound host messages and the inbound event queue so nothing can still
 * reference the nanoapp's memory when it is destroyed.
 *
 * @param instanceId Instance ID of the nanoapp to unload
 * @param allowSystemNanoappUnload If false, refuse to unload system nanoapps
 * @return true if a matching nanoapp was found and unloaded
 */
bool EventLoop::unloadNanoapp(uint16_t instanceId,
                              bool allowSystemNanoappUnload) {
  bool unloaded = false;

  for (size_t i = 0; i < mNanoapps.size(); i++) {
    if (instanceId == mNanoapps[i]->getInstanceId()) {
      if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) {
        LOGE("Refusing to unload system nanoapp");
      } else {
        // Make sure all messages sent by this nanoapp at least have their
        // associated free callback processing pending in the event queue (i.e.
        // there are no messages pending delivery to the host)
        EventLoopManagerSingleton::get()
            ->getHostCommsManager()
            .flushMessagesSentByNanoapp(mNanoapps[i]->getAppId());

        // Mark that this nanoapp is stopping early, so it can't send events or
        // messages during the nanoapp event queue flush
        mStoppingNanoapp = mNanoapps[i].get();

        // Distribute all inbound events we have at this time - here we're
        // interested in handling any message free callbacks generated by
        // flushMessagesSentByNanoapp()
        flushInboundEventQueue();

        // Post the unload event now (so we can reference the Nanoapp instance
        // directly), but nanoapps won't get it until after the unload completes
        notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp);

        // Finally, we are at a point where there should not be any pending
        // events or messages sent by the app that could potentially reference
        // the nanoapp's memory, so we are safe to unload it
        unloadNanoappAtIndex(i);
        mStoppingNanoapp = nullptr;

        LOGD("Unloaded nanoapp with instanceId %" PRIu16, instanceId);
        unloaded = true;
      }
      // Instance IDs are unique, so stop searching either way
      break;
    }
  }

  return unloaded;
}
273
removeLowPriorityEventsFromBack(size_t removeNum)274 bool EventLoop::removeLowPriorityEventsFromBack(size_t removeNum) {
275 #ifdef CHRE_STATIC_EVENT_LOOP
276 return false;
277 #else
278 if (removeNum == 0) {
279 return true;
280 }
281
282 size_t numRemovedEvent = mEvents.removeMatchedFromBack(
283 isLowPriorityEvent, removeNum, deallocateFromMemoryPool, &mEventPool);
284 if (numRemovedEvent == 0 || numRemovedEvent == SIZE_MAX) {
285 LOGW("Cannot remove any low priority event");
286 } else {
287 mNumDroppedLowPriEvents += numRemovedEvent;
288 }
289 return numRemovedEvent > 0;
290 #endif
291 }
292
hasNoSpaceForHighPriorityEvent()293 bool EventLoop::hasNoSpaceForHighPriorityEvent() {
294 return mEventPool.full() &&
295 !removeLowPriorityEventsFromBack(targetLowPriorityEventRemove);
296 }
297
298 // TODO(b/264108686): Refactor this function and postSystemEvent
postEventOrDie(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint16_t targetInstanceId,uint16_t targetGroupMask)299 void EventLoop::postEventOrDie(uint16_t eventType, void *eventData,
300 chreEventCompleteFunction *freeCallback,
301 uint16_t targetInstanceId,
302 uint16_t targetGroupMask) {
303 if (mRunning) {
304 if (hasNoSpaceForHighPriorityEvent() ||
305 !allocateAndPostEvent(eventType, eventData, freeCallback,
306 false /*isLowPriority*/, kSystemInstanceId,
307 targetInstanceId, targetGroupMask)) {
308 FATAL_ERROR("Failed to post critical system event 0x%" PRIx16, eventType);
309 }
310 } else if (freeCallback != nullptr) {
311 freeCallback(eventType, eventData);
312 }
313 }
314
postSystemEvent(uint16_t eventType,void * eventData,SystemEventCallbackFunction * callback,void * extraData)315 bool EventLoop::postSystemEvent(uint16_t eventType, void *eventData,
316 SystemEventCallbackFunction *callback,
317 void *extraData) {
318 if (!mRunning) {
319 return false;
320 }
321
322 if (hasNoSpaceForHighPriorityEvent()) {
323 FATAL_ERROR("Failed to post critical system event 0x%" PRIx16
324 ": Full of high priority "
325 "events",
326 eventType);
327 }
328
329 Event *event = mEventPool.allocate(eventType, eventData, callback, extraData);
330 if (event == nullptr || !mEvents.push(event)) {
331 FATAL_ERROR("Failed to post critical system event 0x%" PRIx16
332 ": out of memory",
333 eventType);
334 }
335
336 return true;
337 }
338
postLowPriorityEventOrFree(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint16_t senderInstanceId,uint16_t targetInstanceId,uint16_t targetGroupMask)339 bool EventLoop::postLowPriorityEventOrFree(
340 uint16_t eventType, void *eventData,
341 chreEventCompleteFunction *freeCallback, uint16_t senderInstanceId,
342 uint16_t targetInstanceId, uint16_t targetGroupMask) {
343 bool eventPosted = false;
344
345 if (mRunning) {
346 #ifdef CHRE_STATIC_EVENT_LOOP
347 if (mEventPool.getFreeBlockCount() > kMinReservedHighPriorityEventCount)
348 #else
349 if (mEventPool.getFreeSpaceCount() > kMinReservedHighPriorityEventCount)
350 #endif
351 {
352 eventPosted = allocateAndPostEvent(
353 eventType, eventData, freeCallback, true /*isLowPriority*/,
354 senderInstanceId, targetInstanceId, targetGroupMask);
355 if (!eventPosted) {
356 LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu16,
357 eventType, targetInstanceId);
358 ++mNumDroppedLowPriEvents;
359 }
360 }
361 }
362
363 if (!eventPosted && freeCallback != nullptr) {
364 freeCallback(eventType, eventData);
365 }
366
367 return eventPosted;
368 }
369
stop()370 void EventLoop::stop() {
371 auto callback = [](uint16_t /*type*/, void *data, void * /*extraData*/) {
372 auto *obj = static_cast<EventLoop *>(data);
373 obj->onStopComplete();
374 };
375
376 // Stop accepting new events and tell the main loop to finish
377 postSystemEvent(static_cast<uint16_t>(SystemCallbackType::Shutdown),
378 /*eventData=*/this, callback, /*extraData=*/nullptr);
379 }
380
// Runs inside the event loop when the Shutdown system event posted by stop()
// is processed; clearing mRunning causes run() to exit its main loop.
void EventLoop::onStopComplete() {
  mRunning = false;
}
384
// Thread-safe wrapper around lookupAppByInstanceId(): takes the nanoapp list
// lock when called from outside the event loop thread.
Nanoapp *EventLoop::findNanoappByInstanceId(uint16_t instanceId) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  return lookupAppByInstanceId(instanceId);
}
389
populateNanoappInfoForAppId(uint64_t appId,struct chreNanoappInfo * info) const390 bool EventLoop::populateNanoappInfoForAppId(
391 uint64_t appId, struct chreNanoappInfo *info) const {
392 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
393 Nanoapp *app = lookupAppByAppId(appId);
394 return populateNanoappInfo(app, info);
395 }
396
populateNanoappInfoForInstanceId(uint16_t instanceId,struct chreNanoappInfo * info) const397 bool EventLoop::populateNanoappInfoForInstanceId(
398 uint16_t instanceId, struct chreNanoappInfo *info) const {
399 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
400 Nanoapp *app = lookupAppByInstanceId(instanceId);
401 return populateNanoappInfo(app, info);
402 }
403
currentNanoappIsStopping() const404 bool EventLoop::currentNanoappIsStopping() const {
405 return (mCurrentApp == mStoppingNanoapp || !mRunning);
406 }
407
logStateToBuffer(DebugDumpWrapper & debugDump) const408 void EventLoop::logStateToBuffer(DebugDumpWrapper &debugDump) const {
409 debugDump.print("\nEvent Loop:\n");
410 debugDump.print(" Max event pool usage: %" PRIu32 "/%zu\n",
411 mEventPoolUsage.getMax(), kMaxEventCount);
412 debugDump.print(" Number of low priority events dropped: %" PRIu32 "\n",
413 mNumDroppedLowPriEvents);
414 debugDump.print(" Mean event pool usage: %" PRIu32 "/%zu\n",
415 mEventPoolUsage.getMean(), kMaxEventCount);
416
417 Nanoseconds timeSince =
418 SystemTime::getMonotonicTime() - mTimeLastWakeupBucketCycled;
419 uint64_t timeSinceMins =
420 timeSince.toRawNanoseconds() / kOneMinuteInNanoseconds;
421 uint64_t durationMins =
422 kIntervalWakeupBucket.toRawNanoseconds() / kOneMinuteInNanoseconds;
423 debugDump.print(" Nanoapp host wakeup tracking: cycled %" PRIu64
424 "mins ago, bucketDuration=%" PRIu64 "mins\n",
425 timeSinceMins, durationMins);
426
427 debugDump.print("\nNanoapps:\n");
428 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
429 app->logStateToBuffer(debugDump);
430 }
431 }
432
allocateAndPostEvent(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,bool isLowPriority,uint16_t senderInstanceId,uint16_t targetInstanceId,uint16_t targetGroupMask)433 bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData,
434 chreEventCompleteFunction *freeCallback,
435 bool isLowPriority,
436 uint16_t senderInstanceId,
437 uint16_t targetInstanceId,
438 uint16_t targetGroupMask) {
439 bool success = false;
440
441 Event *event =
442 mEventPool.allocate(eventType, eventData, freeCallback, isLowPriority,
443 senderInstanceId, targetInstanceId, targetGroupMask);
444 if (event != nullptr) {
445 success = mEvents.push(event);
446 }
447
448 return success;
449 }
450
// Dispatches a single event to one nanoapp, with mCurrentApp set to that
// nanoapp for the duration of its event handler.
void EventLoop::deliverNextEvent(const UniquePtr<Nanoapp> &app, Event *event) {
  // TODO: cleaner way to set/clear this? RAII-style?
  mCurrentApp = app.get();
  app->processEvent(event);
  mCurrentApp = nullptr;
}
457
distributeEvent(Event * event)458 void EventLoop::distributeEvent(Event *event) {
459 bool eventDelivered = false;
460 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
461 if ((event->targetInstanceId == chre::kBroadcastInstanceId &&
462 app->isRegisteredForBroadcastEvent(event)) ||
463 event->targetInstanceId == app->getInstanceId()) {
464 eventDelivered = true;
465 deliverNextEvent(app, event);
466 }
467 }
468 // Log if an event unicast to a nanoapp isn't delivered, as this is could be
469 // a bug (e.g. something isn't properly keeping track of when nanoapps are
470 // unloaded), though it could just be a harmless transient issue (e.g. race
471 // condition with nanoapp unload, where we post an event to a nanoapp just
472 // after queues are flushed while it's unloading)
473 if (!eventDelivered && event->targetInstanceId != kBroadcastInstanceId &&
474 event->targetInstanceId != kSystemInstanceId) {
475 LOGW("Dropping event 0x%" PRIx16 " from instanceId %" PRIu16 "->%" PRIu16,
476 event->eventType, event->senderInstanceId, event->targetInstanceId);
477 }
478 CHRE_ASSERT(event->isUnreferenced());
479 freeEvent(event);
480 }
481
flushInboundEventQueue()482 void EventLoop::flushInboundEventQueue() {
483 while (!mEvents.empty()) {
484 distributeEvent(mEvents.pop());
485 }
486 }
487
// Invokes the event's free callback (if any) in the context of the nanoapp
// that sent it, then returns the event's memory to the pool.
void EventLoop::freeEvent(Event *event) {
  if (event->hasFreeCallback()) {
    // TODO: find a better way to set the context to the creator of the event
    // lookupAppByInstanceId returns nullptr for kSystemInstanceId, so
    // system-originated callbacks run with no current app set.
    mCurrentApp = lookupAppByInstanceId(event->senderInstanceId);
    event->invokeFreeCallback();
    mCurrentApp = nullptr;
  }

  mEventPool.deallocate(event);
}
498
lookupAppByAppId(uint64_t appId) const499 Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const {
500 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
501 if (app->getAppId() == appId) {
502 return app.get();
503 }
504 }
505
506 return nullptr;
507 }
508
lookupAppByInstanceId(uint16_t instanceId) const509 Nanoapp *EventLoop::lookupAppByInstanceId(uint16_t instanceId) const {
510 // The system instance ID always has nullptr as its Nanoapp pointer, so can
511 // skip iterating through the nanoapp list for that case
512 if (instanceId != kSystemInstanceId) {
513 for (const UniquePtr<Nanoapp> &app : mNanoapps) {
514 if (app->getInstanceId() == instanceId) {
515 return app.get();
516 }
517 }
518 }
519
520 return nullptr;
521 }
522
notifyAppStatusChange(uint16_t eventType,const Nanoapp & nanoapp)523 void EventLoop::notifyAppStatusChange(uint16_t eventType,
524 const Nanoapp &nanoapp) {
525 auto *info = memoryAlloc<chreNanoappInfo>();
526 if (info == nullptr) {
527 LOG_OOM();
528 } else {
529 info->appId = nanoapp.getAppId();
530 info->version = nanoapp.getAppVersion();
531 info->instanceId = nanoapp.getInstanceId();
532
533 postEventOrDie(eventType, info, freeEventDataCallback);
534 }
535 }
536
/**
 * Ends and destroys the nanoapp at the given index in mNanoapps, releasing
 * any resources it left behind (subsystem subscriptions, timers, heap).
 * mCurrentApp is set to the nanoapp for the duration of the teardown so that
 * logDanglingResources() and nanoappEnd() run in its context.
 *
 * @param index Valid index into mNanoapps of the nanoapp to unload
 */
void EventLoop::unloadNanoappAtIndex(size_t index) {
  const UniquePtr<Nanoapp> &nanoapp = mNanoapps[index];

  // Lock here to prevent the nanoapp instance from being accessed between the
  // time it is ended and fully erased
  LockGuard<Mutex> lock(mNanoappsLock);

  // Let the app know it's going away
  mCurrentApp = nanoapp.get();
  nanoapp->end();

  // Cleanup resources.
#ifdef CHRE_WIFI_SUPPORT_ENABLED
  const uint32_t numDisabledWifiSubscriptions =
      EventLoopManagerSingleton::get()
          ->getWifiRequestManager()
          .disableAllSubscriptions(nanoapp.get());
  logDanglingResources("WIFI subscriptions", numDisabledWifiSubscriptions);
#endif  // CHRE_WIFI_SUPPORT_ENABLED

#ifdef CHRE_GNSS_SUPPORT_ENABLED
  const uint32_t numDisabledGnssSubscriptions =
      EventLoopManagerSingleton::get()
          ->getGnssManager()
          .disableAllSubscriptions(nanoapp.get());
  logDanglingResources("GNSS subscriptions", numDisabledGnssSubscriptions);
#endif  // CHRE_GNSS_SUPPORT_ENABLED

#ifdef CHRE_SENSORS_SUPPORT_ENABLED
  const uint32_t numDisabledSensorSubscriptions =
      EventLoopManagerSingleton::get()
          ->getSensorRequestManager()
          .disableAllSubscriptions(nanoapp.get());
  logDanglingResources("Sensor subscriptions", numDisabledSensorSubscriptions);
#endif  // CHRE_SENSORS_SUPPORT_ENABLED

#ifdef CHRE_AUDIO_SUPPORT_ENABLED
  const uint32_t numDisabledAudioRequests =
      EventLoopManagerSingleton::get()
          ->getAudioRequestManager()
          .disableAllAudioRequests(nanoapp.get());
  logDanglingResources("Audio requests", numDisabledAudioRequests);
#endif  // CHRE_AUDIO_SUPPORT_ENABLED

#ifdef CHRE_BLE_SUPPORT_ENABLED
  const uint32_t numDisabledBleScans = EventLoopManagerSingleton::get()
                                           ->getBleRequestManager()
                                           .disableActiveScan(nanoapp.get());
  logDanglingResources("BLE scan", numDisabledBleScans);
#endif  // CHRE_BLE_SUPPORT_ENABLED

  // Cancel any outstanding timers owned by the nanoapp
  const uint32_t numCancelledTimers =
      getTimerPool().cancelAllNanoappTimers(nanoapp.get());
  logDanglingResources("timers", numCancelledTimers);

  // Release any heap memory the nanoapp still holds; done last so the
  // resource-cleanup calls above can still use it safely
  const uint32_t numFreedBlocks =
      EventLoopManagerSingleton::get()->getMemoryManager().nanoappFreeAll(
          nanoapp.get());
  logDanglingResources("heap blocks", numFreedBlocks);

  mCurrentApp = nullptr;

  // Destroy the Nanoapp instance
  mNanoapps.erase(index);
}
602
handleNanoappWakeupBuckets()603 void EventLoop::handleNanoappWakeupBuckets() {
604 Nanoseconds now = SystemTime::getMonotonicTime();
605 Nanoseconds duration = now - mTimeLastWakeupBucketCycled;
606 if (duration > kIntervalWakeupBucket) {
607 size_t numBuckets = static_cast<size_t>(
608 duration.toRawNanoseconds() / kIntervalWakeupBucket.toRawNanoseconds());
609 mTimeLastWakeupBucketCycled = now;
610 for (auto &nanoapp : mNanoapps) {
611 nanoapp->cycleWakeupBuckets(numBuckets);
612 }
613 }
614 }
615
logDanglingResources(const char * name,uint32_t count)616 void EventLoop::logDanglingResources(const char *name, uint32_t count) {
617 if (count > 0) {
618 LOGE("App 0x%016" PRIx64 " had %" PRIu32 " remaining %s at unload",
619 mCurrentApp->getAppId(), count, name);
620 }
621 }
622
623 } // namespace chre
624