• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "chre/core/event_loop.h"
18 
19 #include "chre/core/event.h"
20 #include "chre/core/event_loop_manager.h"
21 #include "chre/core/nanoapp.h"
22 #include "chre/platform/context.h"
23 #include "chre/platform/fatal_error.h"
24 #include "chre/platform/log.h"
25 #include "chre/platform/system_time.h"
26 #include "chre/util/conditional_lock_guard.h"
27 #include "chre/util/lock_guard.h"
28 #include "chre/util/system/debug_dump.h"
29 #include "chre/util/time.h"
30 #include "chre_api/chre/version.h"
31 
32 namespace chre {
33 
34 // Out of line declaration required for nonintegral static types
35 constexpr Nanoseconds EventLoop::kIntervalWakeupBucket;
36 
37 namespace {
38 
39 /**
40  * Populates a chreNanoappInfo structure using info from the given Nanoapp
41  * instance.
42  *
43  * @param app A potentially null pointer to the Nanoapp to read from
44  * @param info The structure to populate - should not be null, but this function
45  *        will handle that input
46  *
47  * @return true if neither app nor info were null, and info was populated
48  */
populateNanoappInfo(const Nanoapp * app,struct chreNanoappInfo * info)49 bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) {
50   bool success = false;
51 
52   if (app != nullptr && info != nullptr) {
53     info->appId = app->getAppId();
54     info->version = app->getAppVersion();
55     info->instanceId = app->getInstanceId();
56     success = true;
57   }
58 
59   return success;
60 }
61 
62 }  // anonymous namespace
63 
findNanoappInstanceIdByAppId(uint64_t appId,uint32_t * instanceId) const64 bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId,
65                                              uint32_t *instanceId) const {
66   CHRE_ASSERT(instanceId != nullptr);
67   ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
68 
69   bool found = false;
70   for (const UniquePtr<Nanoapp> &app : mNanoapps) {
71     if (app->getAppId() == appId) {
72       *instanceId = app->getInstanceId();
73       found = true;
74       break;
75     }
76   }
77 
78   return found;
79 }
80 
forEachNanoapp(NanoappCallbackFunction * callback,void * data)81 void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) {
82   ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
83 
84   for (const UniquePtr<Nanoapp> &nanoapp : mNanoapps) {
85     callback(nanoapp.get(), data);
86   }
87 }
88 
invokeMessageFreeFunction(uint64_t appId,chreMessageFreeFunction * freeFunction,void * message,size_t messageSize)89 void EventLoop::invokeMessageFreeFunction(uint64_t appId,
90                                           chreMessageFreeFunction *freeFunction,
91                                           void *message, size_t messageSize) {
92   Nanoapp *nanoapp = lookupAppByAppId(appId);
93   if (nanoapp == nullptr) {
94     LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId);
95   } else {
96     auto prevCurrentApp = mCurrentApp;
97     mCurrentApp = nanoapp;
98     freeFunction(message, messageSize);
99     mCurrentApp = prevCurrentApp;
100   }
101 }
102 
/**
 * Runs the main event loop until onStopComplete() clears mRunning: drains the
 * inbound queue, distributes events to per-nanoapp queues, and delivers them.
 * On exit, flushes all remaining events and unloads every nanoapp.
 */
void EventLoop::run() {
  LOGI("EventLoop start");

  bool havePendingEvents = false;
  while (mRunning) {
    // Events are delivered in two stages: first they arrive in the inbound
    // event queue mEvents (potentially posted from another thread), then within
    // this context these events are distributed to smaller event queues
    // associated with each Nanoapp that should receive the event. Once the
    // event is delivered to all interested Nanoapps, its free callback is
    // invoked.
    if (!havePendingEvents || !mEvents.empty()) {
      // Track the high-water mark of the inbound queue for debug dumps.
      if (mEvents.size() > mMaxEventPoolUsage) {
        mMaxEventPoolUsage = mEvents.size();
      }

      // mEvents.pop() will be a blocking call if mEvents.empty()
      distributeEvent(mEvents.pop());
    }

    // Give each nanoapp a chance to process one queued event (round-robin).
    havePendingEvents = deliverEvents();

    mPowerControlManager.postEventLoopProcess(mEvents.size());
  }

  // Deliver any events sitting in Nanoapps' own queues (we could drop them to
  // exit faster, but this is less code and should complete quickly under normal
  // conditions), then purge the main queue of events pending distribution. All
  // nanoapps should be prevented from sending events or messages at this point
  // via currentNanoappIsStopping() returning true.
  flushNanoappEventQueues();
  while (!mEvents.empty()) {
    freeEvent(mEvents.pop());
  }

  // Unload all running nanoapps
  while (!mNanoapps.empty()) {
    unloadNanoappAtIndex(mNanoapps.size() - 1);
  }

  LOGI("Exiting EventLoop");
}
145 
/**
 * Starts a nanoapp: validates its target API version, rejects duplicate app
 * IDs, assigns an instance ID, transfers ownership into mNanoapps, and invokes
 * the nanoapp's start() entry point with it set as the current context.
 *
 * @param nanoapp Owned pointer to the app; on success ownership is transferred
 *        into mNanoapps and this pointer becomes null
 * @return true if the nanoapp's start() callback returned success
 */
bool EventLoop::startNanoapp(UniquePtr<Nanoapp> &nanoapp) {
  CHRE_ASSERT(!nanoapp.isNull());
  bool success = false;
  auto *eventLoopManager = EventLoopManagerSingleton::get();
  EventLoop &eventLoop = eventLoopManager->getEventLoop();
  uint32_t existingInstanceId;

  if (nanoapp.isNull()) {
    // no-op, invalid argument
  } else if (nanoapp->getTargetApiVersion() <
             CHRE_FIRST_SUPPORTED_API_VERSION) {
    LOGE("Incompatible nanoapp (target ver 0x%" PRIx32
         ", first supported ver 0x%" PRIx32 ")",
         nanoapp->getTargetApiVersion(),
         static_cast<uint32_t>(CHRE_FIRST_SUPPORTED_API_VERSION));
  } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(),
                                                    &existingInstanceId)) {
    // An app ID may only be loaded once at a time.
    LOGE("App with ID 0x%016" PRIx64
         " already exists as instance ID 0x%" PRIx32,
         nanoapp->getAppId(), existingInstanceId);
  } else if (!mNanoapps.prepareForPush()) {
    LOG_OOM();
  } else {
    nanoapp->setInstanceId(eventLoopManager->getNextInstanceId());
    LOGD("Instance ID %" PRIu32 " assigned to app ID 0x%016" PRIx64,
         nanoapp->getInstanceId(), nanoapp->getAppId());

    Nanoapp *newNanoapp = nanoapp.get();
    {
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.push_back(std::move(nanoapp));
      // After this point, nanoapp is null as we've transferred ownership into
      // mNanoapps.back() - use newNanoapp to reference it
    }

    // Invoke the nanoapp's start() entry point with it set as the current
    // context so API calls it makes are attributed to it.
    mCurrentApp = newNanoapp;
    success = newNanoapp->start();
    mCurrentApp = nullptr;
    if (!success) {
      // TODO: to be fully safe, need to purge/flush any events and messages
      // sent by the nanoapp here (but don't call nanoappEnd). For now, we just
      // destroy the Nanoapp instance.
      LOGE("Nanoapp %" PRIu32 " failed to start", newNanoapp->getInstanceId());

      // Note that this lock protects against concurrent read and modification
      // of mNanoapps, but we are assured that no new nanoapps were added since
      // we pushed the new nanoapp
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.pop_back();
    } else {
      notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp);
    }
  }

  return success;
}
202 
/**
 * Stops and unloads the nanoapp with the given instance ID, after flushing all
 * messages and events it has in flight so that nothing can reference its
 * memory after it is destroyed. The flush sequence below is strictly ordered.
 *
 * @param instanceId Instance ID of the nanoapp to unload
 * @param allowSystemNanoappUnload If false, refuses to unload system nanoapps
 * @return true if the nanoapp was found and unloaded
 */
bool EventLoop::unloadNanoapp(uint32_t instanceId,
                              bool allowSystemNanoappUnload) {
  bool unloaded = false;

  for (size_t i = 0; i < mNanoapps.size(); i++) {
    if (instanceId == mNanoapps[i]->getInstanceId()) {
      if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) {
        LOGE("Refusing to unload system nanoapp");
      } else {
        // Make sure all messages sent by this nanoapp at least have their
        // associated free callback processing pending in the event queue (i.e.
        // there are no messages pending delivery to the host)
        EventLoopManagerSingleton::get()
            ->getHostCommsManager()
            .flushMessagesSentByNanoapp(mNanoapps[i]->getAppId());

        // Distribute all inbound events we have at this time - here we're
        // interested in handling any message free callbacks generated by
        // flushMessagesSentByNanoapp()
        flushInboundEventQueue();

        // Mark that this nanoapp is stopping early, so it can't send events or
        // messages during the nanoapp event queue flush
        mStoppingNanoapp = mNanoapps[i].get();

        // Process any pending events, with the intent of ensuring that we free
        // all events generated by this nanoapp
        flushNanoappEventQueues();

        // Post the unload event now (so we can reference the Nanoapp instance
        // directly), but nanoapps won't get it until after the unload completes
        notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp);

        // Finally, we are at a point where there should not be any pending
        // events or messages sent by the app that could potentially reference
        // the nanoapp's memory, so we are safe to unload it
        unloadNanoappAtIndex(i);
        mStoppingNanoapp = nullptr;

        // TODO: right now we assume that the nanoapp will clean up all of its
        // resource allocations in its nanoappEnd callback (memory, sensor
        // subscriptions, etc.), otherwise we're leaking resources. We should
        // perform resource cleanup automatically here to avoid these types of
        // potential leaks.

        LOGD("Unloaded nanoapp with instanceId %" PRIu32, instanceId);
        unloaded = true;
      }
      break;
    }
  }

  return unloaded;
}
257 
postEventOrDie(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t targetInstanceId,uint16_t targetGroupMask)258 void EventLoop::postEventOrDie(uint16_t eventType, void *eventData,
259                                chreEventCompleteFunction *freeCallback,
260                                uint32_t targetInstanceId,
261                                uint16_t targetGroupMask) {
262   if (mRunning) {
263     if (!allocateAndPostEvent(eventType, eventData, freeCallback,
264                               kSystemInstanceId, targetInstanceId,
265                               targetGroupMask)) {
266       FATAL_ERROR("Failed to post critical system event 0x%" PRIx16, eventType);
267     }
268   } else if (freeCallback != nullptr) {
269     freeCallback(eventType, eventData);
270   }
271 }
272 
postSystemEvent(uint16_t eventType,void * eventData,SystemEventCallbackFunction * callback,void * extraData)273 bool EventLoop::postSystemEvent(uint16_t eventType, void *eventData,
274                                 SystemEventCallbackFunction *callback,
275                                 void *extraData) {
276   if (mRunning) {
277     Event *event =
278         mEventPool.allocate(eventType, eventData, callback, extraData);
279 
280     if (event == nullptr || !mEvents.push(event)) {
281       FATAL_ERROR("Failed to post critical system event 0x%" PRIx16, eventType);
282     }
283     return true;
284   }
285   return false;
286 }
287 
postLowPriorityEventOrFree(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t senderInstanceId,uint32_t targetInstanceId,uint16_t targetGroupMask)288 bool EventLoop::postLowPriorityEventOrFree(
289     uint16_t eventType, void *eventData,
290     chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
291     uint32_t targetInstanceId, uint16_t targetGroupMask) {
292   bool eventPosted = false;
293 
294   if (mRunning) {
295     if (mEventPool.getFreeBlockCount() > kMinReservedHighPriorityEventCount) {
296       eventPosted = allocateAndPostEvent(eventType, eventData, freeCallback,
297                                          senderInstanceId, targetInstanceId,
298                                          targetGroupMask);
299       if (!eventPosted) {
300         LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu32,
301              eventType, targetInstanceId);
302       }
303     }
304   }
305 
306   if (!eventPosted && freeCallback != nullptr) {
307     freeCallback(eventType, eventData);
308   }
309 
310   return eventPosted;
311 }
312 
stop()313 void EventLoop::stop() {
314   auto callback = [](uint16_t /*type*/, void *data, void * /*extraData*/) {
315     auto *obj = static_cast<EventLoop *>(data);
316     obj->onStopComplete();
317   };
318 
319   // Stop accepting new events and tell the main loop to finish
320   postSystemEvent(static_cast<uint16_t>(SystemCallbackType::Shutdown),
321                   /*eventData=*/this, callback, /*extraData=*/nullptr);
322 }
323 
/**
 * Callback invoked on the event loop thread once the Shutdown system event
 * posted by stop() is processed; clearing mRunning causes run() to exit after
 * finishing its current iteration.
 */
void EventLoop::onStopComplete() {
  mRunning = false;
}
327 
findNanoappByInstanceId(uint32_t instanceId) const328 Nanoapp *EventLoop::findNanoappByInstanceId(uint32_t instanceId) const {
329   ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
330   return lookupAppByInstanceId(instanceId);
331 }
332 
populateNanoappInfoForAppId(uint64_t appId,struct chreNanoappInfo * info) const333 bool EventLoop::populateNanoappInfoForAppId(
334     uint64_t appId, struct chreNanoappInfo *info) const {
335   ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
336   Nanoapp *app = lookupAppByAppId(appId);
337   return populateNanoappInfo(app, info);
338 }
339 
populateNanoappInfoForInstanceId(uint32_t instanceId,struct chreNanoappInfo * info) const340 bool EventLoop::populateNanoappInfoForInstanceId(
341     uint32_t instanceId, struct chreNanoappInfo *info) const {
342   ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
343   Nanoapp *app = lookupAppByInstanceId(instanceId);
344   return populateNanoappInfo(app, info);
345 }
346 
currentNanoappIsStopping() const347 bool EventLoop::currentNanoappIsStopping() const {
348   return (mCurrentApp == mStoppingNanoapp || !mRunning);
349 }
350 
logStateToBuffer(DebugDumpWrapper & debugDump) const351 void EventLoop::logStateToBuffer(DebugDumpWrapper &debugDump) const {
352   debugDump.print("\nEvent Loop:\n");
353   debugDump.print("  Max event pool usage: %zu/%zu\n", mMaxEventPoolUsage,
354                   kMaxEventCount);
355 
356   Nanoseconds timeSince =
357       SystemTime::getMonotonicTime() - mTimeLastWakeupBucketCycled;
358   uint64_t timeSinceMins =
359       timeSince.toRawNanoseconds() / kOneMinuteInNanoseconds;
360   uint64_t durationMins =
361       kIntervalWakeupBucket.toRawNanoseconds() / kOneMinuteInNanoseconds;
362   debugDump.print("  Nanoapp host wakeup tracking: cycled %" PRIu64
363                   "mins ago, bucketDuration=%" PRIu64 "mins\n",
364                   timeSinceMins, durationMins);
365 
366   debugDump.print("\nNanoapps:\n");
367   for (const UniquePtr<Nanoapp> &app : mNanoapps) {
368     app->logStateToBuffer(debugDump);
369   }
370 }
371 
allocateAndPostEvent(uint16_t eventType,void * eventData,chreEventCompleteFunction * freeCallback,uint32_t senderInstanceId,uint32_t targetInstanceId,uint16_t targetGroupMask)372 bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData,
373                                      chreEventCompleteFunction *freeCallback,
374                                      uint32_t senderInstanceId,
375                                      uint32_t targetInstanceId,
376                                      uint16_t targetGroupMask) {
377   bool success = false;
378 
379   Event *event =
380       mEventPool.allocate(eventType, eventData, freeCallback, senderInstanceId,
381                           targetInstanceId, targetGroupMask);
382   if (event != nullptr) {
383     success = mEvents.push(event);
384   }
385 
386   return success;
387 }
388 
deliverEvents()389 bool EventLoop::deliverEvents() {
390   bool havePendingEvents = false;
391 
392   // Do one loop of round-robin. We might want to have some kind of priority or
393   // time sharing in the future, but this should be good enough for now.
394   for (const UniquePtr<Nanoapp> &app : mNanoapps) {
395     if (app->hasPendingEvent()) {
396       havePendingEvents |= deliverNextEvent(app);
397     }
398   }
399 
400   return havePendingEvents;
401 }
402 
/**
 * Delivers the next pending event in the given nanoapp's queue, running the
 * handler with the nanoapp set as the current context, and frees the event
 * once no nanoapp references it anymore.
 *
 * @param app The nanoapp to deliver an event to
 * @return true if the nanoapp has another event still pending in its queue
 */
bool EventLoop::deliverNextEvent(const UniquePtr<Nanoapp> &app) {
  // TODO: cleaner way to set/clear this? RAII-style?
  mCurrentApp = app.get();
  Event *event = app->processNextEvent();
  mCurrentApp = nullptr;

  // If this nanoapp was the last one referencing the event, return it to the
  // pool (invoking its free callback).
  if (event->isUnreferenced()) {
    freeEvent(event);
  }

  return app->hasPendingEvent();
}
415 
distributeEvent(Event * event)416 void EventLoop::distributeEvent(Event *event) {
417   for (const UniquePtr<Nanoapp> &app : mNanoapps) {
418     if ((event->targetInstanceId == chre::kBroadcastInstanceId &&
419          app->isRegisteredForBroadcastEvent(event->eventType,
420                                             event->targetAppGroupMask)) ||
421         event->targetInstanceId == app->getInstanceId()) {
422       app->postEvent(event);
423     }
424   }
425 
426   if (event->isUnreferenced()) {
427     // Log if an event unicast to a nanoapp isn't delivered, as this is could be
428     // a bug (e.g. something isn't properly keeping track of when nanoapps are
429     // unloaded), though it could just be a harmless transient issue (e.g. race
430     // condition with nanoapp unload, where we post an event to a nanoapp just
431     // after queues are flushed while it's unloading)
432     if (event->targetInstanceId != kBroadcastInstanceId &&
433         event->targetInstanceId != kSystemInstanceId) {
434       LOGW("Dropping event 0x%" PRIx16 " from instanceId %" PRIu32 "->%" PRIu32,
435            event->eventType, event->senderInstanceId, event->targetInstanceId);
436     }
437     freeEvent(event);
438   }
439 }
440 
flushInboundEventQueue()441 void EventLoop::flushInboundEventQueue() {
442   while (!mEvents.empty()) {
443     distributeEvent(mEvents.pop());
444   }
445 }
446 
flushNanoappEventQueues()447 void EventLoop::flushNanoappEventQueues() {
448   while (deliverEvents())
449     ;
450 }
451 
freeEvent(Event * event)452 void EventLoop::freeEvent(Event *event) {
453   if (event->hasFreeCallback()) {
454     // TODO: find a better way to set the context to the creator of the event
455     mCurrentApp = lookupAppByInstanceId(event->senderInstanceId);
456     event->invokeFreeCallback();
457     mCurrentApp = nullptr;
458   }
459 
460   mEventPool.deallocate(event);
461 }
462 
lookupAppByAppId(uint64_t appId) const463 Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const {
464   for (const UniquePtr<Nanoapp> &app : mNanoapps) {
465     if (app->getAppId() == appId) {
466       return app.get();
467     }
468   }
469 
470   return nullptr;
471 }
472 
lookupAppByInstanceId(uint32_t instanceId) const473 Nanoapp *EventLoop::lookupAppByInstanceId(uint32_t instanceId) const {
474   // The system instance ID always has nullptr as its Nanoapp pointer, so can
475   // skip iterating through the nanoapp list for that case
476   if (instanceId != kSystemInstanceId) {
477     for (const UniquePtr<Nanoapp> &app : mNanoapps) {
478       if (app->getInstanceId() == instanceId) {
479         return app.get();
480       }
481     }
482   }
483 
484   return nullptr;
485 }
486 
notifyAppStatusChange(uint16_t eventType,const Nanoapp & nanoapp)487 void EventLoop::notifyAppStatusChange(uint16_t eventType,
488                                       const Nanoapp &nanoapp) {
489   auto *info = memoryAlloc<chreNanoappInfo>();
490   if (info == nullptr) {
491     LOG_OOM();
492   } else {
493     info->appId = nanoapp.getAppId();
494     info->version = nanoapp.getAppVersion();
495     info->instanceId = nanoapp.getInstanceId();
496 
497     postEventOrDie(eventType, info, freeEventDataCallback);
498   }
499 }
500 
/**
 * Stops and destroys the nanoapp at the given index in mNanoapps: invokes its
 * end() entry point with the nanoapp set as the current context, then erases
 * it from the list while holding mNanoappsLock.
 *
 * @param index A valid index into mNanoapps
 */
void EventLoop::unloadNanoappAtIndex(size_t index) {
  const UniquePtr<Nanoapp> &nanoapp = mNanoapps[index];

  // Lock here to prevent the nanoapp instance from being accessed between the
  // time it is ended and fully erased
  LockGuard<Mutex> lock(mNanoappsLock);

  // Let the app know it's going away
  mCurrentApp = nanoapp.get();
  nanoapp->end();
  mCurrentApp = nullptr;

  // Destroy the Nanoapp instance
  mNanoapps.erase(index);
}
516 
handleNanoappWakeupBuckets()517 void EventLoop::handleNanoappWakeupBuckets() {
518   Nanoseconds now = SystemTime::getMonotonicTime();
519   Nanoseconds duration = now - mTimeLastWakeupBucketCycled;
520   if (duration > kIntervalWakeupBucket) {
521     size_t numBuckets = static_cast<size_t>(
522         duration.toRawNanoseconds() / kIntervalWakeupBucket.toRawNanoseconds());
523     mTimeLastWakeupBucketCycled = now;
524     for (auto &nanoapp : mNanoapps) {
525       nanoapp->cycleWakeupBuckets(numBuckets);
526     }
527   }
528 }
529 
530 }  // namespace chre
531