/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "chre/core/event_loop.h"

#include "chre/core/event.h"
#include "chre/core/event_loop_manager.h"
#include "chre/core/nanoapp.h"
#include "chre/platform/context.h"
#include "chre/platform/fatal_error.h"
#include "chre/platform/log.h"
#include "chre/util/conditional_lock_guard.h"
#include "chre/util/lock_guard.h"
#include "chre/util/system/debug_dump.h"
#include "chre_api/chre/version.h"

namespace chre {

namespace {

/**
 * Populates a chreNanoappInfo structure using info from the given Nanoapp
 * instance.
 *
 * @param app A potentially null pointer to the Nanoapp to read from
 * @param info The structure to populate - should not be null, but this function
 *        will handle that input
 *
 * @return true if neither app nor info were null, and info was populated
 */
bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) {
  bool success = false;

  if (app != nullptr && info != nullptr) {
    info->appId      = app->getAppId();
    info->version    = app->getAppVersion();
    info->instanceId = app->getInstanceId();
    success = true;
  }

  return success;
}

}  // anonymous namespace

bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId,
                                             uint32_t *instanceId) const {
  CHRE_ASSERT(instanceId != nullptr);
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());

  bool found = false;
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if (app->getAppId() == appId) {
      *instanceId = app->getInstanceId();
      found = true;
      break;
    }
  }

  return found;
}

void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());

  for (const UniquePtr<Nanoapp>& nanoapp : mNanoapps) {
    callback(nanoapp.get(), data);
  }
}

void EventLoop::invokeMessageFreeFunction(
    uint64_t appId, chreMessageFreeFunction *freeFunction, void *message,
    size_t messageSize) {
  Nanoapp *nanoapp = lookupAppByAppId(appId);
  if (nanoapp == nullptr) {
    LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId);
  } else {
    auto prevCurrentApp = mCurrentApp;
    mCurrentApp = nanoapp;
    freeFunction(message, messageSize);
    mCurrentApp = prevCurrentApp;
  }
}

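// Main event loop: drains the inbound queue, fans events out to per-nanoapp
// queues, and delivers them until stop() clears mRunning, then flushes any
// remaining events and unloads all nanoapps.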
void EventLoop::run() {
  LOGI("EventLoop start");

  bool havePendingEvents = false;
  while (mRunning) {
    // Events are delivered in two stages: first they arrive in the inbound
    // event queue mEvents (potentially posted from another thread), then within
    // this context these events are distributed to smaller event queues
    // associated with each Nanoapp that should receive the event. Once the
    // event is delivered to all interested Nanoapps, its free callback is
    // invoked.
    if (!havePendingEvents || !mEvents.empty()) {
      if (mEvents.size() > mMaxEventPoolUsage) {
        mMaxEventPoolUsage = mEvents.size();
      }

      // mEvents.pop() will be a blocking call if mEvents.empty()
      distributeEvent(mEvents.pop());
    }

    havePendingEvents = deliverEvents();

    mPowerControlManager.postEventLoopProcess(mEvents.size());
  }

  // Deliver any events sitting in Nanoapps' own queues (we could drop them to
  // exit faster, but this is less code and should complete quickly under normal
  // conditions), then purge the main queue of events pending distribution. All
  // nanoapps should be prevented from sending events or messages at this point
  // via currentNanoappIsStopping() returning true.
  flushNanoappEventQueues();
  while (!mEvents.empty()) {
    freeEvent(mEvents.pop());
  }

  // Unload all running nanoapps
  while (!mNanoapps.empty()) {
    unloadNanoappAtIndex(mNanoapps.size() - 1);
  }

  LOGI("Exiting EventLoop");
}

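// Assigns an instance ID to the nanoapp, transfers its ownership into
// mNanoapps, and invokes its start() entry point with mCurrentApp set; on
// failure the nanoapp is removed again and false is returned.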
bool EventLoop::startNanoapp(UniquePtr<Nanoapp>& nanoapp) {
  CHRE_ASSERT(!nanoapp.isNull());
  bool success = false;
  auto *eventLoopManager = EventLoopManagerSingleton::get();
  EventLoop& eventLoop = eventLoopManager->getEventLoop();
  uint32_t existingInstanceId;

  if (nanoapp.isNull()) {
    // no-op, invalid argument
  } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(),
                                                    &existingInstanceId)) {
    LOGE("App with ID 0x%016" PRIx64 " already exists as instance ID 0x%"
         PRIx32, nanoapp->getAppId(), existingInstanceId);
  } else if (!mNanoapps.prepareForPush()) {
    LOGE("Failed to allocate space for new nanoapp");
  } else {
    nanoapp->setInstanceId(eventLoopManager->getNextInstanceId());
    LOGD("Instance ID %" PRIu32 " assigned to app ID 0x%016" PRIx64,
         nanoapp->getInstanceId(), nanoapp->getAppId());

    Nanoapp *newNanoapp = nanoapp.get();
    {
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.push_back(std::move(nanoapp));
      // After this point, nanoapp is null as we've transferred ownership into
      // mNanoapps.back() - use newNanoapp to reference it
    }

    mCurrentApp = newNanoapp;
    success = newNanoapp->start();
    mCurrentApp = nullptr;
    if (!success) {
      // TODO: to be fully safe, need to purge/flush any events and messages
      // sent by the nanoapp here (but don't call nanoappEnd). For now, we just
      // destroy the Nanoapp instance.
      LOGE("Nanoapp %" PRIu32 " failed to start", newNanoapp->getInstanceId());

      // Note that this lock protects against concurrent read and modification
      // of mNanoapps, but we are assured that no new nanoapps were added since
      // we pushed the new nanoapp
      LockGuard<Mutex> lock(mNanoappsLock);
      mNanoapps.pop_back();
    } else {
      notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp);
    }
  }

  return success;
}

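// Stops and unloads the nanoapp with the given instance ID after flushing the
// messages and events it produced, so nothing can reference its memory once it
// is destroyed. System nanoapps are only unloaded when explicitly allowed.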
bool EventLoop::unloadNanoapp(uint32_t instanceId,
                              bool allowSystemNanoappUnload) {
  bool unloaded = false;

  for (size_t i = 0; i < mNanoapps.size(); i++) {
    if (instanceId == mNanoapps[i]->getInstanceId()) {
      if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) {
        LOGE("Refusing to unload system nanoapp");
      } else {
        // Make sure all messages sent by this nanoapp at least have their
        // associated free callback processing pending in the event queue (i.e.
        // there are no messages pending delivery to the host)
        EventLoopManagerSingleton::get()->getHostCommsManager()
            .flushMessagesSentByNanoapp(mNanoapps[i]->getAppId());

        // Distribute all inbound events we have at this time - here we're
        // interested in handling any message free callbacks generated by
        // flushMessagesSentByNanoapp()
        flushInboundEventQueue();

        // Mark that this nanoapp is stopping early, so it can't send events or
        // messages during the nanoapp event queue flush
        mStoppingNanoapp = mNanoapps[i].get();

        // Process any pending events, with the intent of ensuring that we free
        // all events generated by this nanoapp
        flushNanoappEventQueues();

        // Post the unload event now (so we can reference the Nanoapp instance
        // directly), but nanoapps won't get it until after the unload completes
        notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp);

        // Finally, we are at a point where there should not be any pending
        // events or messages sent by the app that could potentially reference
        // the nanoapp's memory, so we are safe to unload it
        unloadNanoappAtIndex(i);
        mStoppingNanoapp = nullptr;

        // TODO: right now we assume that the nanoapp will clean up all of its
        // resource allocations in its nanoappEnd callback (memory, sensor
        // subscriptions, etc.), otherwise we're leaking resources. We should
        // perform resource cleanup automatically here to avoid these types of
        // potential leaks.

        LOGD("Unloaded nanoapp with instanceId %" PRIu32, instanceId);
        unloaded = true;
      }
      break;
    }
  }

  return unloaded;
}

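// Posts an event to the inbound queue. Events from non-system senders are
// rejected once the free block count drops to the reserved system threshold;
// a failed allocation in the remaining cases is treated as a fatal error.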
bool EventLoop::postEvent(uint16_t eventType, void *eventData,
    chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
    uint32_t targetInstanceId) {
  bool success = false;

  if (mRunning && (senderInstanceId == kSystemInstanceId ||
      mEventPool.getFreeBlockCount() > kMinReservedSystemEventCount)) {
    success = allocateAndPostEvent(eventType, eventData, freeCallback,
                                   senderInstanceId, targetInstanceId);
    if (!success) {
      // This can only happen if the event is a system event type. This
      // postEvent method will fail if a non-system event is posted when the
      // memory pool is close to full.
      FATAL_ERROR("Failed to allocate system event type %" PRIu16, eventType);
    }
  }

  return success;
}

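// Posts an event to the inbound queue without the system reservation check;
// if allocation fails, the free callback is invoked immediately so eventData
// is not leaked.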
bool EventLoop::postEventOrFree(uint16_t eventType, void *eventData,
    chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
    uint32_t targetInstanceId) {
  bool success = false;

  if (mRunning) {
    success = allocateAndPostEvent(eventType, eventData, freeCallback,
                                   senderInstanceId, targetInstanceId);
    if (!success) {
      freeCallback(eventType, eventData);
      LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu32,
           eventType, targetInstanceId);
    }
  }

  return success;
}

void EventLoop::stop() {
  postEvent(0, nullptr, nullptr, kSystemInstanceId, kSystemInstanceId);
  // Stop accepting new events and tell the main loop to finish
  mRunning = false;
}

Nanoapp *EventLoop::findNanoappByInstanceId(uint32_t instanceId) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  return lookupAppByInstanceId(instanceId);
}

bool EventLoop::populateNanoappInfoForAppId(
    uint64_t appId, struct chreNanoappInfo *info) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  Nanoapp *app = lookupAppByAppId(appId);
  return populateNanoappInfo(app, info);
}

bool EventLoop::populateNanoappInfoForInstanceId(
    uint32_t instanceId, struct chreNanoappInfo *info) const {
  ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread());
  Nanoapp *app = lookupAppByInstanceId(instanceId);
  return populateNanoappInfo(app, info);
}

bool EventLoop::currentNanoappIsStopping() const {
  return (mCurrentApp == mStoppingNanoapp || !mRunning);
}

bool EventLoop::logStateToBuffer(char *buffer, size_t *bufferPos,
                                 size_t bufferSize) const {
  bool success = debugDumpPrint(buffer, bufferPos, bufferSize, "\nNanoapps:\n");
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    success &= app->logStateToBuffer(buffer, bufferPos, bufferSize);
  }

  success &= debugDumpPrint(buffer, bufferPos, bufferSize,
                            "\nEvent Loop:\n");
  success &= debugDumpPrint(buffer, bufferPos, bufferSize,
                            "  Max event pool usage: %zu/%zu\n",
                            mMaxEventPoolUsage, kMaxEventCount);
  return success;
}

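// Allocates an Event from mEventPool and pushes it onto the inbound queue;
// returns false if the pool is exhausted or the push fails.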
bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData,
    chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId,
    uint32_t targetInstanceId) {
  bool success = false;

  Event *event = mEventPool.allocate(eventType, eventData, freeCallback,
                                     senderInstanceId, targetInstanceId);
  if (event != nullptr) {
    success = mEvents.push(event);
  }
  return success;
}

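// Delivers at most one pending event to each nanoapp; returns true if any
// nanoapp still has events queued afterwards.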
bool EventLoop::deliverEvents() {
  bool havePendingEvents = false;

  // Do one loop of round-robin. We might want to have some kind of priority or
  // time sharing in the future, but this should be good enough for now.
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if (app->hasPendingEvent()) {
      havePendingEvents |= deliverNextEvent(app);
    }
  }

  return havePendingEvents;
}

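// Has the given nanoapp handle its next queued event with mCurrentApp set,
// freeing the event once no nanoapp references it.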
bool EventLoop::deliverNextEvent(const UniquePtr<Nanoapp>& app) {
  // TODO: cleaner way to set/clear this? RAII-style?
  mCurrentApp = app.get();
  Event *event = app->processNextEvent();
  mCurrentApp = nullptr;

  if (event->isUnreferenced()) {
    freeEvent(event);
  }

  return app->hasPendingEvent();
}

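// Queues the event on every nanoapp it targets (a specific instance, or all
// apps registered for a broadcast); events that reach no nanoapp are freed.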
void EventLoop::distributeEvent(Event *event) {
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if ((event->targetInstanceId == chre::kBroadcastInstanceId
            && app->isRegisteredForBroadcastEvent(event->eventType))
        || event->targetInstanceId == app->getInstanceId()) {
      app->postEvent(event);
    }
  }

  if (event->isUnreferenced()) {
    // Events sent to the system instance ID are processed via the free callback
    // and are not expected to be delivered to any nanoapp, so no need to log a
    // warning in that case
    if (event->senderInstanceId != kSystemInstanceId) {
      LOGW("Dropping event 0x%" PRIx16, event->eventType);
    }
    freeEvent(event);
  }
}

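// Distributes everything currently in the inbound queue to nanoapp queues.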
void EventLoop::flushInboundEventQueue() {
  while (!mEvents.empty()) {
    distributeEvent(mEvents.pop());
  }
}

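// Keeps delivering until no nanoapp has a pending event.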
void EventLoop::flushNanoappEventQueues() {
  while (deliverEvents());
}

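// Invokes the event's free callback (in the context of the sending nanoapp)
// and returns the Event to the pool.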
void EventLoop::freeEvent(Event *event) {
  if (event->freeCallback != nullptr) {
    // TODO: find a better way to set the context to the creator of the event
    mCurrentApp = lookupAppByInstanceId(event->senderInstanceId);
    event->freeCallback(event->eventType, event->eventData);
    mCurrentApp = nullptr;
  }

  mEventPool.deallocate(event);
}

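// Linear search of mNanoapps by app ID; returns nullptr if not found. Does not
// acquire mNanoappsLock, so callers handle any needed synchronization.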
Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const {
  for (const UniquePtr<Nanoapp>& app : mNanoapps) {
    if (app->getAppId() == appId) {
      return app.get();
    }
  }

  return nullptr;
}

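// Linear search of mNanoapps by instance ID; kSystemInstanceId always maps to
// nullptr. Callers handle any needed synchronization.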
Nanoapp *EventLoop::lookupAppByInstanceId(uint32_t instanceId) const {
  // The system instance ID always has nullptr as its Nanoapp pointer, so can
  // skip iterating through the nanoapp list for that case
  if (instanceId != kSystemInstanceId) {
    for (const UniquePtr<Nanoapp>& app : mNanoapps) {
      if (app->getInstanceId() == instanceId) {
        return app.get();
      }
    }
  }

  return nullptr;
}

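// Posts a nanoapp status change event (e.g. CHRE_EVENT_NANOAPP_STARTED or
// CHRE_EVENT_NANOAPP_STOPPED) carrying a heap-allocated chreNanoappInfo.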
void EventLoop::notifyAppStatusChange(uint16_t eventType,
                                      const Nanoapp& nanoapp) {
  auto *info = memoryAlloc<chreNanoappInfo>();
  if (info == nullptr) {
    LOGE("Couldn't alloc app status change event");
  } else {
    info->appId      = nanoapp.getAppId();
    info->version    = nanoapp.getAppVersion();
    info->instanceId = nanoapp.getInstanceId();

    postEvent(eventType, info, freeEventDataCallback);
  }
}

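// Calls the nanoapp's end() entry point, then erases it from mNanoapps (and
// thereby destroys it) while holding mNanoappsLock.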
void EventLoop::unloadNanoappAtIndex(size_t index) {
  const UniquePtr<Nanoapp>& nanoapp = mNanoapps[index];

  // Let the app know it's going away
  mCurrentApp = nanoapp.get();
  nanoapp->end();
  mCurrentApp = nullptr;

  // Destroy the Nanoapp instance
  {
    LockGuard<Mutex> lock(mNanoappsLock);
    mNanoapps.erase(index);
  }
}

}  // namespace chre