/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <plat/inc/eeData.h>
#include <plat/inc/plat.h>
#include <plat/inc/bl.h>
#include <plat/inc/wdt.h>
#include <platform.h>
#include <hostIntf.h>
#include <inttypes.h>
#include <syscall.h>
#include <sensors.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <printf.h>
#include <eventQ.h>
#include <apInt.h>
#include <timer.h>
#include <osApi.h>
#include <seos.h>
#include <heap.h>
#include <slab.h>
#include <cpu.h>
#include <util.h>
#include <mpu.h>
#include <nanohubPacket.h>
#include <atomic.h>

#include <nanohub/nanohub.h>
#include <nanohub/crc.h>

#define NO_NODE (TaskIndex)(-1)
#define for_each_task(listHead, task) for (task = osTaskByIdx((listHead)->next); task; task = osTaskByIdx(task->list.next))
#define MAKE_NEW_TID(task) task->tid = ((task->tid + TASK_TID_INCREMENT) & TASK_TID_COUNTER_MASK) | \
                                       (osTaskIndex(task) & TASK_TID_IDX_MASK);
#define TID_TO_TASK_IDX(tid) (tid & TASK_TID_IDX_MASK)
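/*
 * A TID therefore encodes the task's pool index in its low TASK_TID_IDX_MASK
 * bits and a per-slot counter in the remaining bits; MAKE_NEW_TID bumps the
 * counter every time a Task slot is reused, so a stale TID held by an old
 * owner of the slot no longer compares equal to the current task's TID.
 */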

#define FL_TASK_STOPPED 1

#define EVT_SUBSCRIBE_TO_EVT    0x00000000
#define EVT_UNSUBSCRIBE_TO_EVT  0x00000001
#define EVT_DEFERRED_CALLBACK   0x00000002
#define EVT_PRIVATE_EVT         0x00000003

#define EVENT_WITH_ORIGIN(evt, origin) (((evt) & EVT_MASK) | ((origin) << (32 - TASK_TID_BITS)))
#define EVENT_GET_ORIGIN(evt) ((evt) >> (32 - TASK_TID_BITS))
#define EVENT_GET_EVENT(evt) ((evt) & (EVT_MASK & ~EVENT_TYPE_BIT_DISCARDABLE))

/*
 * Locking is difficult to do right for adding/removing listeners and such,
 * since those operations can happen both in and out of interrupt context and
 * one such operation can interrupt another. Since we already have a working
 * event queue, we instead enqueue all such requests and deal with them in the
 * main code only when the event bubbles up to the front of the queue. This
 * allows us to avoid locks around these data structures.
 */
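/*
 * Concretely: osEventSubscribe()/osEventUnsubscribe() only enqueue an
 * EVT_SUBSCRIBE_TO_EVT/EVT_UNSUBSCRIBE_TO_EVT request; the subscription lists
 * are only ever touched from osInternalEvtHandle() in the main dispatch loop.
 */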

SET_PACKED_STRUCT_MODE_ON
struct TaskList {
    TaskIndex prev;
    TaskIndex next;
} ATTRIBUTE_PACKED;
SET_PACKED_STRUCT_MODE_OFF

struct Task {
    /* App entry points */
    const struct AppHdr *app;

    /* per-platform app info */
    struct PlatAppInfo platInfo;

    /* for some basic number of subbed events, the array is stored directly here. after that, a heap chunk is used */
    uint32_t subbedEventsInt[MAX_EMBEDDED_EVT_SUBS];
    uint32_t *subbedEvents; /* NULL for invalid tasks */

    struct TaskList list;

    /* task pointer will not change throughout task lifetime,
     * however same task pointer may be reused for a new task; to eliminate the ambiguity,
     * TID is maintained for each task such that new tasks will be guaranteed to receive different TID */
    uint16_t tid;

    uint8_t subbedEvtCount;
    uint8_t subbedEvtListSz;
    uint8_t flags;
    uint8_t ioCount;

};

struct TaskPool {
    struct Task data[MAX_TASKS];
};

union InternalThing {
    struct {
        uint32_t tid;
        uint32_t evt;
    } evtSub;
    struct {
        OsDeferCbkF callback;
        void *cookie;
    } deferred;
    struct {
        uint32_t evtType;
        void *evtData;
        TaggedPtr evtFreeInfo;
        uint32_t toTid;
    } privateEvt;
    union OsApiSlabItem osApiItem;
};

static struct TaskPool mTaskPool;
static struct EvtQueue *mEvtsInternal;
static struct SlabAllocator* mMiscInternalThingsSlab;
static struct TaskList mFreeTasks;
static struct TaskList mTasks;
static struct Task *mCurrentTask;
static struct Task *mSystemTask;
static TaggedPtr *mCurEvtEventFreeingInfo = NULL; //used as flag for retaining. NULL when none or already retained

static inline void list_init(struct TaskList *l)
{
    l->prev = l->next = NO_NODE;
}

static inline struct Task *osGetCurrentTask()
{
    return mCurrentTask;
}

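/* Atomically make 'task' the current task and return the task that was
 * current before, so the caller can restore it when done impersonating
 * another task. */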
static struct Task *osSetCurrentTask(struct Task *task)
{
    struct Task *old = mCurrentTask;
    while (true) {
        old = mCurrentTask;
        if (atomicCmpXchgPtr((uintptr_t*)&mCurrentTask, (uintptr_t)old, (uintptr_t)task)) {
            break;
        }
    }
    return old;
}

// beyond this point, no one shall access mCurrentTask directly

static inline bool osTaskTestFlags(struct Task *task, uint32_t mask)
{
    return (atomicReadByte(&task->flags) & mask) != 0;
}

static inline uint32_t osTaskClrSetFlags(struct Task *task, uint32_t clrMask, uint32_t setMask)
{
    while (true) {
        uint8_t flags = atomicReadByte(&task->flags);
        uint8_t newFlags = (flags & ~clrMask) | setMask;
        if (atomicCmpXchgByte(&task->flags, flags, newFlags))
            return newFlags;
    }
}

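/* ioCount tracks events still in flight that were enqueued by this task;
 * osStopTask() uses it to decide whether the task can be torn down right
 * away or only once its outstanding events have drained from the queue. */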
static inline uint32_t osTaskAddIoCount(struct Task *task, int32_t delta)
{
    uint8_t count = atomicAddByte(&task->ioCount, delta);

    count += delta; // atomicAddByte() returns the old value, so apply delta again to get the new count

    return count;
}

static inline uint32_t osTaskGetIoCount(struct Task *task)
{
    return atomicReadByte(&task->ioCount);
}

static inline uint8_t osTaskIndex(struct Task *task)
{
    // we don't need signed diff here: this way we simplify boundary check
    size_t idx = task - &mTaskPool.data[0];
    return idx >= MAX_TASKS || &mTaskPool.data[idx] != task ? NO_NODE : idx;
}

static inline struct Task *osTaskByIdx(size_t idx)
{
    return idx >= MAX_TASKS ? NULL : &mTaskPool.data[idx];
}

uint32_t osGetCurrentTid()
{
    struct Task *task = osGetCurrentTask();
    if (task == NULL) {
        return UINT32_MAX;
    }
    return task->tid;
}

uint32_t osSetCurrentTid(uint32_t tid)
{
    struct Task *task = osTaskByIdx(TID_TO_TASK_IDX(tid));

    if (task && task->tid == tid) {
        struct Task *preempted = osSetCurrentTask(task);
        return preempted->tid;
    }

    return osGetCurrentTid();
}

static inline struct Task *osTaskListPeekHead(struct TaskList *listHead)
{
    TaskIndex idx = listHead->next;
    return idx == NO_NODE ? NULL : &mTaskPool.data[idx];
}

#ifdef DEBUG
static void dumpListItems(const char *p, struct TaskList *listHead)
{
    int i = 0;
    struct Task *task;

    osLog(LOG_ERROR, "List: %s (%p) [%u;%u]\n",
          p,
          listHead,
          listHead ? listHead->prev : NO_NODE,
          listHead ? listHead->next : NO_NODE
    );
    if (!listHead)
        return;

    for_each_task(listHead, task) {
        osLog(LOG_ERROR, " item %d: task=%p TID=%04X [%u;%u;%u]\n",
              i,
              task,
              task->tid,
              task->list.prev,
              osTaskIndex(task),
              task->list.next
        );
        ++i;
    }
}

static void dumpTaskList(const char *f, struct Task *task, struct TaskList *listHead)
{
    osLog(LOG_ERROR, "%s: pool: %p; task=%p [%u;%u;%u]; listHead=%p [%u;%u]\n",
          f,
          &mTaskPool,
          task,
          task ? task->list.prev : NO_NODE,
          osTaskIndex(task),
          task ? task->list.next : NO_NODE,
          listHead,
          listHead ? listHead->prev : NO_NODE,
          listHead ? listHead->next : NO_NODE
    );
    dumpListItems("Tasks", &mTasks);
    dumpListItems("Free Tasks", &mFreeTasks);
}
#else
#define dumpTaskList(a,b,c)
#endif

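/* The task lists are intrusive doubly-linked lists that store TaskIndex
 * values rather than pointers; NO_NODE in a link means "the list head". */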
static inline void osTaskListRemoveTask(struct TaskList *listHead, struct Task *task)
{
    if (task && listHead) {
        struct TaskList *cur = &task->list;
        TaskIndex left_idx = cur->prev;
        TaskIndex right_idx = cur->next;
        struct TaskList *left = left_idx == NO_NODE ? listHead : &mTaskPool.data[left_idx].list;
        struct TaskList *right = right_idx == NO_NODE ? listHead : &mTaskPool.data[right_idx].list;
        cur->prev = cur->next = NO_NODE;
        left->next = right_idx;
        right->prev = left_idx;
    } else {
        dumpTaskList(__func__, task, listHead);
    }
}

static inline void osTaskListAddTail(struct TaskList *listHead, struct Task *task)
{
    if (task && listHead) {
        struct TaskList *cur = &task->list;
        TaskIndex last_idx = listHead->prev;
        TaskIndex new_idx = osTaskIndex(task);
        struct TaskList *last = last_idx == NO_NODE ? listHead : &mTaskPool.data[last_idx].list;
        cur->prev = last_idx;
        cur->next = NO_NODE;
        last->next = new_idx;
        listHead->prev = new_idx;
    } else {
        dumpTaskList(__func__, task, listHead);
    }
}

static struct Task *osAllocTask()
{
    struct Task *task = osTaskListPeekHead(&mFreeTasks);

    if (task) {
        osTaskListRemoveTask(&mFreeTasks, task);
        uint16_t tid = task->tid;
        memset(task, 0, sizeof(*task));
        task->tid = tid;
    }

    return task;
}

static void osFreeTask(struct Task *task)
{
    if (task) {
        task->flags = 0;
        task->ioCount = 0;
        osTaskListAddTail(&mFreeTasks, task);
    }
}

static void osRemoveTask(struct Task *task)
{
    osTaskListRemoveTask(&mTasks, task);
}

static void osAddTask(struct Task *task)
{
    osTaskListAddTail(&mTasks, task);
}

static inline struct Task* osTaskFindByTid(uint32_t tid)
{
    TaskIndex idx = TID_TO_TASK_IDX(tid);

    return idx < MAX_TASKS ? &mTaskPool.data[idx] : NULL;
}

static inline bool osTaskInit(struct Task *task)
{
    struct Task *preempted = osSetCurrentTask(task);
    bool done = cpuAppInit(task->app, &task->platInfo, task->tid);
    osSetCurrentTask(preempted);
    return done;
}

static inline void osTaskEnd(struct Task *task)
{
    struct Task *preempted = osSetCurrentTask(task);
    uint16_t tid = task->tid;

    cpuAppEnd(task->app, &task->platInfo);

    // task was supposed to release its resources,
    // but we do our cleanup anyway
    osSetCurrentTask(mSystemTask);
    platFreeResources(tid); // HW resources cleanup (IRQ, DMA etc)
    sensorUnregisterAll(tid);
    timTimerCancelAll(tid);
    heapFreeAll(tid);
    // NOTE: we don't need to unsubscribe from events
    osSetCurrentTask(preempted);
}

static inline void osTaskHandle(struct Task *task, uint32_t evtType, const void* evtData)
{
    struct Task *preempted = osSetCurrentTask(task);
    cpuAppHandle(task->app, &task->platInfo, evtType, evtData);
    osSetCurrentTask(preempted);
}

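/* evtFreeData is a TaggedPtr: a plain pointer is treated as an EventFreeF
 * callback to invoke, while a uint is treated as the TID of the app that owns
 * the event data, which then gets EVT_APP_FREE_EVT_DATA to do its own freeing. */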
static void handleEventFreeing(uint32_t evtType, void *evtData, TaggedPtr evtFreeData) // watch out, this is synchronous
{
    if ((taggedPtrIsPtr(evtFreeData) && !taggedPtrToPtr(evtFreeData)) ||
        (taggedPtrIsUint(evtFreeData) && !taggedPtrToUint(evtFreeData)))
        return;

    if (taggedPtrIsPtr(evtFreeData))
        ((EventFreeF)taggedPtrToPtr(evtFreeData))(evtData);
    else {
        struct AppEventFreeData fd = {.evtType = evtType, .evtData = evtData};
        struct Task* task = osTaskFindByTid(taggedPtrToUint(evtFreeData));

        if (!task)
            osLog(LOG_ERROR, "EINCEPTION: Failed to find app to call app to free event sent to app(s).\n");
        else
            osTaskHandle(task, EVT_APP_FREE_EVT_DATA, &fd);
    }
}

static void osInit(void)
{
    heapInit();
    platInitialize();

    osLog(LOG_INFO, "SEOS Initializing\n");
    cpuInitLate();

    /* create the queues */
    if (!(mEvtsInternal = evtQueueAlloc(512, handleEventFreeing))) {
        osLog(LOG_INFO, "events failed to init\n");
        return;
    }

    mMiscInternalThingsSlab = slabAllocatorNew(sizeof(union InternalThing), alignof(union InternalThing), 64 /* for now? */);
    if (!mMiscInternalThingsSlab) {
        osLog(LOG_INFO, "deferred actions list failed to init\n");
        return;
    }
}

static struct Task* osTaskFindByAppID(uint64_t appID)
{
    struct Task *task;

    for_each_task(&mTasks, task) {
        if (task->app && task->app->hdr.appId == appID)
            return task;
    }

    return NULL;
}

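/*
 * The shared flash area is a sequence of variable-sized records, each made of
 * a struct Segment header immediately followed by the app image it describes;
 * the iterator below walks these records from the start of the shared area.
 */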
void osSegmentIteratorInit(struct SegmentIterator *it)
{
    uint32_t sz;
    uint8_t *start = platGetSharedAreaInfo(&sz);

    it->shared = (const struct Segment *)(start);
    it->sharedEnd = (const struct Segment *)(start + sz);
    it->seg = NULL;
}

bool osAppSegmentSetState(const struct AppHdr *app, uint32_t segState)
{
    bool done;
    struct Segment *seg = osGetSegment(app);
    uint8_t state = segState;

    if (!seg)
        return false;

    mpuAllowRamExecution(true);
    mpuAllowRomWrite(true);
    done = BL.blProgramShared(&seg->state, &state, sizeof(state), BL_FLASH_KEY1, BL_FLASH_KEY2);
    mpuAllowRomWrite(false);
    mpuAllowRamExecution(false);

    return done;
}

bool osSegmentSetSize(struct Segment *seg, uint32_t size)
{
    bool ret = true;

    if (!seg)
        return false;

    if (size > SEG_SIZE_MAX) {
        seg->state = SEG_ST_ERASED;
        size = SEG_SIZE_MAX;
        ret = false;
    }
    seg->size[0] = size;
    seg->size[1] = size >> 8;
    seg->size[2] = size >> 16;

    return ret;
}

struct Segment *osSegmentGetEnd()
{
    uint32_t size;
    uint8_t *start = platGetSharedAreaInfo(&size);
    return (struct Segment *)(start + size);
}

struct Segment *osGetSegment(const struct AppHdr *app)
{
    uint32_t size;
    uint8_t *start = platGetSharedAreaInfo(&size);

    return (struct Segment *)((uint8_t*)app &&
                              (uint8_t*)app >= start &&
                              (uint8_t*)app < (start + size) ?
                              (uint8_t*)app - sizeof(struct Segment) : NULL);
}

bool osEraseShared()
{
    wdtDisableClk();
    mpuAllowRamExecution(true);
    mpuAllowRomWrite(true);
    (void)BL.blEraseShared(BL_FLASH_KEY1, BL_FLASH_KEY2);
    mpuAllowRomWrite(false);
    mpuAllowRamExecution(false);
    wdtEnableClk();
    return true;
}

bool osWriteShared(void *dest, const void *src, uint32_t len)
{
    bool ret;

    mpuAllowRamExecution(true);
    mpuAllowRomWrite(true);
    ret = BL.blProgramShared(dest, src, len, BL_FLASH_KEY1, BL_FLASH_KEY2);
    mpuAllowRomWrite(false);
    mpuAllowRamExecution(false);

    if (!ret)
        osLog(LOG_ERROR, "osWriteShared: blProgramShared returned false\n");

    return ret;
}

struct AppHdr *osAppSegmentCreate(uint32_t size)
{
    struct SegmentIterator it;
    const struct Segment *storageSeg = NULL;
    struct AppHdr *app;

    osSegmentIteratorInit(&it);
    while (osSegmentIteratorNext(&it)) {
        if (osSegmentGetState(it.seg) == SEG_ST_EMPTY) {
            storageSeg = it.seg;
            break;
        }
    }
    if (!storageSeg || osSegmentSizeGetNext(storageSeg, size) > it.sharedEnd)
        return NULL;

    app = osSegmentGetData(storageSeg);
    osAppSegmentSetState(app, SEG_ST_RESERVED);

    return app;
}

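/* Finalize a segment created by osAppSegmentCreate(): write the header with
 * the final size and state, pad the image to a uint32_t boundary, and, when
 * SEGMENT_CRC_SUPPORT is enabled, append a CRC footer over the whole record. */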
bool osAppSegmentClose(struct AppHdr *app, uint32_t segDataSize, uint32_t segState)
{
    struct Segment seg;

    // this is enough for holding padding to uint32_t and the footer
    uint8_t footer[sizeof(uint32_t) + FOOTER_SIZE];
    int footerLen;
    bool ret;
    uint32_t totalSize;
    uint8_t *start = platGetSharedAreaInfo(&totalSize);
    uint8_t *end = start + totalSize;
    int32_t fullSize = segDataSize + sizeof(seg); // without footer or padding
    struct Segment *storageSeg = osGetSegment(app);

    // sanity check
    if (segDataSize >= SEG_SIZE_MAX)
        return false;

    // physical limits check
    if (osSegmentSizeAlignedWithFooter(segDataSize) + sizeof(struct Segment) > totalSize)
        return false;

    // available space check: we could truncate size, instead of disallowing it,
    // but we know that we performed validation on the size before, in *Create call,
    // and it was fine, so this must be a programming error, and so we fail.
    // on a side note: size may grow or shrink compared to original estimate.
    // typically it shrinks, since we skip some header info and padding, as well
    // as signature blocks, but it is possible that at some point we may produce
    // more data for some reason. At that time the logic here may need to change
    if (osSegmentSizeGetNext(storageSeg, segDataSize) > (struct Segment*)end)
        return false;

    seg.state = segState;
    osSegmentSetSize(&seg, segDataSize);

    ret = osWriteShared((uint8_t*)storageSeg, (uint8_t*)&seg, sizeof(seg));

    footerLen = (-fullSize) & 3;
    memset(footer, 0x00, footerLen);

#ifdef SEGMENT_CRC_SUPPORT
    struct SegmentFooter segFooter = {
        .crc = ~crc32(storageSeg, fullSize, ~0),
    };
    memcpy(&footer[footerLen], &segFooter, sizeof(segFooter));
    footerLen += sizeof(segFooter);
#endif

    if (ret && footerLen)
        ret = osWriteShared((uint8_t*)storageSeg + fullSize, footer, footerLen);

    return ret;
}

bool osAppWipeData(struct AppHdr *app)
{
    struct Segment *seg = osGetSegment(app);
    int32_t size = osSegmentGetSize(seg);
    uint8_t *p = (uint8_t*)app;
    uint32_t state = osSegmentGetState(seg);
    uint8_t buf[256];
    bool done = true;

    if (!seg || size == SEG_SIZE_INVALID || state == SEG_ST_EMPTY) {
        osLog(LOG_ERROR, "%s: can't erase segment: app=%p; seg=%p"
                         "; size=%" PRIu32
                         "; state=%" PRIu32
                         "\n",
              __func__, app, seg, size, state);
        return false;
    }

    size = osSegmentSizeAlignedWithFooter(size);

    memset(buf, 0, sizeof(buf));
    while (size > 0) {
        uint32_t flashSz = size > sizeof(buf) ? sizeof(buf) : size;
        // keep trying to zero-out stuff even in case of intermittent failures.
        // flash write may occasionally fail on some byte, but it is not good enough
        // reason to not rewrite other bytes
        bool res = osWriteShared(p, buf, flashSz);
        done = done && res;
        size -= flashSz;
        p += flashSz;
    }

    return done;
}

static inline bool osAppIsValid(const struct AppHdr *app)
{
    return app->hdr.magic == APP_HDR_MAGIC &&
           app->hdr.fwVer == APP_HDR_VER_CUR &&
           (app->hdr.fwFlags & FL_APP_HDR_APPLICATION) != 0 &&
           app->hdr.payInfoType == LAYOUT_APP;
}

static bool osExtAppIsValid(const struct AppHdr *app, uint32_t len)
{
    //TODO: when CRC support is ready, add CRC check here
    return osAppIsValid(app) &&
           len >= sizeof(*app) &&
           osAppSegmentGetState(app) == SEG_ST_VALID &&
           !(app->hdr.fwFlags & FL_APP_HDR_INTERNAL);
}

static bool osIntAppIsValid(const struct AppHdr *app)
{
    return osAppIsValid(app) &&
           osAppSegmentGetState(app) == SEG_STATE_INVALID &&
           (app->hdr.fwFlags & FL_APP_HDR_INTERNAL) != 0;
}

static inline bool osExtAppErase(const struct AppHdr *app)
{
    return osAppSegmentSetState(app, SEG_ST_ERASED);
}

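/* Internal apps (linked into the OS image) and external apps (in shared
 * flash) take different platform-specific load paths; both end up with a
 * populated platInfo that the cpuApp* calls use from then on. */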
static struct Task *osLoadApp(const struct AppHdr *app)
{
    struct Task *task;

    task = osAllocTask();
    if (!task) {
        osLog(LOG_WARN, "External app id %016" PRIX64 " @ %p cannot be used as too many apps already exist.\n", app->hdr.appId, app);
        return NULL;
    }
    task->app = app;
    bool done = (app->hdr.fwFlags & FL_APP_HDR_INTERNAL) ?
                cpuInternalAppLoad(task->app, &task->platInfo) :
                cpuAppLoad(task->app, &task->platInfo);

    if (!done) {
        osLog(LOG_WARN, "App @ %p ID %016" PRIX64 " failed to load\n", app, app->hdr.appId);
        osFreeTask(task);
        task = NULL;
    }

    return task;
}

static void osUnloadApp(struct Task *task)
{
    // this is called on task that has stopped running, or had never run
    cpuAppUnload(task->app, &task->platInfo);
    osFreeTask(task);
}

static bool osStartApp(const struct AppHdr *app)
{
    bool done = false;
    struct Task *task;

    if ((task = osLoadApp(app)) != NULL) {
        task->subbedEvtListSz = MAX_EMBEDDED_EVT_SUBS;
        task->subbedEvents = task->subbedEventsInt;
        MAKE_NEW_TID(task);

        // print external NanoApp info to facilitate NanoApp debugging
        if (!(task->app->hdr.fwFlags & FL_APP_HDR_INTERNAL))
            osLog(LOG_INFO, "loaded app ID 0x%llx at flash base 0x%08x ram base 0x%08x; TID %04X\n",
                  task->app->hdr.appId, (uintptr_t) task->app, (uintptr_t) task->platInfo.data, task->tid);

        done = osTaskInit(task);

        if (!done) {
            osLog(LOG_WARN, "App @ %p ID %016" PRIX64 " failed to init\n", task->app, task->app->hdr.appId);
            osUnloadApp(task);
        } else {
            osAddTask(task);
        }
    }

    return done;
}

static bool osStopTask(struct Task *task)
{
    if (!task)
        return false;

    osTaskClrSetFlags(task, 0, FL_TASK_STOPPED);
    osRemoveTask(task);

    if (osTaskGetIoCount(task)) {
        osTaskHandle(task, EVT_APP_STOP, NULL);
        osEnqueueEvtOrFree(EVT_APP_END, task, NULL);
    } else {
        osTaskEnd(task);
        osUnloadApp(task);
    }

    return true;
}

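/* Advance the iterator to the next valid external app matching appId; the
 * vendor and sequence parts of the ID may each be wildcarded with
 * APP_VENDOR_ANY / APP_SEQ_ID_ANY, and scanning stops at the first empty
 * segment. */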
static bool osExtAppFind(struct SegmentIterator *it, uint64_t appId)
{
    uint64_t vendor = APP_ID_GET_VENDOR(appId);
    uint64_t seqId = APP_ID_GET_SEQ_ID(appId);
    uint64_t curAppId;
    const struct AppHdr *app;
    const struct Segment *seg;

    while (osSegmentIteratorNext(it)) {
        seg = it->seg;
        if (seg->state == SEG_ST_EMPTY)
            break;
        if (seg->state != SEG_ST_VALID)
            continue;
        app = osSegmentGetData(seg);
        curAppId = app->hdr.appId;

        if ((vendor == APP_VENDOR_ANY || vendor == APP_ID_GET_VENDOR(curAppId)) &&
            (seqId == APP_SEQ_ID_ANY || seqId == APP_ID_GET_SEQ_ID(curAppId)))
            return true;
    }

    return false;
}

static uint32_t osExtAppStopEraseApps(uint64_t appId, bool doErase)
{
    const struct AppHdr *app;
    int32_t len;
    struct Task *task;
    struct SegmentIterator it;
    uint32_t stopCount = 0;
    uint32_t eraseCount = 0;
    uint32_t appCount = 0;
    uint32_t taskCount = 0;
    struct MgmtStatus stat = { .value = 0 };

    osSegmentIteratorInit(&it);
    while (osExtAppFind(&it, appId)) {
        app = osSegmentGetData(it.seg);
        len = osSegmentGetSize(it.seg);
        if (!osExtAppIsValid(app, len))
            continue;
        appCount++;
        task = osTaskFindByAppID(app->hdr.appId);
        if (task)
            taskCount++;
        if (task && task->app == app) {
            if (osStopTask(task))
                stopCount++;
            else
                continue;
            if (doErase && osExtAppErase(app))
                eraseCount++;
        }
    }
    SET_COUNTER(stat.app, appCount);
    SET_COUNTER(stat.task, taskCount);
    SET_COUNTER(stat.op, stopCount);
    SET_COUNTER(stat.erase, eraseCount);

    return stat.value;
}

uint32_t osExtAppStopApps(uint64_t appId)
{
    return osExtAppStopEraseApps(appId, false);
}

uint32_t osExtAppEraseApps(uint64_t appId)
{
    return osExtAppStopEraseApps(appId, true);
}

static void osScanExternal()
{
    struct SegmentIterator it;
    osSegmentIteratorInit(&it);
    while (osSegmentIteratorNext(&it)) {
        switch (osSegmentGetState(it.seg)) {
        case SEG_ST_EMPTY:
            // everything looks good
            osLog(LOG_INFO, "External area is good\n");
            return;
        case SEG_ST_ERASED:
        case SEG_ST_VALID:
            // this is valid stuff, ignore
            break;
        case SEG_ST_RESERVED:
        default:
            // something is wrong: erase everything
            osLog(LOG_ERROR, "External area is damaged. Erasing\n");
            osEraseShared();
            return;
        }
    }
}

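/* Start every valid external app matching appId. When several copies of the
 * same app exist in shared flash, the older copies are erased and only the
 * most recent one is started; apps whose ID is already running are skipped. */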
uint32_t osExtAppStartApps(uint64_t appId)
{
    const struct AppHdr *app;
    int32_t len;
    struct SegmentIterator it;
    struct SegmentIterator checkIt;
    uint32_t startCount = 0;
    uint32_t eraseCount = 0;
    uint32_t appCount = 0;
    uint32_t taskCount = 0;
    struct MgmtStatus stat = { .value = 0 };

    osScanExternal();

    osSegmentIteratorInit(&it);
    while (osExtAppFind(&it, appId)) {
        app = osSegmentGetData(it.seg);
        len = osSegmentGetSize(it.seg);

        // skip erased or malformed apps
        if (!osExtAppIsValid(app, len))
            continue;

        appCount++;
        checkIt = it;
        // find the most recent copy
        while (osExtAppFind(&checkIt, app->hdr.appId)) {
            if (osExtAppErase(app)) // erase the old one, so we skip it next time
                eraseCount++;
            app = osSegmentGetData(checkIt.seg);
        }

        if (osTaskFindByAppID(app->hdr.appId)) {
            // this is either the most recent external app with the same ID,
            // or an internal app with the same ID; in both cases we do nothing
            taskCount++;
            continue;
        }

        if (osStartApp(app))
            startCount++;
    }
    SET_COUNTER(stat.app, appCount);
    SET_COUNTER(stat.task, taskCount);
    SET_COUNTER(stat.op, startCount);
    SET_COUNTER(stat.erase, eraseCount);

    return stat.value;
}

static void osStartTasks(void)
{
    const struct AppHdr *app;
    uint32_t i, nApps;
    struct Task* task;
    uint32_t status = 0;
    uint32_t taskCnt = 0;

    osLog(LOG_DEBUG, "Initializing task pool...\n");
    list_init(&mTasks);
    list_init(&mFreeTasks);
    for (i = 0; i < MAX_TASKS; ++i) {
        task = &mTaskPool.data[i];
        list_init(&task->list);
        osFreeTask(task);
    }

    mSystemTask = osAllocTask(); // this is a dummy task; holder of TID 0; all system code will run with TID 0
    osSetCurrentTask(mSystemTask);
    osLog(LOG_DEBUG, "System task is: %p\n", mSystemTask);

    /* first enum all internal apps, making sure to check for dupes */
    osLog(LOG_DEBUG, "Starting internal apps...\n");
    for (i = 0, app = platGetInternalAppList(&nApps); i < nApps; i++, app++) {
        if (!osIntAppIsValid(app)) {
            osLog(LOG_WARN, "Invalid internal app @ %p ID %016" PRIX64
                            "; header version: %" PRIu16
                            "\n",
                            app, app->hdr.appId, app->hdr.fwVer);
            continue;
        }

        if (!(app->hdr.fwFlags & FL_APP_HDR_INTERNAL)) {
            osLog(LOG_WARN, "Internal app is not marked: [%p]: flags: 0x%04" PRIX16
                            "; ID: %016" PRIX64
                            "; ignored\n",
                            app, app->hdr.fwFlags, app->hdr.appId);
            continue;
        }
        if ((task = osTaskFindByAppID(app->hdr.appId))) {
            osLog(LOG_WARN, "Internal app ID %016" PRIX64
                            " @ %p attempting to update internal app @ %p; app @%p ignored.\n",
                            app->hdr.appId, app, task->app, app);
            continue;
        }
        if (osStartApp(app))
            taskCnt++;
    }

    osLog(LOG_DEBUG, "Starting external apps...\n");
    status = osExtAppStartApps(APP_ID_ANY);
    osLog(LOG_DEBUG, "Started %" PRIu32 " internal apps; EXT status: %08" PRIX32 "\n", taskCnt, status);
}

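/* Handler for the reserved (below EVT_NO_FIRST_USER_EVENT) events that the
 * public API enqueues: subscription changes, deferred callbacks, private
 * events addressed to a single task, and end-of-task cleanup. */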
static void osInternalEvtHandle(uint32_t evtType, void *evtData)
{
    union InternalThing *da = (union InternalThing*)evtData;
    struct Task *task;
    uint32_t i;

    switch (evtType) {
    case EVT_SUBSCRIBE_TO_EVT:
    case EVT_UNSUBSCRIBE_TO_EVT:
        /* get task */
        task = osTaskFindByTid(da->evtSub.tid);
        if (!task)
            break;

        /* find if subscribed to this evt */
        for (i = 0; i < task->subbedEvtCount && task->subbedEvents[i] != da->evtSub.evt; i++);

        /* if unsub & found -> unsub */
        if (evtType == EVT_UNSUBSCRIBE_TO_EVT && i != task->subbedEvtCount)
            task->subbedEvents[i] = task->subbedEvents[--task->subbedEvtCount];
        /* if sub & not found -> sub */
        else if (evtType == EVT_SUBSCRIBE_TO_EVT && i == task->subbedEvtCount) {
            if (task->subbedEvtListSz == task->subbedEvtCount) { /* enlarge the list */
                uint32_t newSz = (task->subbedEvtListSz * 3 + 1) / 2;
                uint32_t *newList = heapAlloc(sizeof(uint32_t[newSz])); /* grow by 50% */
                if (newList) {
                    memcpy(newList, task->subbedEvents, sizeof(uint32_t[task->subbedEvtListSz]));
                    if (task->subbedEvents != task->subbedEventsInt)
                        heapFree(task->subbedEvents);
                    task->subbedEvents = newList;
                    task->subbedEvtListSz = newSz;
                }
            }
            if (task->subbedEvtListSz > task->subbedEvtCount) { /* have space ? */
                task->subbedEvents[task->subbedEvtCount++] = da->evtSub.evt;
            }
        }
        break;

    case EVT_APP_END:
        task = evtData;
        osTaskEnd(task);
        osUnloadApp(task);
        break;

    case EVT_DEFERRED_CALLBACK:
        da->deferred.callback(da->deferred.cookie);
        break;

    case EVT_PRIVATE_EVT:
        task = osTaskFindByTid(da->privateEvt.toTid);
        if (task) {
            //private events cannot be retained
            TaggedPtr *tmp = mCurEvtEventFreeingInfo;
            mCurEvtEventFreeingInfo = NULL;

            osTaskHandle(task, da->privateEvt.evtType, da->privateEvt.evtData);

            mCurEvtEventFreeingInfo = tmp;
        }

        handleEventFreeing(da->privateEvt.evtType, da->privateEvt.evtData, da->privateEvt.evtFreeInfo);
        break;
    }
}

void abort(void)
{
    /* this is necessary for va_* funcs... */
    osLog(LOG_ERROR, "Abort called");
    while(1);
}

bool osRetainCurrentEvent(TaggedPtr *evtFreeingInfoP)
{
    if (!mCurEvtEventFreeingInfo)
        return false;

    *evtFreeingInfoP = *mCurEvtEventFreeingInfo;
    mCurEvtEventFreeingInfo = NULL;
    return true;
}

void osFreeRetainedEvent(uint32_t evtType, void *evtData, TaggedPtr *evtFreeingInfoP)
{
    handleEventFreeing(evtType, evtData, *evtFreeingInfoP);
}

void osMainInit(void)
{
    cpuInit();
    cpuIntsOff();
    osInit();
    timInit();
    sensorsInit();
    syscallInit();
    osApiExport(mMiscInternalThingsSlab);
    apIntInit();
    cpuIntsOn();
    wdtInit();
    osStartTasks();

    //broadcast app start to all already-loaded apps
    (void)osEnqueueEvt(EVT_APP_START, NULL, NULL);
}

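/* One iteration of the main dispatcher: dequeue a single event, credit the
 * originating task's ioCount, route reserved events to osInternalEvtHandle()
 * and everything else to every task subscribed to that event type, then free
 * the event data unless a handler retained it via osRetainCurrentEvent(). */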
void osMainDequeueLoop(void)
{
    TaggedPtr evtFreeingInfo;
    uint32_t evtType, j;
    void *evtData;
    struct Task *task;
    uint16_t tid;

    /* get an event */
    if (!evtQueueDequeue(mEvtsInternal, &evtType, &evtData, &evtFreeingInfo, true))
        return;

    tid = EVENT_GET_ORIGIN(evtType); // read the origin before EVENT_GET_EVENT() masks those bits off
    evtType = EVENT_GET_EVENT(evtType);
    task = osTaskFindByTid(tid);
    if (task)
        osTaskAddIoCount(task, -1);

    /* by default we free them when we're done with them */
    mCurEvtEventFreeingInfo = &evtFreeingInfo;

    if (evtType < EVT_NO_FIRST_USER_EVENT) {
        /* handle deferred actions and other reserved events here */
        osInternalEvtHandle(evtType, evtData);
    } else {
        /* send this event to all tasks who want it */
        for_each_task(&mTasks, task) {
            for (j = 0; j < task->subbedEvtCount; j++) {
                if (task->subbedEvents[j] == evtType) {
                    osTaskHandle(task, evtType, evtData);
                    break;
                }
            }
        }
    }

    /* free it */
    if (mCurEvtEventFreeingInfo)
        handleEventFreeing(evtType, evtData, evtFreeingInfo);

    /* avoid some possible errors */
    mCurEvtEventFreeingInfo = NULL;
}

void __attribute__((noreturn)) osMain(void)
{
    osMainInit();

    while (true)
    {
        osMainDequeueLoop();
        platPeriodic();
    }
}

static void osDeferredActionFreeF(void* event)
{
    slabAllocatorFree(mMiscInternalThingsSlab, event);
}

static bool osEventSubscribeUnsubscribe(uint32_t tid, uint32_t evtType, bool sub)
{
    union InternalThing *act = slabAllocatorAlloc(mMiscInternalThingsSlab);

    if (!act)
        return false;
    act->evtSub.evt = evtType;
    act->evtSub.tid = tid;

    return osEnqueueEvtOrFree(sub ? EVT_SUBSCRIBE_TO_EVT : EVT_UNSUBSCRIBE_TO_EVT, act, osDeferredActionFreeF);
}

bool osEventSubscribe(uint32_t tid, uint32_t evtType)
{
    (void)tid;
    return osEventSubscribeUnsubscribe(osGetCurrentTid(), evtType, true);
}

bool osEventUnsubscribe(uint32_t tid, uint32_t evtType)
{
    (void)tid;
    return osEventSubscribeUnsubscribe(osGetCurrentTid(), evtType, false);
}

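/* Common enqueue path: events from a task that is already stopped are freed
 * immediately instead of being queued; otherwise the origin TID is folded
 * into the event type and the sender's ioCount is bumped until the event is
 * dequeued again in osMainDequeueLoop(). */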
static bool osEnqueueEvtCommon(uint32_t evtType, void *evtData, TaggedPtr evtFreeInfo)
{
    struct Task *task = osGetCurrentTask();

    if (osTaskTestFlags(task, FL_TASK_STOPPED)) {
        handleEventFreeing(evtType, evtData, evtFreeInfo);
        return true;
    }

    evtType = EVENT_WITH_ORIGIN(evtType, osGetCurrentTid());
    osTaskAddIoCount(task, 1);

    if (evtQueueEnqueue(mEvtsInternal, evtType, evtData, evtFreeInfo, false))
        return true;

    osTaskAddIoCount(task, -1);
    return false;
}

bool osEnqueueEvt(uint32_t evtType, void *evtData, EventFreeF evtFreeF)
{
    return osEnqueueEvtCommon(evtType, evtData, taggedPtrMakeFromPtr(evtFreeF));
}

bool osEnqueueEvtOrFree(uint32_t evtType, void *evtData, EventFreeF evtFreeF)
{
    bool success = osEnqueueEvt(evtType, evtData, evtFreeF);

    if (!success && evtFreeF)
        evtFreeF(evtData);

    return success;
}

bool osEnqueueEvtAsApp(uint32_t evtType, void *evtData, uint32_t fromAppTid)
{
    // compatibility with existing external apps
    if (evtType & EVENT_TYPE_BIT_DISCARDABLE_COMPAT)
        evtType |= EVENT_TYPE_BIT_DISCARDABLE;

    (void)fromAppTid;
    return osEnqueueEvtCommon(evtType, evtData, taggedPtrMakeFromUint(osGetCurrentTid()));
}

bool osDefer(OsDeferCbkF callback, void *cookie, bool urgent)
{
    union InternalThing *act = slabAllocatorAlloc(mMiscInternalThingsSlab);
    if (!act)
        return false;

    act->deferred.callback = callback;
    act->deferred.cookie = cookie;

    if (evtQueueEnqueue(mEvtsInternal, EVT_DEFERRED_CALLBACK, act, taggedPtrMakeFromPtr(osDeferredActionFreeF), urgent))
        return true;

    slabAllocatorFree(mMiscInternalThingsSlab, act);
    return false;
}

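/* Private events are wrapped in an EVT_PRIVATE_EVT envelope allocated from
 * the slab; osInternalEvtHandle() unwraps them and delivers the payload only
 * to the task identified by toTid. */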
static bool osEnqueuePrivateEvtEx(uint32_t evtType, void *evtData, TaggedPtr evtFreeInfo, uint32_t toTid)
{
    union InternalThing *act = slabAllocatorAlloc(mMiscInternalThingsSlab);
    if (!act) {
        osLog(LOG_ERROR, "[seos] ERROR: osEnqueuePrivateEvtEx: call to slabAllocatorAlloc() failed\n");
        return false;
    }

    act->privateEvt.evtType = evtType;
    act->privateEvt.evtData = evtData;
    act->privateEvt.evtFreeInfo = evtFreeInfo;
    act->privateEvt.toTid = toTid;

    return osEnqueueEvtOrFree(EVT_PRIVATE_EVT, act, osDeferredActionFreeF);
}

bool osEnqueuePrivateEvt(uint32_t evtType, void *evtData, EventFreeF evtFreeF, uint32_t toTid)
{
    return osEnqueuePrivateEvtEx(evtType, evtData, taggedPtrMakeFromPtr(evtFreeF), toTid);
}

bool osEnqueuePrivateEvtAsApp(uint32_t evtType, void *evtData, uint32_t fromAppTid, uint32_t toTid)
{
    (void)fromAppTid;
    return osEnqueuePrivateEvtEx(evtType, evtData, taggedPtrMakeFromUint(osGetCurrentTid()), toTid);
}

bool osTidById(uint64_t appId, uint32_t *tid)
{
    struct Task *task;

    for_each_task(&mTasks, task) {
        if (task->app && task->app->hdr.appId == appId) {
            *tid = task->tid;
            return true;
        }
    }

    return false;
}

bool osAppInfoById(uint64_t appId, uint32_t *appIdx, uint32_t *appVer, uint32_t *appSize)
{
    uint32_t i = 0;
    struct Task *task;

    for_each_task(&mTasks, task) {
        const struct AppHdr *app = task->app;
        if (app && app->hdr.appId == appId) {
            *appIdx = i;
            *appVer = app->hdr.appVer;
            *appSize = app->sect.rel_end;
            return true;
        }
        i++;
    }

    return false;
}

bool osAppInfoByIndex(uint32_t appIdx, uint64_t *appId, uint32_t *appVer, uint32_t *appSize)
{
    struct Task *task;
    int i = 0;

    for_each_task(&mTasks, task) {
        if (i != appIdx) {
            ++i;
        } else {
            const struct AppHdr *app = task->app;
            *appId = app->hdr.appId;
            *appVer = app->hdr.appVer;
            *appSize = app->sect.rel_end;
            return true;
        }
    }

    return false;
}

void osLogv(enum LogLevel level, const char *str, va_list vl)
{
    void *userData = platLogAllocUserData();

    platLogPutcharF(userData, level);
    cvprintf(platLogPutcharF, userData, str, vl);

    platLogFlush(userData);
}

void osLog(enum LogLevel level, const char *str, ...)
{
    va_list vl;

    va_start(vl, str);
    osLogv(level, str, vl);
    va_end(vl);
}


//Google's public key for Google's apps' signing
const uint8_t __attribute__ ((section (".pubkeys"))) _RSA_KEY_GOOGLE[] = {
    0xd9, 0xcd, 0x83, 0xae, 0xb5, 0x9e, 0xe4, 0x63, 0xf1, 0x4c, 0x26, 0x6a, 0x1c, 0xeb, 0x4c, 0x12,
    0x5b, 0xa6, 0x71, 0x7f, 0xa2, 0x4e, 0x7b, 0xa2, 0xee, 0x02, 0x86, 0xfc, 0x0d, 0x31, 0x26, 0x74,
    0x1e, 0x9c, 0x41, 0x43, 0xba, 0x16, 0xe9, 0x23, 0x4d, 0xfc, 0xc4, 0xca, 0xcc, 0xd5, 0x27, 0x2f,
    0x16, 0x4c, 0xe2, 0x85, 0x39, 0xb3, 0x0b, 0xcb, 0x73, 0xb6, 0x56, 0xc2, 0x98, 0x83, 0xf6, 0xfa,
    0x7a, 0x6e, 0xa0, 0x9a, 0xcc, 0x83, 0x97, 0x9d, 0xde, 0x89, 0xb2, 0xa3, 0x05, 0x46, 0x0c, 0x12,
    0xae, 0x01, 0xf8, 0x0c, 0xf5, 0x39, 0x32, 0xe5, 0x94, 0xb9, 0xa0, 0x8f, 0x19, 0xe4, 0x39, 0x54,
    0xad, 0xdb, 0x81, 0x60, 0x74, 0x63, 0xd5, 0x80, 0x3b, 0xd2, 0x88, 0xf4, 0xcb, 0x6b, 0x47, 0x28,
    0x80, 0xb0, 0xd1, 0x89, 0x6d, 0xd9, 0x62, 0x88, 0x81, 0xd6, 0xc0, 0x13, 0x88, 0x91, 0xfb, 0x7d,
    0xa3, 0x7f, 0xa5, 0x40, 0x12, 0xfb, 0x77, 0x77, 0x4c, 0x98, 0xe4, 0xd3, 0x62, 0x39, 0xcc, 0x63,
    0x34, 0x76, 0xb9, 0x12, 0x67, 0xfe, 0x83, 0x23, 0x5d, 0x40, 0x6b, 0x77, 0x93, 0xd6, 0xc0, 0x86,
    0x6c, 0x03, 0x14, 0xdf, 0x78, 0x2d, 0xe0, 0x9b, 0x5e, 0x05, 0xf0, 0x93, 0xbd, 0x03, 0x1d, 0x17,
    0x56, 0x88, 0x58, 0x25, 0xa6, 0xae, 0x63, 0xd2, 0x01, 0x43, 0xbb, 0x7e, 0x7a, 0xa5, 0x62, 0xdf,
    0x8a, 0x31, 0xbd, 0x24, 0x1b, 0x1b, 0xeb, 0xfe, 0xdf, 0xd1, 0x31, 0x61, 0x4a, 0xfa, 0xdd, 0x6e,
    0x62, 0x0c, 0xa9, 0xcd, 0x08, 0x0c, 0xa1, 0x1b, 0xe7, 0xf2, 0xed, 0x36, 0x22, 0xd0, 0x5d, 0x80,
    0x78, 0xeb, 0x6f, 0x5a, 0x58, 0x18, 0xb5, 0xaf, 0x82, 0x77, 0x4c, 0x95, 0xce, 0xc6, 0x4d, 0xda,
    0xca, 0xef, 0x68, 0xa6, 0x6d, 0x71, 0x4d, 0xf1, 0x14, 0xaf, 0x68, 0x25, 0xb8, 0xf3, 0xff, 0xbe,
};


#ifdef DEBUG

//debug key whose privatekey is checked in as misc/debug.privkey
const uint8_t __attribute__ ((section (".pubkeys"))) _RSA_KEY_GOOGLE_DEBUG[] = {
    0x2d, 0xff, 0xa6, 0xb5, 0x65, 0x87, 0xbe, 0x61, 0xd1, 0xe1, 0x67, 0x10, 0xa1, 0x9b, 0xc6, 0xca,
    0xc8, 0xb1, 0xf0, 0xaa, 0x88, 0x60, 0x9f, 0xa1, 0x00, 0xa1, 0x41, 0x9a, 0xd8, 0xb4, 0xd1, 0x74,
    0x9f, 0x23, 0x28, 0x0d, 0xc2, 0xc4, 0x37, 0x15, 0xb1, 0x4a, 0x80, 0xca, 0xab, 0xb9, 0xba, 0x09,
    0x7d, 0xf8, 0x44, 0xd6, 0xa2, 0x72, 0x28, 0x12, 0x91, 0xf6, 0xa5, 0xea, 0xbd, 0xf8, 0x81, 0x6b,
    0xd2, 0x3c, 0x50, 0xa2, 0xc6, 0x19, 0x54, 0x48, 0x45, 0x8d, 0x92, 0xac, 0x01, 0xda, 0x14, 0x32,
    0xdb, 0x05, 0x82, 0x06, 0x30, 0x25, 0x09, 0x7f, 0x5a, 0xbb, 0x86, 0x64, 0x70, 0x98, 0x64, 0x1e,
    0xe6, 0xca, 0x1d, 0xc1, 0xcb, 0xb6, 0x23, 0xd2, 0x62, 0x00, 0x46, 0x97, 0xd5, 0xcc, 0xe6, 0x36,
    0x72, 0xec, 0x2e, 0x43, 0x1f, 0x0a, 0xaf, 0xf2, 0x51, 0xe1, 0xcd, 0xd2, 0x98, 0x5d, 0x7b, 0x64,
    0xeb, 0xd1, 0x35, 0x4d, 0x59, 0x13, 0x82, 0x6c, 0xbd, 0xc4, 0xa2, 0xfc, 0xad, 0x64, 0x73, 0xe2,
    0x71, 0xb5, 0xf4, 0x45, 0x53, 0x6b, 0xc3, 0x56, 0xb9, 0x8b, 0x3d, 0xeb, 0x00, 0x48, 0x6e, 0x29,
    0xb1, 0xb4, 0x8e, 0x2e, 0x43, 0x39, 0xef, 0x45, 0xa0, 0xb8, 0x8b, 0x5f, 0x80, 0xb5, 0x0c, 0xc3,
    0x03, 0xe3, 0xda, 0x51, 0xdc, 0xec, 0x80, 0x2c, 0x0c, 0xdc, 0xe2, 0x71, 0x0a, 0x14, 0x4f, 0x2c,
    0x22, 0x2b, 0x0e, 0xd1, 0x8b, 0x8f, 0x93, 0xd2, 0xf3, 0xec, 0x3a, 0x5a, 0x1c, 0xba, 0x80, 0x54,
    0x23, 0x7f, 0xb0, 0x54, 0x8b, 0xe3, 0x98, 0x22, 0xbb, 0x4b, 0xd0, 0x29, 0x5f, 0xce, 0xf2, 0xaa,
    0x99, 0x89, 0xf2, 0xb7, 0x5d, 0x8d, 0xb2, 0x72, 0x0b, 0x52, 0x02, 0xb8, 0xa4, 0x37, 0xa0, 0x3b,
    0xfe, 0x0a, 0xbc, 0xb3, 0xb3, 0xed, 0x8f, 0x8c, 0x42, 0x59, 0xbe, 0x4e, 0x31, 0xed, 0x11, 0x9b,
};

#endif