// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>

#include "esp_log.h"

#include "esp_event.h"
#include "esp_event_internal.h"
#include "esp_event_private.h"

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
#include "esp_timer.h"
#endif

/* ---------------------------- Definitions --------------------------------- */

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
// LOOP @<address, name> rx:<received events no.> dr:<dropped events no.>
#define LOOP_DUMP_FORMAT "LOOP @%p,%s rx:%u dr:%u\n"
// handler @<address> ev:<base, id> inv:<times invoked> time:<runtime>
#define HANDLER_DUMP_FORMAT "    HANDLER @%p ev:%s,%s inv:%u time:%lld us\n"

#define PRINT_DUMP_INFO(dst, sz, ...) do { \
        int cb = snprintf(dst, sz, __VA_ARGS__); \
        dst += cb; \
        sz -= cb; \
    } while(0)
#endif

/* ------------------------- Static Variables ------------------------------- */

static const char* TAG = "event";
static const char* esp_event_any_base = "any";

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
static SLIST_HEAD(esp_event_loop_instance_list_t, esp_event_loop_instance) s_event_loops =
        SLIST_HEAD_INITIALIZER(s_event_loops);

static portMUX_TYPE s_event_loops_spinlock = portMUX_INITIALIZER_UNLOCKED;
#endif


/* ------------------------- Static Functions ------------------------------- */

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING

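// Estimates the buffer size that esp_event_dump() needs by counting the registered
// loops and handlers under the loops spinlock, plus a small allowance.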
static int esp_event_dump_prepare(void)
{
    esp_event_loop_instance_t* loop_it;
    esp_event_loop_node_t *loop_node_it;
    esp_event_base_node_t* base_node_it;
    esp_event_id_node_t* id_node_it;
    esp_event_handler_node_t* handler_it;

    // Count the number of items to be printed. This is needed to compute how much memory to reserve.
    int loops = 0, handlers = 0;

    portENTER_CRITICAL(&s_event_loops_spinlock);

    SLIST_FOREACH(loop_it, &s_event_loops, next) {
        SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
            SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
                handlers++;
            }

            SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
                SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
                    handlers++;
                }
                SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
                    SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
                        handlers++;
                    }
                }
            }
        }
        loops++;
    }

    portEXIT_CRITICAL(&s_event_loops_spinlock);

    // Reserve slightly more memory than computed
    int allowance = 3;
    int size = (((loops + allowance) * (sizeof(LOOP_DUMP_FORMAT) + 10 + 20 + 2 * 11)) +
                ((handlers + allowance) * (sizeof(HANDLER_DUMP_FORMAT) + 10 + 2 * 20 + 11 + 20)));

    return size;
}
#endif

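// Entry point of a loop's dedicated task: repeatedly drives esp_event_loop_run()
// until it returns an error, after which the task stops dispatching events.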
static void esp_event_loop_run_task(void* args)
{
    esp_err_t err;
    esp_event_loop_handle_t event_loop = (esp_event_loop_handle_t) args;

    ESP_LOGD(TAG, "running task for loop %p", event_loop);

    while(1) {
        err = esp_event_loop_run(event_loop, portMAX_DELAY);
        if (err != ESP_OK) {
            break;
        }
    }

    ESP_LOGE(TAG, "suspended task for loop %p", event_loop);
    // A FreeRTOS task must never return; suspend this task instead.
    vTaskSuspend(NULL);
}

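// Invokes a single registered handler for the given post; when profiling is enabled,
// the handler's invocation count and accumulated runtime are updated as well.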
static void handler_execute(esp_event_loop_instance_t* loop, esp_event_handler_node_t *handler, esp_event_post_instance_t post)
{
    ESP_LOGD(TAG, "running post %s:%d with handler %p and context %p on loop %p", post.base, post.id, handler->handler_ctx->handler, &handler->handler_ctx, loop);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    int64_t start, diff;
    start = esp_timer_get_time();
#endif
    // Execute the handler
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    void* data_ptr = NULL;

    if (post.data_set) {
        if (post.data_allocated) {
            data_ptr = post.data.ptr;
        } else {
            data_ptr = &post.data.val;
        }
    }

    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, data_ptr);
#else
    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, post.data);
#endif

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    diff = esp_timer_get_time() - start;

    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);

    handler->invoked++;
    handler->time += diff;

    xSemaphoreGive(loop->profiling_mutex);
#endif
}

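// Allocates a handler node plus its instance context and appends it to the given list.
// In legacy mode a handler that is already registered is not duplicated; its argument
// is overwritten instead. The new instance context is returned via handler_ctx if set.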
static esp_err_t handler_instances_add(esp_event_handler_nodes_t* handlers, esp_event_handler_t event_handler, void* event_handler_arg, esp_event_handler_instance_context_t **handler_ctx, bool legacy)
{
    esp_event_handler_node_t *handler_instance = calloc(1, sizeof(*handler_instance));

    if (!handler_instance) return ESP_ERR_NO_MEM;

    esp_event_handler_instance_context_t *context = calloc(1, sizeof(*context));

    if (!context) {
        free(handler_instance);
        return ESP_ERR_NO_MEM;
    }

    context->handler = event_handler;
    context->arg = event_handler_arg;
    handler_instance->handler_ctx = context;

    if (SLIST_EMPTY(handlers)) {
        SLIST_INSERT_HEAD(handlers, handler_instance, next);
    }
    else {
        esp_event_handler_node_t *it = NULL, *last = NULL;

        SLIST_FOREACH(it, handlers, next) {
            if (legacy) {
                if (event_handler == it->handler_ctx->handler) {
                    it->handler_ctx->arg = event_handler_arg;
                    ESP_LOGW(TAG, "handler already registered, overwriting");
                    free(handler_instance);
                    free(context);
                    return ESP_OK;
                }
            }
            last = it;
        }

        SLIST_INSERT_AFTER(last, handler_instance, next);
    }

    // If the caller didn't provide the handler instance context, don't set it.
    // It will be removed once the event loop is deleted.
    if (handler_ctx) {
        *handler_ctx = context;
    }

    return ESP_OK;
}

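// Adds a handler underneath a base node: on the base-wide list for ESP_EVENT_ANY_ID,
// otherwise on the id node matching the event id (created on demand).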
static esp_err_t base_node_add_handler(esp_event_base_node_t* base_node,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void *event_handler_arg,
                                       esp_event_handler_instance_context_t **handler_ctx,
                                       bool legacy)
{
    if (id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(base_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    }
    else {
        esp_err_t err = ESP_OK;
        esp_event_id_node_t *it = NULL, *id_node = NULL, *last_id_node = NULL;

        SLIST_FOREACH(it, &(base_node->id_nodes), next) {
            if (it->id == id) {
                id_node = it;
            }
            last_id_node = it;
        }

        if (!last_id_node || !id_node) {
            id_node = (esp_event_id_node_t*) calloc(1, sizeof(*id_node));

            if (!id_node) {
                ESP_LOGE(TAG, "alloc for new id node failed");
                return ESP_ERR_NO_MEM;
            }

            id_node->id = id;

            SLIST_INIT(&(id_node->handlers));

            err = handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                if (!last_id_node) {
                    SLIST_INSERT_HEAD(&(base_node->id_nodes), id_node, next);
                }
                else {
                    SLIST_INSERT_AFTER(last_id_node, id_node, next);
                }
            } else {
                free(id_node);
            }

            return err;
        }
        else {
            return handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}

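// Adds a handler underneath a loop node: on the loop-wide list when both base and id
// are wildcards, otherwise on the base node matching the event base (created on demand).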
static esp_err_t loop_node_add_handler(esp_event_loop_node_t* loop_node,
                                       esp_event_base_t base,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void *event_handler_arg,
                                       esp_event_handler_instance_context_t **handler_ctx,
                                       bool legacy)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(loop_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    }
    else {
        esp_err_t err = ESP_OK;
        esp_event_base_node_t *it = NULL, *base_node = NULL, *last_base_node = NULL;

        SLIST_FOREACH(it, &(loop_node->base_nodes), next) {
            if (it->base == base) {
                base_node = it;
            }
            last_base_node = it;
        }

        if (!last_base_node ||
            !base_node ||
            (base_node && !SLIST_EMPTY(&(base_node->id_nodes)) && id == ESP_EVENT_ANY_ID) ||
            (last_base_node && last_base_node->base != base && !SLIST_EMPTY(&(last_base_node->id_nodes)) && id == ESP_EVENT_ANY_ID)) {
            base_node = (esp_event_base_node_t*) calloc(1, sizeof(*base_node));

            if (!base_node) {
                ESP_LOGE(TAG, "alloc mem for new base node failed");
                return ESP_ERR_NO_MEM;
            }

            base_node->base = base;

            SLIST_INIT(&(base_node->handlers));
            SLIST_INIT(&(base_node->id_nodes));

            err = base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                if (!last_base_node) {
                    SLIST_INSERT_HEAD(&(loop_node->base_nodes), base_node, next);
                }
                else {
                    SLIST_INSERT_AFTER(last_base_node, base_node, next);
                }
            } else {
                free(base_node);
            }

            return err;
        } else {
            return base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}

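// Removes one handler from the given list, matching by handler function in legacy mode
// or by exact instance context otherwise, and frees the node together with its context.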
static esp_err_t handler_instances_remove(esp_event_handler_nodes_t* handlers, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    esp_event_handler_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, handlers, next, temp) {
        if (legacy) {
            if (it->handler_ctx->handler == handler_ctx->handler) {
                SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
                free(it->handler_ctx);
                free(it);
                return ESP_OK;
            }
        } else {
            if (it->handler_ctx == handler_ctx) {
                SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
                free(it->handler_ctx);
                free(it);
                return ESP_OK;
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}


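// Removes a handler from a base node; an id node whose handler list becomes empty is
// unlinked and freed.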
static esp_err_t base_node_remove_handler(esp_event_base_node_t* base_node, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    if (id == ESP_EVENT_ANY_ID) {
        return handler_instances_remove(&(base_node->handlers), handler_ctx, legacy);
    }
    else {
        esp_event_id_node_t *it, *temp;
        SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
            if (it->id == id) {
                esp_err_t res = handler_instances_remove(&(it->handlers), handler_ctx, legacy);

                if (res == ESP_OK) {
                    if (SLIST_EMPTY(&(it->handlers))) {
                        SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
                        free(it);
                        return ESP_OK;
                    }
                }
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}

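// Removes a handler from a loop node; a base node left without handlers and id nodes is
// unlinked and freed.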
static esp_err_t loop_node_remove_handler(esp_event_loop_node_t* loop_node, esp_event_base_t base, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_remove(&(loop_node->handlers), handler_ctx, legacy);
    }
    else {
        esp_event_base_node_t *it, *temp;
        SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
            if (it->base == base) {
                esp_err_t res = base_node_remove_handler(it, id, handler_ctx, legacy);

                if (res == ESP_OK) {
                    if (SLIST_EMPTY(&(it->handlers)) && SLIST_EMPTY(&(it->id_nodes))) {
                        SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
                        free(it);
                        return ESP_OK;
                    }
                }
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}

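// The *_remove_all helpers below drop every registration hanging off a handler list,
// base node or loop node; they are used when an event loop is deleted.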
static void handler_instances_remove_all(esp_event_handler_nodes_t* handlers)
{
    esp_event_handler_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, handlers, next, temp) {
        SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
        free(it->handler_ctx);
        free(it);
    }
}

static void base_node_remove_all_handler(esp_event_base_node_t* base_node)
{
    handler_instances_remove_all(&(base_node->handlers));

    esp_event_id_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
        handler_instances_remove_all(&(it->handlers));
        SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
        free(it);
    }
}

static void loop_node_remove_all_handler(esp_event_loop_node_t* loop_node)
{
    handler_instances_remove_all(&(loop_node->handlers));

    esp_event_base_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
        base_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
        free(it);
    }
}

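// Frees any heap-allocated event data carried by a post and clears the post instance.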
static inline void __attribute__((always_inline)) post_instance_delete(esp_event_post_instance_t* post)
{
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    if (post->data_allocated && post->data.ptr) {
        free(post->data.ptr);
    }
#else
    if (post->data) {
        free(post->data);
    }
#endif
    memset(post, 0, sizeof(*post));
}

/* ---------------------------- Public API --------------------------------- */

esp_err_t esp_event_loop_create(const esp_event_loop_args_t* event_loop_args, esp_event_loop_handle_t* event_loop)
{
    assert(event_loop_args);

    esp_event_loop_instance_t* loop;
    esp_err_t err = ESP_ERR_NO_MEM; // most likely error

    loop = calloc(1, sizeof(*loop));
    if (loop == NULL) {
        ESP_LOGE(TAG, "alloc for event loop failed");
        return err;
    }

    loop->queue = xQueueCreate(event_loop_args->queue_size, sizeof(esp_event_post_instance_t));
    if (loop->queue == NULL) {
        ESP_LOGE(TAG, "create event loop queue failed");
        goto on_err;
    }

    loop->mutex = xSemaphoreCreateRecursiveMutex();
    if (loop->mutex == NULL) {
        ESP_LOGE(TAG, "create event loop mutex failed");
        goto on_err;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    loop->profiling_mutex = xSemaphoreCreateMutex();
    if (loop->profiling_mutex == NULL) {
        ESP_LOGE(TAG, "create event loop profiling mutex failed");
        goto on_err;
    }
#endif

    SLIST_INIT(&(loop->loop_nodes));

    // Create the loop task if requested
    if (event_loop_args->task_name != NULL) {
        BaseType_t task_created = xTaskCreatePinnedToCore(esp_event_loop_run_task, event_loop_args->task_name,
                                                          event_loop_args->task_stack_size, (void*) loop,
                                                          event_loop_args->task_priority, &(loop->task), event_loop_args->task_core_id);

        if (task_created != pdPASS) {
            ESP_LOGE(TAG, "create task for loop failed");
            err = ESP_FAIL;
            goto on_err;
        }

        loop->name = event_loop_args->task_name;

        ESP_LOGD(TAG, "created task for loop %p", loop);
    } else {
        loop->name = "";
        loop->task = NULL;
    }

    loop->running_task = NULL;

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_INSERT_HEAD(&s_event_loops, loop, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    *event_loop = (esp_event_loop_handle_t) loop;

    ESP_LOGD(TAG, "created event loop %p", loop);

    return ESP_OK;

on_err:
    if (loop->queue != NULL) {
        vQueueDelete(loop->queue);
    }

    if (loop->mutex != NULL) {
        vSemaphoreDelete(loop->mutex);
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    if (loop->profiling_mutex != NULL) {
        vSemaphoreDelete(loop->profiling_mutex);
    }
#endif

    free(loop);

    return err;
}
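
/*
 * Illustrative usage sketch for creating a user event loop. This is not part of the
 * component itself; the argument values and the handler name my_handler are
 * hypothetical placeholders.
 *
 *   static void my_handler(void* arg, esp_event_base_t base, int32_t id, void* data) { }
 *
 *   esp_event_loop_args_t loop_args = {
 *       .queue_size = 16,
 *       .task_name = "evt_loop",   // pass NULL to drive the loop manually with esp_event_loop_run()
 *       .task_priority = 5,
 *       .task_stack_size = 3072,
 *       .task_core_id = 0,
 *   };
 *   esp_event_loop_handle_t loop;
 *   ESP_ERROR_CHECK(esp_event_loop_create(&loop_args, &loop));
 */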

// On event lookup performance: the library implements the event list as a linked list, which results in O(n)
// lookup time. Tests comparing this implementation to the O(lg n) performance of rbtrees
// (https://github.com/freebsd/freebsd/blob/master/sys/sys/tree.h)
// indicate that the difference is not substantial, especially considering the additional
// per-node pointer overhead of rbtrees. Code for the rbtree implementation of the event loop library is archived
// in feature/esp_event_loop_library_rbtrees if needed.
esp_err_t esp_event_loop_run(esp_event_loop_handle_t event_loop, TickType_t ticks_to_run)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    esp_event_post_instance_t post;
    TickType_t marker = xTaskGetTickCount();
    TickType_t end = 0;

#if (configUSE_16_BIT_TICKS == 1)
    int32_t remaining_ticks = ticks_to_run;
#else
    int64_t remaining_ticks = ticks_to_run;
#endif

    while(xQueueReceive(loop->queue, &post, ticks_to_run) == pdTRUE) {
        // The event has already been unqueued, so ensure it gets executed.
        xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

        loop->running_task = xTaskGetCurrentTaskHandle();

        bool exec = false;

        esp_event_handler_node_t *handler, *temp_handler;
        esp_event_loop_node_t *loop_node, *temp_node;
        esp_event_base_node_t *base_node, *temp_base;
        esp_event_id_node_t *id_node, *temp_id_node;

        SLIST_FOREACH_SAFE(loop_node, &(loop->loop_nodes), next, temp_node) {
            // Execute loop level handlers
            SLIST_FOREACH_SAFE(handler, &(loop_node->handlers), next, temp_handler) {
                handler_execute(loop, handler, post);
                exec |= true;
            }

            SLIST_FOREACH_SAFE(base_node, &(loop_node->base_nodes), next, temp_base) {
                if (base_node->base == post.base) {
                    // Execute base level handlers
                    SLIST_FOREACH_SAFE(handler, &(base_node->handlers), next, temp_handler) {
                        handler_execute(loop, handler, post);
                        exec |= true;
                    }

                    SLIST_FOREACH_SAFE(id_node, &(base_node->id_nodes), next, temp_id_node) {
                        if (id_node->id == post.id) {
                            // Execute id level handlers
                            SLIST_FOREACH_SAFE(handler, &(id_node->handlers), next, temp_handler) {
                                handler_execute(loop, handler, post);
                                exec |= true;
                            }
                            // Skip to next base node
                            break;
                        }
                    }
                }
            }
        }

        esp_event_base_t base = post.base;
        int32_t id = post.id;

        post_instance_delete(&post);

        if (ticks_to_run != portMAX_DELAY) {
            end = xTaskGetTickCount();
            remaining_ticks -= end - marker;
            // If the ticks to run expired, return to the caller
            if (remaining_ticks <= 0) {
                xSemaphoreGiveRecursive(loop->mutex);
                break;
            } else {
                marker = end;
            }
        }

        loop->running_task = NULL;

        xSemaphoreGiveRecursive(loop->mutex);

        if (!exec) {
            // No handlers were registered, not even loop/base level handlers
            ESP_LOGD(TAG, "no handlers have been registered for event %s:%d posted to loop %p", base, id, event_loop);
        }
    }

    return ESP_OK;
}

esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    SemaphoreHandle_t loop_mutex = loop->mutex;
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;
#endif

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_REMOVE(&s_event_loops, loop, esp_event_loop_instance, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    // Delete the task if it was created
    if (loop->task != NULL) {
        vTaskDelete(loop->task);
    }

    // Remove all registered events and handlers in the loop
    esp_event_loop_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        loop_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
        free(it);
    }

    // Drop existing posts on the queue
    esp_event_post_instance_t post;
    while(xQueueReceive(loop->queue, &post, 0) == pdTRUE) {
        post_instance_delete(&post);
    }

    // Cleanup loop
    vQueueDelete(loop->queue);
    free(loop);
    // Free loop mutex before deleting
    xSemaphoreGiveRecursive(loop_mutex);
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreGive(loop_profiling_mutex);
    vSemaphoreDelete(loop_profiling_mutex);
#endif
    vSemaphoreDelete(loop_mutex);

    ESP_LOGD(TAG, "deleted loop %p", (void*) event_loop);

    return ESP_OK;
}

esp_err_t esp_event_handler_register_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_context_t** handler_ctx_arg, bool legacy)
{
    assert(event_loop);
    assert(event_handler);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "registering to any event base with specific id unsupported");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_err_t err = ESP_OK;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    esp_event_loop_node_t *loop_node = NULL, *last_loop_node = NULL;

    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        last_loop_node = loop_node;
    }

    bool is_loop_level_handler = (event_base == esp_event_any_base) && (event_id == ESP_EVENT_ANY_ID);

    if (!last_loop_node ||
        (last_loop_node && !SLIST_EMPTY(&(last_loop_node->base_nodes)) && is_loop_level_handler)) {
        loop_node = (esp_event_loop_node_t*) calloc(1, sizeof(*loop_node));

        if (!loop_node) {
            ESP_LOGE(TAG, "alloc for new loop node failed");
            err = ESP_ERR_NO_MEM;
            goto on_err;
        }

        SLIST_INIT(&(loop_node->handlers));
        SLIST_INIT(&(loop_node->base_nodes));

        err = loop_node_add_handler(loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);

        if (err == ESP_OK) {
            if (!last_loop_node) {
                SLIST_INSERT_HEAD(&(loop->loop_nodes), loop_node, next);
            }
            else {
                SLIST_INSERT_AFTER(last_loop_node, loop_node, next);
            }
        } else {
            free(loop_node);
        }
    }
    else {
        err = loop_node_add_handler(last_loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);
    }

on_err:
    xSemaphoreGiveRecursive(loop->mutex);
    return err;
}

esp_err_t esp_event_handler_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                          int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg)
{
    return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, NULL, true);
}

esp_err_t esp_event_handler_instance_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_t* handler_ctx_arg)
{
    return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, (esp_event_handler_instance_context_t**) handler_ctx_arg, false);
}
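
/*
 * Illustrative registration sketch. MY_EVENTS, MY_EVENT_ID and my_handler are
 * hypothetical placeholders, not names defined by this component.
 *
 *   esp_event_handler_instance_t instance;
 *   ESP_ERROR_CHECK(esp_event_handler_instance_register_with(loop, MY_EVENTS, MY_EVENT_ID,
 *                                                            my_handler, NULL, &instance));
 *   // ... later, unregister using the same instance handle:
 *   ESP_ERROR_CHECK(esp_event_handler_instance_unregister_with(loop, MY_EVENTS, MY_EVENT_ID, instance));
 */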

esp_err_t esp_event_handler_unregister_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                     int32_t event_id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    assert(event_loop);
    assert(handler_ctx);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "unregistering to any event base with specific id unsupported");
        return ESP_FAIL;
    }

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    esp_event_loop_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        esp_err_t res = loop_node_remove_handler(it, event_base, event_id, handler_ctx, legacy);

        if (res == ESP_OK && SLIST_EMPTY(&(it->base_nodes)) && SLIST_EMPTY(&(it->handlers))) {
            SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
            free(it);
            break;
        }
    }

    xSemaphoreGiveRecursive(loop->mutex);

    return ESP_OK;
}

esp_err_t esp_event_handler_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                            int32_t event_id, esp_event_handler_t event_handler)
{
    esp_event_handler_instance_context_t local_handler_ctx;
    local_handler_ctx.handler = event_handler;
    local_handler_ctx.arg = NULL;

    return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, &local_handler_ctx, true);
}

esp_err_t esp_event_handler_instance_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                     int32_t event_id, esp_event_handler_instance_t handler_ctx_arg)
{
    if (!handler_ctx_arg) return ESP_ERR_INVALID_ARG;

    return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, (esp_event_handler_instance_context_t*) handler_ctx_arg, false);
}

esp_err_t esp_event_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                            void* event_data, size_t event_data_size, TickType_t ticks_to_wait)
{
    assert(event_loop);

    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data != NULL && event_data_size != 0) {
        // Make persistent copy of event data on heap.
        void* event_data_copy = calloc(1, event_data_size);

        if (event_data_copy == NULL) {
            return ESP_ERR_NO_MEM;
        }

        memcpy(event_data_copy, event_data, event_data_size);
#if CONFIG_ESP_EVENT_POST_FROM_ISR
        post.data.ptr = event_data_copy;
        post.data_allocated = true;
        post.data_set = true;
#else
        post.data = event_data_copy;
#endif
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Find the task that currently executes the loop. It is safe to query loop->task since it is
    // not mutated since loop creation. ENSURE THIS REMAINS TRUE.
    if (loop->task == NULL) {
        // The loop has no dedicated task. Find out what task is currently running it.
        result = xSemaphoreTakeRecursive(loop->mutex, ticks_to_wait);

        if (result == pdTRUE) {
            if (loop->running_task != xTaskGetCurrentTaskHandle()) {
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
            } else {
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, 0);
            }
        }
    } else {
        // The loop has a dedicated task.
        if (loop->task != xTaskGetCurrentTaskHandle()) {
            result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
        } else {
            result = xQueueSendToBack(loop->queue, &post, 0);
        }
    }

    if (result != pdTRUE) {
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_ERR_TIMEOUT;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
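
/*
 * Illustrative post sketch. MY_EVENTS, MY_EVENT_ID and my_payload_t are hypothetical
 * placeholders. The event data is copied into the loop's queue, so a stack variable
 * may safely be passed.
 *
 *   my_payload_t payload = { .value = 42 };
 *   ESP_ERROR_CHECK(esp_event_post_to(loop, MY_EVENTS, MY_EVENT_ID,
 *                                     &payload, sizeof(payload), pdMS_TO_TICKS(10)));
 */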

#if CONFIG_ESP_EVENT_POST_FROM_ISR
esp_err_t esp_event_isr_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                                void* event_data, size_t event_data_size, BaseType_t* task_unblocked)
{
    assert(event_loop);

    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data_size > sizeof(post.data.val)) {
        return ESP_ERR_INVALID_ARG;
    }

    if (event_data != NULL && event_data_size != 0) {
        memcpy((void*)(&(post.data.val)), event_data, event_data_size);
        post.data_allocated = false;
        post.data_set = true;
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Post the event from an ISR; the data has already been copied into the post
    // instance above, so no heap allocation is needed here.
    result = xQueueSendToBackFromISR(loop->queue, &post, task_unblocked);

    if (result != pdTRUE) {
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_FAIL;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
#endif

esp_err_t esp_event_dump(FILE* file)
{
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    assert(file);

    esp_event_loop_instance_t* loop_it;
    esp_event_loop_node_t *loop_node_it;
    esp_event_base_node_t* base_node_it;
    esp_event_id_node_t* id_node_it;
    esp_event_handler_node_t* handler_it;

    // Allocate memory for printing
    int sz = esp_event_dump_prepare();
    char* buf = calloc(sz, sizeof(char));
    if (buf == NULL) {
        return ESP_ERR_NO_MEM;
    }
    char* dst = buf;

    char id_str_buf[20];

    // Print info to buffer
    portENTER_CRITICAL(&s_event_loops_spinlock);

    SLIST_FOREACH(loop_it, &s_event_loops, next) {
        uint32_t events_recieved, events_dropped;

        events_recieved = atomic_load(&loop_it->events_recieved);
        events_dropped = atomic_load(&loop_it->events_dropped);

        PRINT_DUMP_INFO(dst, sz, LOOP_DUMP_FORMAT, loop_it, loop_it->task != NULL ? loop_it->name : "none",
                        events_recieved, events_dropped);

        int sz_bak = sz;

        SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
            SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
                PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, "ESP_EVENT_ANY_BASE",
                                "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
            }

            SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
                SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
                    PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base,
                                    "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
                }

                SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
                    SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
                        memset(id_str_buf, 0, sizeof(id_str_buf));
                        snprintf(id_str_buf, sizeof(id_str_buf), "%d", id_node_it->id);

                        PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base,
                                        id_str_buf, handler_it->invoked, handler_it->time);
                    }
                }
            }
        }

        // No handlers registered for this loop
        if (sz == sz_bak) {
            PRINT_DUMP_INFO(dst, sz, "    NO HANDLERS REGISTERED\n");
        }
    }

    portEXIT_CRITICAL(&s_event_loops_spinlock);

    // Print the contents of the buffer to the file; use "%s" so that '%' characters
    // in event base names cannot be interpreted as format specifiers.
    fprintf(file, "%s", buf);

    // Free the allocated buffer
    free(buf);
#endif
    return ESP_OK;
}