// Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

/*
Architecture:

The whole SDIO slave peripheral consists of three parts: the registers (including the control registers of
interrupts and shared registers), the sending FIFO and the receiving FIFO. The document ``esp_slave_protocol.rst``
describes the functionality of the peripheral in detail.
The host can access only one of these parts at a time, and the hardware functions of the parts are fully
independent; hence this driver is designed as three independent parts. The shared registers are quite
simple, as are the interrupts: when a slave interrupt bit is written by the host, the slave gets an interrupt;
when one of the host interrupt bits is active, the slave hardware outputs an interrupt signal on the DAT1 line.

For the FIFOs, the peripheral exposes counters as registers so that the host always knows whether the slave
is ready to send/receive data. The driver resets the counters during initialization, and the host should somehow
inform the slave to reset the counters again if it reboots (or loses the counter values for some reason).
The host can then read/write the FIFOs with CMD53 commands according to the counters.

Since we don't want to copy all the data out of a buffer each time a sending/receiving buffer is used,
buffers are loaded directly onto the sending/receiving linked-list and taken off only after use.
Hence the driver takes ownership of a buffer when the buffer is fed to the driver.

The driver returns the ownership of buffers when a "finish" function is called. When the hardware finishes
sending/receiving a buffer, the ISR is invoked; it goes through the linked-list to see how many buffers
were freed since the last interrupt, and sends corresponding signals to the app.

The FIFO drivers work as follows:

1.  The receive driver requires the application to "register" a buffer before the buffer is used. The driver
    dynamically allocates a linked-list descriptor for the buffer, and returns the descriptor as a handle
    to the app.

    Each time the app asks to receive into a buffer, the descriptor of the buffer is loaded onto the linked-list,
    and the counter of receiving buffers is increased so that the host learns this through the receiving interrupt.
    The hardware automatically walks the linked list and writes data into the buffers loaded on the list.

    The receive driver gives a counting semaphore to the app for each buffer that finishes receiving. A task can
    only check the linked list and fetch one finished buffer per semaphore taken.

2.  The sending driver is slightly different due to the way the hardware works.
    (TODO: re-write this part if the stitch mode is released)
    The hardware has a cache, so once a descriptor is loaded onto the linked-list, it cannot be modified
    until returned (used) by the hardware. This forbids us from loading descriptors onto the linked list during
    a transfer (or while waiting for the host to start one). However, we use a "ringbuffer" (different from
    the one in the ``freertos/`` folder) holding descriptors to solve this:

    1.  The driver allocates contiguous memory for several buffer descriptors (the maximum buffer number) during
        initialization. It then points the STAILQ_NEXT pointer of every descriptor except the last one to the
        descriptor after it, and points the last descriptor's pointer back to the first one: the descriptors now
        form a ring.

    2.  The "ringbuffer" has a write pointer pointing to where the app can write a new descriptor. The app writes
        the new descriptor at the position indicated by the write pointer, without touching the STAILQ_NEXT
        pointer, so that the descriptors always stay in a ring-like linked-list. The app never touches the part
        of the linked-list being used by the hardware.

    3.  When the hardware needs some data to send, it automatically picks a part of the connected descriptors.
        Depending on the mode:
        - Buffer mode: pick only the descriptor following the last one sent;
        - Stream mode: pick every descriptor from the one following the last one sent up to the latest one written.

        The driver clears the STAILQ_NEXT pointer of the last picked descriptor and hands the head of that part
        to the DMA controller, so that it looks like a linear linked-list rather than a ring to the hardware.

    4.  The counter of the sending FIFO increases when the app loads new buffers (in STREAM_MODE) or when a new
        transfer should start (in PACKET_MODE).

    5.  When a sending transfer is finished, the driver goes through the descriptors just sent in the ISR and
        pushes the ``arg`` member of each descriptor to the queue back to the app, so that the app can handle
        finished buffers. The driver also restores the STAILQ_NEXT pointer of the last descriptor so that the
        descriptors form a ring again.
*/
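
/*
A minimal usage sketch of the receiving path, to make the flow above concrete. This is an
illustrative assumption rather than part of this driver: the buffer size, function name and
error handling are this sketch's choices, but every driver function called below is the
public API implemented in this file (declared in ``driver/sdio_slave.h``; DMA_ATTR comes
from ``esp_attr.h``).

    static DMA_ATTR uint8_t rx_buf[512];    // must be DMA capable and 32-bit aligned

    void example_recv(void)                 // hypothetical helper, not part of this driver
    {
        sdio_slave_config_t config = {
            .sending_mode     = SDIO_SLAVE_SEND_STREAM,
            .send_queue_size  = 4,
            .recv_buffer_size = 512,
        };
        ESP_ERROR_CHECK(sdio_slave_initialize(&config));

        // Register once; the driver owns the buffer while it is loaded.
        sdio_slave_buf_handle_t handle = sdio_slave_recv_register_buf(rx_buf);
        ESP_ERROR_CHECK(sdio_slave_recv_load_buf(handle));
        ESP_ERROR_CHECK(sdio_slave_start());

        // Block on the counting semaphore until the host finishes writing one buffer,
        // then reload the same buffer so the host can use it again.
        uint8_t *addr;
        size_t len;
        ESP_ERROR_CHECK(sdio_slave_recv(&handle, &addr, &len, portMAX_DELAY));
        // ... consume addr[0..len) ...
        ESP_ERROR_CHECK(sdio_slave_recv_load_buf(handle));
    }
*/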


#include <string.h>
#include "driver/sdio_slave.h"
#include "soc/sdio_slave_periph.h"
#include "esp32/rom/lldesc.h"
#include "esp_log.h"
#include "esp_intr_alloc.h"
#include "esp_osal/esp_osal.h"
#include "soc/soc_memory_layout.h"
#include "soc/gpio_periph.h"
#include "hal/cpu_hal.h"
#include "esp_osal/semphr.h"
#include "driver/periph_ctrl.h"
#include "driver/gpio.h"
#include "hal/sdio_slave_hal.h"
#include "hal/gpio_hal.h"


#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
    SDIO_SLAVE_LOGE("%s", str);\
    return ret_val;\
} }while (0)

static const char TAG[] = "sdio_slave";

#define SDIO_SLAVE_LOGE(s, ...) ESP_LOGE(TAG, "%s(%d): "s, __FUNCTION__,__LINE__,##__VA_ARGS__)
#define SDIO_SLAVE_LOGW(s, ...) ESP_LOGW(TAG, "%s: "s, __FUNCTION__,##__VA_ARGS__)


// sdio_slave_buf_handle_t is of type recv_desc_t*;
typedef struct recv_desc_s {
    union {
        struct {
            // the third word, the pointer to the next desc, is shared with the tailq entry.
            sdio_slave_hal_recv_desc_t hal_desc;
            // when the fourth word is used (not NULL), the tailq is in use and the desc is not in the receiving state.
            uint32_t not_receiving;
        };
        struct {
            // the first 3 words of this struct are defined by, and compatible with, the DMA linked-list format.
            uint32_t _reserved0;
            uint32_t _reserved1;
            TAILQ_ENTRY(recv_desc_s) te; // tailq used to store the registered descriptors.
        };
    };
} recv_desc_t;


typedef TAILQ_HEAD(recv_tailq_head_s, recv_desc_s) recv_tailq_t;

typedef struct {
    sdio_slave_config_t config;
    sdio_slave_context_t *hal;
    intr_handle_t intr_handle; // allocated interrupt handle
    /*------- events ---------------*/
    union {
        SemaphoreHandle_t events[9]; // 0-7 for general purpose interrupts
        struct {
            SemaphoreHandle_t _events[8];
            SemaphoreHandle_t recv_event; // 8 for recv
        };
    };
    portMUX_TYPE reg_spinlock;
    /*------- sending ---------------*/
    // descs in the send linked-list are temporary: they take their information and space
    // from the ringbuf, and return them to the ringbuf after use.
    SemaphoreHandle_t remain_cnt;
    portMUX_TYPE write_spinlock;
    QueueHandle_t ret_queue;
    /*------- receiving ---------------*/
    recv_tailq_t recv_reg_list; // removed from the linked list; registered but currently unused
    portMUX_TYPE recv_spinlock;
} sdio_context_t;

#define CONTEXT_INIT_VAL { \
    .intr_handle = NULL, \
    .hal = NULL, \
    /*------- events ---------------*/ \
    .events = {}, \
    .reg_spinlock = portMUX_INITIALIZER_UNLOCKED, \
    /*------- sending ---------------*/ \
    .ret_queue = NULL, \
    .write_spinlock = portMUX_INITIALIZER_UNLOCKED, \
    /*------- receiving ---------------*/ \
    .recv_reg_list = TAILQ_HEAD_INITIALIZER(context.recv_reg_list), \
    .recv_spinlock = portMUX_INITIALIZER_UNLOCKED, \
}

static sdio_context_t context = CONTEXT_INIT_VAL;

static void sdio_intr(void*);
static void sdio_intr_host(void*);
static void sdio_intr_send(void*);
static void sdio_intr_recv(void*);

static esp_err_t send_flush_data(void);
static esp_err_t recv_flush_data(void);

static inline void critical_enter_recv(void);
static inline void critical_exit_recv(void);

static void deinit_context(void);

static inline void show_ll(lldesc_t *item)
{
    ESP_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner);
    ESP_EARLY_LOGI(TAG, "   buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next);
}

static void __attribute((unused)) dump_ll(lldesc_t *queue)
{
    int cnt = 0;
    lldesc_t *item = queue;
    while (item != NULL) {
        cnt++;
        show_ll(item);
        item = STAILQ_NEXT(item, qe);
    }
    ESP_EARLY_LOGI(TAG, "total: %d", cnt);
}

static inline void deinit_context(void)
{
    context.config = (sdio_slave_config_t){};
    for (int i = 0; i < 9; i++) {
        if (context.events[i] != NULL) {
            vSemaphoreDelete(context.events[i]);
            context.events[i] = NULL;
        }
    }
    if (context.ret_queue != NULL) {
        vQueueDelete(context.ret_queue);
        context.ret_queue = NULL;
    }
    if (context.remain_cnt != NULL) vSemaphoreDelete(context.remain_cnt);
    free(context.hal->send_desc_queue.data);
    context.hal->send_desc_queue.data = NULL;
    free(context.hal);
    context.hal = NULL;
}

static esp_err_t init_context(const sdio_slave_config_t *config)
{
    SDIO_SLAVE_CHECK(*(uint32_t*)&context.config == 0, "sdio slave already initialized", ESP_ERR_INVALID_STATE);
    context = (sdio_context_t)CONTEXT_INIT_VAL;
    context.config = *config;

    //initialize and configure the HAL
    context.hal = (sdio_slave_context_t*)heap_caps_calloc(sizeof(sdio_slave_context_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (context.hal == NULL) goto no_mem;

    context.hal->sending_mode = config->sending_mode;
    context.hal->timing = config->timing;
    context.hal->send_queue_size = config->send_queue_size;
    context.hal->recv_buffer_size = config->recv_buffer_size;
    //initialize ringbuffer resources
    sdio_ringbuf_t *buf = &(context.hal->send_desc_queue);
    //one extra slot is allocated but kept unused
    buf->size = SDIO_SLAVE_SEND_DESC_SIZE * (config->send_queue_size + 1);
    buf->data = (uint8_t*)heap_caps_malloc(buf->size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (buf->data == NULL) goto no_mem;

    sdio_slave_hal_init(context.hal);

    // in theory we could queue infinite buffers in the linked list, but for multi-core reasons we have to use
    // a queue to count the finished buffers.
    context.recv_event = xSemaphoreCreateCounting(UINT32_MAX, 0);
    for (int i = 0; i < 9; i++) {
        if (i < 8) {
            context.events[i] = xSemaphoreCreateBinary();
        } // index 8 (recv_event) is already created above.
        if (context.events[i] == NULL) {
            SDIO_SLAVE_LOGE("event initialize failed");
            goto no_mem;
        }
    }

    context.remain_cnt = xSemaphoreCreateCounting(context.config.send_queue_size, context.config.send_queue_size);
    if (context.remain_cnt == NULL) goto no_mem;

    context.ret_queue = xQueueCreate(config->send_queue_size, sizeof(void*));
    if (context.ret_queue == NULL) goto no_mem;

    return ESP_OK;

no_mem:
    deinit_context();
    return ESP_ERR_NO_MEM;
}

static void configure_pin(int pin, uint32_t func, bool pullup)
{
    const int sdmmc_func = func;
    const int drive_strength = 3;
    assert(pin != -1);
    uint32_t reg = GPIO_PIN_MUX_REG[pin];
    assert(reg != UINT32_MAX);

    PIN_INPUT_ENABLE(reg);
    gpio_hal_iomux_func_sel(reg, sdmmc_func);
    PIN_SET_DRV(reg, drive_strength);
    gpio_pulldown_dis(pin);
    if (pullup) {
        gpio_pullup_en(pin);
    }
}

static inline esp_err_t sdio_slave_hw_init(sdio_slave_config_t *config)
{
    //initialize pins
    const sdio_slave_slot_info_t *slot = &sdio_slave_slot_info[1];

    bool pullup = config->flags & SDIO_SLAVE_FLAG_INTERNAL_PULLUP;
    configure_pin(slot->clk_gpio, slot->func, false); //clk doesn't need a pullup
    configure_pin(slot->cmd_gpio, slot->func, pullup);
    configure_pin(slot->d0_gpio, slot->func, pullup);
    if ((config->flags & SDIO_SLAVE_FLAG_HOST_INTR_DISABLED) == 0) {
        configure_pin(slot->d1_gpio, slot->func, pullup);
    }
    if ((config->flags & SDIO_SLAVE_FLAG_DAT2_DISABLED) == 0) {
        configure_pin(slot->d2_gpio, slot->func, pullup);
    }
    configure_pin(slot->d3_gpio, slot->func, pullup);

    //enable and configure the module
    periph_module_reset(PERIPH_SDIO_SLAVE_MODULE);
    periph_module_enable(PERIPH_SDIO_SLAVE_MODULE);

    sdio_slave_hal_hw_init(context.hal);

    return ESP_OK;
}

static void recover_pin(int pin, int sdio_func)
{
    uint32_t reg = GPIO_PIN_MUX_REG[pin];
    assert(reg != UINT32_MAX);

    int func = REG_GET_FIELD(reg, MCU_SEL);
    if (func == sdio_func) {
        gpio_set_direction(pin, GPIO_MODE_INPUT);
        gpio_hal_iomux_func_sel(reg, PIN_FUNC_GPIO);
    }
}

static void sdio_slave_hw_deinit(void)
{
    const sdio_slave_slot_info_t *slot = &sdio_slave_slot_info[1];
    recover_pin(slot->clk_gpio, slot->func);
    recover_pin(slot->cmd_gpio, slot->func);
    recover_pin(slot->d0_gpio, slot->func);
    recover_pin(slot->d1_gpio, slot->func);
    recover_pin(slot->d2_gpio, slot->func);
    recover_pin(slot->d3_gpio, slot->func);
}

esp_err_t sdio_slave_initialize(sdio_slave_config_t *config)
{
    esp_err_t r;
    intr_handle_t intr_handle = NULL;
    const int flags = 0;
    r = esp_intr_alloc(ETS_SLC0_INTR_SOURCE, flags, sdio_intr, NULL, &intr_handle);
    if (r != ESP_OK) return r;

    r = init_context(config);
    if (r != ESP_OK) return r;
    context.intr_handle = intr_handle;

    r = sdio_slave_hw_init(config);
    if (r != ESP_OK) return r;

    sdio_slave_reset();
    return ESP_OK;
}

void sdio_slave_deinit(void)
{
    sdio_slave_hw_deinit();

    //unregister all buffers that are registered but not loaded
    recv_desc_t *temp_desc;
    recv_desc_t *desc;
    TAILQ_FOREACH_SAFE(desc, &context.recv_reg_list, te, temp_desc) {
        TAILQ_REMOVE(&context.recv_reg_list, desc, te);
        free(desc);
    }
    //unregister all buffers that are loaded but not returned
    while (1) {
        desc = (recv_desc_t*)sdio_slave_hal_recv_unload_desc(context.hal);
        if (desc == NULL) break;
        free(desc);
    }
    esp_err_t ret = esp_intr_free(context.intr_handle);
    assert(ret == ESP_OK);
    context.intr_handle = NULL;
    deinit_context();
}

esp_err_t sdio_slave_start(void)
{
    esp_err_t ret;
    sdio_slave_hostint_t intr = (sdio_slave_hostint_t)UINT32_MAX;
    sdio_slave_hal_hostint_clear(context.hal, &intr);
    ret = sdio_slave_hal_send_start(context.hal);
    if (ret != ESP_OK) return ret;

    critical_enter_recv();
    sdio_slave_hal_recv_start(context.hal);
    critical_exit_recv();

    sdio_slave_hal_set_ioready(context.hal, true);
    return ESP_OK;
}

esp_err_t sdio_slave_reset(void)
{
    esp_err_t err;
    err = send_flush_data();
    if (err != ESP_OK) {
        return err;
    }

    err = sdio_slave_hal_send_reset_counter(context.hal);
    if (err != ESP_OK) {
        return err;
    }

    err = recv_flush_data();
    if (err != ESP_OK) {
        return err;
    }

    critical_enter_recv();
    sdio_slave_hal_recv_reset_counter(context.hal);
    critical_exit_recv();
    return ESP_OK;
}

void sdio_slave_stop(void)
{
    sdio_slave_hal_set_ioready(context.hal, false);
    sdio_slave_hal_send_stop(context.hal);
    sdio_slave_hal_recv_stop(context.hal);
}

static void sdio_intr(void *arg)
{
    sdio_intr_send(arg);
    sdio_intr_recv(arg);
    sdio_intr_host(arg);
}

/*---------------------------------------------------------------------------
 *                  Host
 *--------------------------------------------------------------------------*/
static void sdio_intr_host(void *arg)
{
    sdio_slave_ll_slvint_t int_val;
    sdio_slave_hal_slvint_fetch_clear(context.hal, &int_val);
    portBASE_TYPE yield = pdFALSE;
    for (int i = 0; i < 8; i++) {
        if (BIT(i) & int_val) {
            if (context.config.event_cb != NULL) (*context.config.event_cb)(i);
            xSemaphoreGiveFromISR(context.events[i], &yield);
        }
    }
    if (yield) portYIELD_FROM_ISR();
}

esp_err_t sdio_slave_wait_int(int pos, TickType_t wait)
{
    SDIO_SLAVE_CHECK(pos >= 0 && pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
    return xSemaphoreTake(context.events[pos], wait);
}

uint8_t sdio_slave_read_reg(int pos)
{
    if (pos >= 28 && pos <= 31) SDIO_SLAVE_LOGW("%s: interrupt reg, for reference", __FUNCTION__);
    if (pos < 0 || pos >= 64) SDIO_SLAVE_LOGE("read register address wrong");
    return sdio_slave_hal_host_get_reg(context.hal, pos);
}

esp_err_t sdio_slave_write_reg(int pos, uint8_t reg)
{
    if (pos >= 28 && pos <= 31) {
        SDIO_SLAVE_LOGE("interrupt reg, please use sdio_slave_clear_int");
        return ESP_ERR_INVALID_ARG;
    }
    if (pos < 0 || pos >= 64) {
        SDIO_SLAVE_LOGE("write register address wrong");
        return ESP_ERR_INVALID_ARG;
    }

    portENTER_CRITICAL(&context.reg_spinlock);
    sdio_slave_hal_host_set_reg(context.hal, pos, reg);
    portEXIT_CRITICAL(&context.reg_spinlock);
    return ESP_OK;
}

sdio_slave_hostint_t sdio_slave_get_host_intena(void)
{
    sdio_slave_hostint_t host_int;
    sdio_slave_hal_hostint_get_ena(context.hal, &host_int);
    return host_int;
}

void sdio_slave_set_host_intena(sdio_slave_hostint_t mask)
{
    sdio_slave_hal_hostint_set_ena(context.hal, &mask);
}

void sdio_slave_clear_host_int(sdio_slave_hostint_t mask)
{
    sdio_slave_hal_hostint_clear(context.hal, &mask);
}

static inline sdio_slave_hostint_t get_hostint_by_pos(int pos)
{
    return (sdio_slave_hostint_t)BIT(pos);
}

esp_err_t sdio_slave_send_host_int(uint8_t pos)
{
    SDIO_SLAVE_CHECK(pos < 8, "interrupt num invalid", ESP_ERR_INVALID_ARG);
    sdio_slave_hostint_t intr = get_hostint_by_pos(pos);
    sdio_slave_hal_hostint_send(context.hal, &intr);
    return ESP_OK;
}
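
/*
A brief sketch of how an app might use the shared registers and interrupt API above. This is
illustrative only: the register positions and values are arbitrary choices, not fixed by the
protocol (positions 28-31 are reserved for interrupts, see the checks above):

    sdio_slave_write_reg(0, 0x5A);           // value the host can read via CMD52/CMD53
    uint8_t v = sdio_slave_read_reg(1);      // value previously written by the host
    sdio_slave_send_host_int(0);             // raise host interrupt bit 0 (signaled on DAT1)
    sdio_slave_wait_int(0, portMAX_DELAY);   // block until the host writes slave interrupt bit 0
*/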

/*---------------------------------------------------------------------------
 *                  Send
 *--------------------------------------------------------------------------*/

/* The linked list is handled in the app, while the counter and pointer are processed in the ISR.
 * The driver abuses the rx_done bit to get the ISR invoked.
 * If the driver is stopped, the linked list is stopped along with the ISR invoker.
 */

static void sdio_intr_send(void *arg)
{
    ESP_EARLY_LOGV(TAG, "intr_send");
    portBASE_TYPE yield = pdFALSE;

    // this interrupt is abused to get the ISR invoked by the app
    sdio_slave_hal_send_handle_isr_invoke(context.hal);

    uint32_t returned_cnt;
    if (sdio_slave_hal_send_eof_happened(context.hal)) {
        portBASE_TYPE ret = pdTRUE;

        esp_err_t err;
        while (1) {
            void *finished_arg;
            err = sdio_slave_hal_send_get_next_finished_arg(context.hal, &finished_arg, &returned_cnt);
            if (err != ESP_OK) {
                break;
            }

            assert(returned_cnt == 0);
            ESP_EARLY_LOGV(TAG, "end: %x", finished_arg);
            ret = xQueueSendFromISR(context.ret_queue, &finished_arg, &yield);
            assert(ret == pdTRUE);
        }
        // the last call to get_next_finished_arg returns the total number of returned descs.
        for (size_t i = 0; i < returned_cnt; i++) {
            portBASE_TYPE ret = xSemaphoreGiveFromISR(context.remain_cnt, &yield);
            assert(ret == pdTRUE);
        }
    }

    sdio_slave_hal_send_new_packet_if_exist(context.hal);

    if (yield) portYIELD_FROM_ISR();
}

esp_err_t sdio_slave_send_queue(uint8_t *addr, size_t len, void *arg, TickType_t wait)
{
    SDIO_SLAVE_CHECK(len > 0, "len <= 0", ESP_ERR_INVALID_ARG);
    SDIO_SLAVE_CHECK(esp_ptr_dma_capable(addr) && (uint32_t)addr % 4 == 0, "buffer to send should be DMA capable and 32-bit aligned",
                     ESP_ERR_INVALID_ARG);

    portBASE_TYPE cnt_ret = xSemaphoreTake(context.remain_cnt, wait);
    if (cnt_ret != pdTRUE) return ESP_ERR_TIMEOUT;

    portENTER_CRITICAL(&context.write_spinlock);
    esp_err_t ret = sdio_slave_hal_send_queue(context.hal, addr, len, arg);
    portEXIT_CRITICAL(&context.write_spinlock);
    if (ret != ESP_OK) return ret;

    return ESP_OK;
}

esp_err_t sdio_slave_send_get_finished(void **out_arg, TickType_t wait)
{
    void *arg = NULL;
    portBASE_TYPE err = xQueueReceive(context.ret_queue, &arg, wait);
    if (out_arg) *out_arg = arg;
    if (err != pdTRUE) return ESP_ERR_TIMEOUT;
    return ESP_OK;
}

esp_err_t sdio_slave_transmit(uint8_t *addr, size_t len)
{
    uint32_t timestamp = cpu_hal_get_cycle_count();
    uint32_t ret_stamp;

    esp_err_t err = sdio_slave_send_queue(addr, len, (void*)timestamp, portMAX_DELAY);
    if (err != ESP_OK) return err;
    err = sdio_slave_send_get_finished((void**)&ret_stamp, portMAX_DELAY);
    if (err != ESP_OK) return err;
    SDIO_SLAVE_CHECK(ret_stamp == timestamp, "a buffer queued earlier was sent but not yet returned", ESP_ERR_INVALID_STATE);

    return ESP_OK;
}
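
/*
A minimal asynchronous send sketch, complementing the blocking sdio_slave_transmit() above.
Illustrative assumptions only: the buffer name and the use of the buffer pointer itself as the
``arg`` token are this sketch's choices, not requirements of the API:

    static DMA_ATTR uint8_t tx_buf[64];

    // Ownership of tx_buf passes to the driver here...
    ESP_ERROR_CHECK(sdio_slave_send_queue(tx_buf, sizeof(tx_buf), tx_buf, portMAX_DELAY));

    // ...and comes back when the matching arg is read from the finished-queue.
    void *finished = NULL;
    ESP_ERROR_CHECK(sdio_slave_send_get_finished(&finished, portMAX_DELAY));
    assert(finished == tx_buf);
*/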

//clear the data, but keep the counter
static esp_err_t send_flush_data(void)
{
    esp_err_t err;

    while (1) {
        void *finished_arg;
        uint32_t return_cnt = 0;
        err = sdio_slave_hal_send_flush_next_buffer(context.hal, &finished_arg, &return_cnt);
        if (err == ESP_OK) {
            portBASE_TYPE ret = xQueueSend(context.ret_queue, &finished_arg, portMAX_DELAY);
            assert(ret == pdTRUE);
            for (size_t i = 0; i < return_cnt; i++) {
                portBASE_TYPE ret = xSemaphoreGive(context.remain_cnt);
                assert(ret == pdTRUE);
            }
        } else {
            if (err == ESP_ERR_NOT_FOUND) {
                err = ESP_OK;
            }
            break;
        }
    }

    if (err == ESP_ERR_INVALID_STATE) {
        ESP_LOGE(TAG, "flush data when transmission started");
    }
    return err;
}

/*---------------------------------------------------------------------------
 *                  Recv
 *--------------------------------------------------------------------------*/
#define CHECK_HANDLE_IDLE(desc) do { if (desc == NULL || !desc->not_receiving) {\
    return ESP_ERR_INVALID_ARG; } } while(0)

static inline void critical_enter_recv(void)
{
    portENTER_CRITICAL(&context.recv_spinlock);
}

static inline void critical_exit_recv(void)
{
    portEXIT_CRITICAL(&context.recv_spinlock);
}

// remove the data, but still increase the counter
static esp_err_t recv_flush_data(void)
{
    while (1) {
        portBASE_TYPE ret = xSemaphoreTake(context.recv_event, 0);
        if (ret == pdFALSE) break;
        critical_enter_recv();
        sdio_slave_hal_recv_flush_one_buffer(context.hal);
        critical_exit_recv();
    }
    return ESP_OK;
}

static void sdio_intr_recv(void *arg)
{
    portBASE_TYPE yield = 0;
    while (sdio_slave_hal_recv_done(context.hal)) {
        portENTER_CRITICAL_ISR(&context.recv_spinlock);
        bool has_next_item = sdio_slave_hal_recv_has_next_item(context.hal);
        portEXIT_CRITICAL_ISR(&context.recv_spinlock);
        if (has_next_item) {
            ESP_EARLY_LOGV(TAG, "intr_recv: Give");
            xSemaphoreGiveFromISR(context.recv_event, &yield);
            continue; // check the linked list again, skipping the interrupt check
        }
        // if there are no more items on the list, go back and check the interrupt again;
        // this loops until the interrupt bit stays cleared.
    }
    if (yield) portYIELD_FROM_ISR();
}

esp_err_t sdio_slave_recv_load_buf(sdio_slave_buf_handle_t handle)
{
    recv_desc_t *desc = (recv_desc_t*)handle;
    CHECK_HANDLE_IDLE(desc);
    assert(desc->not_receiving);

    critical_enter_recv();
    TAILQ_REMOVE(&context.recv_reg_list, desc, te);
    desc->not_receiving = 0; // manually clear the prev link (by setting not_receiving = 0) to indicate the desc is in the queue
    sdio_slave_hal_load_buf(context.hal, &desc->hal_desc);
    critical_exit_recv();
    return ESP_OK;
}

sdio_slave_buf_handle_t sdio_slave_recv_register_buf(uint8_t *start)
{
    SDIO_SLAVE_CHECK(esp_ptr_dma_capable(start) && (uint32_t)start % 4 == 0,
                     "buffer to register should be DMA capable and 32-bit aligned", NULL);
    recv_desc_t *desc = (recv_desc_t*)heap_caps_malloc(sizeof(recv_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (desc == NULL) {
        SDIO_SLAVE_LOGE("cannot allocate lldesc for new buffer");
        return NULL;
    }

    //initially in the reg list
    sdio_slave_hal_recv_init_desc(context.hal, &desc->hal_desc, start);
    critical_enter_recv();
    TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te);
    critical_exit_recv();
    return desc;
}

esp_err_t sdio_slave_recv(sdio_slave_buf_handle_t *handle_ret, uint8_t **out_addr, size_t *out_len, TickType_t wait)
{
    SDIO_SLAVE_CHECK(handle_ret != NULL, "handle address cannot be 0", ESP_ERR_INVALID_ARG);
    portBASE_TYPE ret = xSemaphoreTake(context.recv_event, wait);
    if (ret == pdFALSE) return ESP_ERR_TIMEOUT;

    critical_enter_recv();
    //remove from the queue, add back to the reg list.
    recv_desc_t *desc = (recv_desc_t*)sdio_slave_hal_recv_unload_desc(context.hal);
    assert(desc != NULL && desc->hal_desc.owner == 0);
    TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te);
    critical_exit_recv();

    *handle_ret = (sdio_slave_buf_handle_t)desc;
    if (out_addr) *out_addr = (uint8_t*)desc->hal_desc.buf;
    if (out_len) *out_len = desc->hal_desc.length;
    return ESP_OK;
}

esp_err_t sdio_slave_recv_unregister_buf(sdio_slave_buf_handle_t handle)
{
    recv_desc_t *desc = (recv_desc_t*)handle;
    CHECK_HANDLE_IDLE(desc); // if still in the queue, fail.

    critical_enter_recv();
    TAILQ_REMOVE(&context.recv_reg_list, desc, te);
    critical_exit_recv();
    free(desc);
    return ESP_OK;
}

uint8_t* sdio_slave_recv_get_buf(sdio_slave_buf_handle_t handle, size_t *len_o)
{
    if (handle == NULL) return NULL;
    recv_desc_t *desc = (recv_desc_t*)handle;

    if (len_o != NULL) *len_o = desc->hal_desc.length;
    return (uint8_t*)desc->hal_desc.buf;
}