// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The HAL layer for SDIO slave (common part)

#include <soc/slc_struct.h>
#include <soc/hinf_struct.h>
#include <hal/sdio_slave_types.h>
#include <soc/host_struct.h>
#include <string.h>
#include "hal/sdio_slave_hal.h"
#include "hal/hal_defs.h"
#include "esp_attr.h"


#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
    HAL_LOGE(TAG, "%s", str);\
    return ret_val;\
} }while (0)

static const char TAG[] = "SDIO_HAL";

static esp_err_t init_send_queue(sdio_slave_context_t *hal);

/**************** Ring buffer for SDIO sending use *****************/
typedef enum {
    RINGBUF_GET_ONE = 0,
    RINGBUF_GET_ALL = 1,
} ringbuf_get_all_t;

typedef enum {
    RINGBUF_WRITE_PTR,
    RINGBUF_READ_PTR,
    RINGBUF_FREE_PTR,
} sdio_ringbuf_pointer_t;

static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg);
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all);
static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr);

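// The send descriptors are chained through the STAILQ entry (``qe``) embedded in their
// ``dma_desc`` member, so the same field serves both the software ring and the DMA linked list.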
#define _SEND_DESC_NEXT(x)    STAILQ_NEXT(&((sdio_slave_hal_send_desc_t*)x)->dma_desc, qe)
#define SEND_DESC_NEXT(x)    (sdio_slave_hal_send_desc_t*)_SEND_DESC_NEXT(x)
#define SEND_DESC_NEXT_SET(x, target)    do { \
        _SEND_DESC_NEXT(x)=(lldesc_t*)target; \
    }while(0)

static esp_err_t link_desc_to_last(uint8_t* desc, void* arg)
{
    SEND_DESC_NEXT_SET(arg, desc);
    return ESP_OK;
}

//calculate a pointer with an offset from one of the pointers (write/read/free) of the specific ringbuffer
static inline uint8_t* sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset)
{
    uint8_t *buf_ptr;
    switch (ptr) {
        case RINGBUF_WRITE_PTR:
            buf_ptr = buf->write_ptr;
            break;
        case RINGBUF_READ_PTR:
            buf_ptr = buf->read_ptr;
            break;
        case RINGBUF_FREE_PTR:
            buf_ptr = buf->free_ptr;
            break;
        default:
            abort();
    }

    uint8_t *offset_ptr = buf_ptr + offset;
    if (offset_ptr >= buf->data + buf->size) {
        offset_ptr -= buf->size;
    }
    return offset_ptr;
}

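// Write one descriptor slot into the ringbuffer. The ``copy_callback`` (if given) fills the
// slot in place; the write pointer only advances when the callback succeeds.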
static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg)
{
    uint8_t* get_ptr = sdio_ringbuf_offset_ptr(buf, RINGBUF_WRITE_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    esp_err_t err = ESP_OK;
    if (copy_callback) {
        err = (*copy_callback)(get_ptr, arg);
    }
    if (err != ESP_OK) return err;

    buf->write_ptr = get_ptr;
    return ESP_OK;
}

// this ringbuf uses a return-before-recv-again strategy:
// since it is designed to be called in the ISR, there is no protection against parallel access
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all)
{
    assert(buf->free_ptr == buf->read_ptr);   //must return before recv again
    if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have an output
    if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data

    uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);

    if (get_all != RINGBUF_GET_ONE) {
        buf->read_ptr = buf->write_ptr;
    } else {
        buf->read_ptr = get_start;
    }

    if (start != NULL) {
        *start = get_start;
    }
    if (end != NULL) {
        *end = buf->read_ptr;
    }
    return ESP_OK;
}

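// Return previously received descriptors to the ringbuffer. ``ptr`` must be the first
// descriptor received and not yet returned; the number of freed descriptors is returned.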
static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr)
{
    assert(sdio_ringbuf_offset_ptr(buf, RINGBUF_FREE_PTR, SDIO_SLAVE_SEND_DESC_SIZE) == ptr);
    size_t size = (buf->read_ptr + buf->size - buf->free_ptr) % buf->size;
    size_t count = size / SDIO_SLAVE_SEND_DESC_SIZE;
    assert(count * SDIO_SLAVE_SEND_DESC_SIZE == size);
    buf->free_ptr = buf->read_ptr;
    return count;
}

static inline uint8_t* sdio_ringbuf_peek_front(sdio_ringbuf_t* buf)
{
    if (buf->read_ptr != buf->write_ptr) {
        return sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    } else {
        return NULL;
    }
}

static inline uint8_t* sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf)
{
    return buf->write_ptr;
}

static inline bool sdio_ringbuf_empty(sdio_ringbuf_t* buf)
{
    return (buf->read_ptr == buf->write_ptr);
}

/**************** End of Ring buffer *****************/

void sdio_slave_hal_init(sdio_slave_context_t *hal)
{
    hal->host = sdio_slave_ll_get_host(0);
    hal->slc = sdio_slave_ll_get_slc(0);
    hal->hinf = sdio_slave_ll_get_hinf(0);
    hal->send_state = STATE_IDLE;
    hal->recv_link_list = (sdio_slave_hal_recv_stailq_t)STAILQ_HEAD_INITIALIZER(hal->recv_link_list);

    init_send_queue(hal);
}

void sdio_slave_hal_hw_init(sdio_slave_context_t *hal)
{
    sdio_slave_ll_init(hal->slc);
    sdio_slave_ll_enable_hs(hal->hinf, true);
    sdio_slave_ll_set_timing(hal->host, hal->timing);
    sdio_slave_ll_slvint_t intr_ena = 0xff;
    sdio_slave_ll_slvint_set_ena(hal->slc, &intr_ena);
}

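// Initialize the send ringbuffer and walk through it once, so that the STAILQ_NEXT pointer
// of every descriptor slot points to the slot after it (and the last wraps back), forming
// the ring described in the "Send" comment below.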
static esp_err_t init_send_queue(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    esp_err_t rcv_res;
    sdio_ringbuf_t *buf = &(hal->send_desc_queue);

    //initialize pointers
    buf->write_ptr = buf->data;
    buf->read_ptr = buf->data;
    buf->free_ptr = buf->data;

    sdio_slave_hal_send_desc_t *first = NULL, *last = NULL;
    //no copy for the first descriptor

    ret = sdio_ringbuf_send(buf, NULL, NULL);
    if (ret != ESP_OK) return ret;

    //loop in the ringbuf to link all the descs one after another as a ring
    for (int i = 0; i < hal->send_queue_size + 1; i++) {
        rcv_res = sdio_ringbuf_recv(buf, (uint8_t **) &last, NULL, RINGBUF_GET_ONE);
        assert (rcv_res == ESP_OK);

        ret = sdio_ringbuf_send(buf, link_desc_to_last, last);
        if (ret != ESP_OK) return ret;

        sdio_ringbuf_return(buf, (uint8_t *) last);
    }

    first = NULL;
    last = NULL;
    //clear the queue
    rcv_res = sdio_ringbuf_recv(buf, (uint8_t **) &first, (uint8_t **) &last, RINGBUF_GET_ALL);
    assert (rcv_res == ESP_OK);
    assert(first == last); //only one desc should remain
    sdio_ringbuf_return(buf, (uint8_t *) first);
    return ESP_OK;
}

void sdio_slave_hal_set_ioready(sdio_slave_context_t *hal, bool ready)
{
    sdio_slave_ll_set_ioready(hal->hinf, ready);   //set IO ready to 1 to allow the host to use the slave
}


/*---------------------------------------------------------------------------
 *                  Send
 *
 *  The hardware has a cache, so once a descriptor is loaded onto the linked list, it cannot be modified
 *  until returned (used) by the hardware. This forbids us from loading descriptors onto the linked list during
 *  a transfer (or while waiting for the host to start a transfer). However, we use a "ringbuffer" (different from
 *  the one in the ``freertos/`` folder) holding descriptors to solve this:
 *
 *  1.  The driver allocates contiguous memory for several buffer descriptors (the maximum buffer number) during
 *      initialization. Then the driver points the STAILQ_NEXT pointer of each descriptor except the last one
 *      to the descriptor after it, and points the STAILQ_NEXT pointer of the last descriptor back to the first
 *      one: now the descriptors form a ring.
 *
 *  2.  The "ringbuffer" has a write pointer pointing to where the app can write a new descriptor. The app writes
 *      the new descriptor indicated by the write pointer without touching the STAILQ_NEXT pointer, so that the
 *      descriptors always stay in a ring-like linked list. The app never touches the part of the linked list
 *      being used by the hardware.
 *
 *  3.  When the hardware needs some data to send, it automatically picks a part of the linked descriptors.
 *      Depending on the mode:
 *          - Buffer mode: only the descriptor right after the last one sent is picked;
 *          - Stream mode: the whole unsent part of the linked list is picked, from the descriptor above to the
 *            latest linked one.
 *
 *      The driver removes the STAILQ_NEXT pointer of the last descriptor and passes the head of this part to the
 *      DMA controller, so that it looks like a linear linked list rather than a ring to the hardware.
 *
 *  4.  The counter of the sending FIFO can increase when the app loads new buffers (in STREAM_MODE) or when a new
 *      transfer should start (in PACKET_MODE).
 *
 *  5.  When the sending transfer is finished, the driver goes through the descriptors just sent in the ISR and
 *      pushes the ``arg`` member of each descriptor to the queue back to the app, so that the app can handle the
 *      finished buffers. The driver also fixes the STAILQ_NEXT pointer of the last descriptor so that the
 *      descriptors form a ring again.
----------------------------------------------------------------------------*/
static inline void send_set_state(sdio_slave_context_t *hal, send_state_t state)
{
    hal->send_state = state;
}

static inline send_state_t send_get_state(sdio_slave_context_t* hal)
{
    return hal->send_state;
}

DMA_ATTR static const lldesc_t start_desc = {
    .owner = 1,
    .buf = (void*)0x3ffbbbbb, //assign a dma-capable pointer other than NULL, which will not be used
    .size = 1,
    .length = 1,
    .eof = 1,
};

//force-trigger the rx_done interrupt. This interrupt is abused to invoke the ISR from the app by its enable bit, and is never cleared.
static void send_isr_invoker_enable(const sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, &start_desc);
    //wait for rx_done
    while(!sdio_slave_ll_send_invoker_ready(hal->slc));
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_hostint_clr(hal->host);
}

static void send_isr_invoker_disable(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_clear(hal->slc);
}

void sdio_slave_hal_send_handle_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, false);
}

//start HW operation with existing data (if any)
esp_err_t sdio_slave_hal_send_start(sdio_slave_context_t *hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "already started", ESP_ERR_INVALID_STATE);
    send_set_state(hal, STATE_WAIT_FOR_START);
    send_isr_invoker_enable(hal);
    sdio_slave_ll_send_intr_clr(hal->slc);
    sdio_slave_ll_send_intr_ena(hal->slc, true);
    return ESP_OK;
}

//only stops the HW operations; neither the data nor the counter is touched
void sdio_slave_hal_send_stop(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_stop(hal->slc);
    send_isr_invoker_disable(hal);
    sdio_slave_ll_send_intr_ena(hal->slc, false);
    send_set_state(hal, STATE_IDLE);
}

static void send_new_packet(sdio_slave_context_t *hal)
{
    // since the eof bit has changed, we have to stop and reset the linked list,
    // and restart a new linked-list operation
    sdio_slave_hal_send_desc_t *const start_desc = hal->in_flight_head;
    sdio_slave_hal_send_desc_t *const end_desc = hal->in_flight_end;
    assert(start_desc != NULL && end_desc != NULL);

    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, (lldesc_t*)start_desc);

    // update the pkt_len register to allow the host to read.
    sdio_slave_ll_send_write_len(hal->slc, end_desc->pkt_len);
    ESP_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", end_desc->pkt_len, sdio_slave_ll_send_read_len(hal->host));

    send_set_state(hal, STATE_SENDING);

    ESP_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %d", start_desc, end_desc, end_desc->pkt_len);
}

static esp_err_t send_check_new_packet(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *start = NULL;
    sdio_slave_hal_send_desc_t *end = NULL;
    if (hal->sending_mode == SDIO_SLAVE_SEND_PACKET) {
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &start, (uint8_t **) &end, RINGBUF_GET_ONE);
    } else { //stream mode
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &start, (uint8_t **) &end, RINGBUF_GET_ALL);
    }
    if (ret == ESP_OK) {
        hal->in_flight_head = start;
        hal->in_flight_end = end;
        end->dma_desc.eof = 1;
        //temporarily break the link ring here, the ring will be re-connected in ``send_isr_eof()``.
        hal->in_flight_next = SEND_DESC_NEXT(end);
        SEND_DESC_NEXT_SET(end, NULL);
    }
    //an empty ringbuf is not an error here; the caller checks ``in_flight_head`` instead.
    return ESP_OK;
}

bool sdio_slave_hal_send_eof_happened(sdio_slave_context_t* hal)
{
    // Go to the idle state (cur_start=NULL) if the transmission is done,
    // also update the sequence and recycle the descs.
    if (sdio_slave_ll_send_done(hal->slc)) {
        //check current state
        assert(send_get_state(hal) == STATE_SENDING);
        sdio_slave_ll_send_intr_clr(hal->slc);
        return true;
    } else {
        return false;
    }
}

//clear the counter but keep the data
esp_err_t sdio_slave_hal_send_reset_counter(sdio_slave_context_t* hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "reset counter when transmission started", ESP_ERR_INVALID_STATE);

    sdio_slave_ll_send_write_len(hal->slc, 0);
    ESP_EARLY_LOGV(TAG, "last_len: %08X", sdio_slave_ll_send_read_len(hal->host));

    hal->tail_pkt_len = 0;
    sdio_slave_hal_send_desc_t *desc = hal->in_flight_head;
    while(desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }
    // in theory the desc should be the one right after the last one of in_flight_head,
    // but the link of the last desc is NULL, so get the desc from the ringbuf directly.
    desc = (sdio_slave_hal_send_desc_t*)sdio_ringbuf_peek_front(&(hal->send_desc_queue));
    while(desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }

    return ESP_OK;
}

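// Walk the in-flight descriptors and hand their ``arg`` members back one at a time. When
// ``init`` is true, start from the head of the in-flight list. Returns ESP_ERR_NOT_FOUND
// once all descriptors are returned, after re-linking the ring and recycling the descs.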
static esp_err_t send_get_inflight_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_returned_cnt,
                                        bool init)
{
    esp_err_t ret;
    if (init) {
        assert(hal->returned_desc == NULL);
        hal->returned_desc = hal->in_flight_head;
        send_set_state(hal, STATE_GETTING_RESULT);
    }

    if (hal->returned_desc != NULL) {
        *out_arg = hal->returned_desc->arg;
        hal->returned_desc = SEND_DESC_NEXT(hal->returned_desc);
        ret = ESP_OK;
    } else {
        if (hal->in_flight_head != NULL) {
            // restore the link of the last desc, which was broken when it was sent
            assert(hal->in_flight_end != NULL);
            SEND_DESC_NEXT_SET(hal->in_flight_end, hal->in_flight_next);

            *out_returned_cnt = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*)hal->in_flight_head);
        }

        hal->in_flight_head = NULL;
        hal->in_flight_end = NULL;

        ret = ESP_ERR_NOT_FOUND;
    }
    return ret;
}

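// Pop one queued-but-unsent descriptor from the ringbuffer and hand its ``arg`` back to the
// app. Returns ESP_ERR_NOT_FOUND when the queue is drained, rolling the tail length back.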
static esp_err_t send_get_unsent_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *head, *tail;
    ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &head, (uint8_t **) &tail, RINGBUF_GET_ONE);

    if (ret == ESP_OK) {
        //currently each packet takes only one desc.
        assert(head == tail);
        (*out_arg) = head->arg;
        (*out_return_cnt) = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*) head);
    } else if (ret == ESP_ERR_NOT_FOUND) {
        // if in the wait-to-send state, set the sequence number of the tail to the value last sent,
        // just as if the packets waiting to be sent had never been queued.
        // Go to the idle state (cur_end!=NULL and cur_start=NULL)
        send_set_state(hal, STATE_IDLE);
        hal->tail_pkt_len = sdio_slave_ll_send_read_len(hal->host);
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_get_next_finished_arg(sdio_slave_context_t *hal, void **out_arg, uint32_t* out_returned_cnt)
{
    bool init = (send_get_state(hal) == STATE_SENDING);
    if (init) {
        assert(hal->in_flight_head != NULL);
    } else {
        assert(send_get_state(hal) == STATE_GETTING_RESULT);
    }
    *out_returned_cnt = 0;

    esp_err_t ret = send_get_inflight_desc(hal, out_arg, out_returned_cnt, init);

    if (ret == ESP_ERR_NOT_FOUND) {
        // Go to wait for packet state
        send_set_state(hal, STATE_WAIT_FOR_START);
    }
    return ret;
}


esp_err_t sdio_slave_hal_send_flush_next_buffer(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret = ESP_OK;
    *out_return_cnt = 0;
    bool init = (send_get_state(hal) == STATE_IDLE);
    if (!init) {
        if (send_get_state(hal) != STATE_GETTING_RESULT && send_get_state(hal) != STATE_GETTING_UNSENT_DESC) {
            return ESP_ERR_INVALID_STATE;
        }
    }

    if (init || send_get_state(hal) == STATE_GETTING_RESULT) {
        ret = send_get_inflight_desc(hal, out_arg, out_return_cnt, init);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_GETTING_UNSENT_DESC);
        }
    }
    if (send_get_state(hal) == STATE_GETTING_UNSENT_DESC) {
        ret = send_get_unsent_desc(hal, out_arg, out_return_cnt);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_IDLE);
        }
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_new_packet_if_exist(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    // Go to the wait-for-sending state (cur_start!=NULL && cur_end==NULL) if not sending and a new packet is ready.
    // Note we may also enter this state by stopping sending in the app.
    if (send_get_state(hal) == STATE_WAIT_FOR_START) {
        if (hal->in_flight_head == NULL) {
            send_check_new_packet(hal);
        }
        // Go to the sending state (cur_start and cur_end != NULL) if there is a packet to send.
        if (hal->in_flight_head) {
            send_new_packet(hal);
            ret = ESP_OK;
        } else {
            ret = ESP_ERR_NOT_FOUND;
        }
    } else {
        ret = ESP_ERR_INVALID_STATE;
    }
    return ret;
}

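// Copy a new descriptor into a ringbuffer slot, preserving the STAILQ_NEXT link that
// memcpy() would otherwise overwrite, so the ring stays intact.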
static esp_err_t send_write_desc(uint8_t* desc, void* arg)
{
    sdio_slave_hal_send_desc_t* next_desc = SEND_DESC_NEXT(desc);
    memcpy(desc, arg, sizeof(sdio_slave_hal_send_desc_t));
    SEND_DESC_NEXT_SET(desc, next_desc);
    return ESP_OK;
}

static void send_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, true);
}

esp_err_t sdio_slave_hal_send_queue(sdio_slave_context_t* hal, uint8_t *addr, size_t len, void *arg)
{
    hal->tail_pkt_len += len;
    sdio_slave_hal_send_desc_t new_desc = {
        .dma_desc = {
            .size   =   len,
            .length =   len,
            .buf    =   addr,
            .owner  =   1,
            // in stream mode, the eof bit is only set (in the ISR) when a new packet is ready to be sent
            .eof    =   (hal->sending_mode == SDIO_SLAVE_SEND_PACKET),
        },
        .arg    =   arg,
        .pkt_len = hal->tail_pkt_len,
    };

    esp_err_t ret = sdio_ringbuf_send(&(hal->send_desc_queue), send_write_desc, &new_desc);
    send_isr_invoke(hal);
    return ret;
}

/*---------------------------------------------------------------------------
 *                  Receive
 *--------------------------------------------------------------------------*/

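// Walk the recv linked list and return the first descriptor still owned by the DMA
// (owner == 1, i.e. still empty), skipping the ones already filled; NULL if none is left.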
static lldesc_t* recv_get_first_empty_buf(sdio_slave_context_t* hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    lldesc_t *desc = STAILQ_FIRST(queue);
    while(desc && desc->owner == 0) {
        desc = STAILQ_NEXT(desc, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_stop(sdio_slave_context_t* hal)
{
    sdio_slave_ll_set_ioready(hal->hinf, false); //set IO ready to 0 to stop the host from using the slave
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_recv_stop(hal->slc);
    sdio_slave_ll_recv_intr_ena(hal->slc, false);
}

//touches the linked list, so it should be protected by a spinlock
bool sdio_slave_hal_recv_has_next_item(sdio_slave_context_t* hal)
{
    if (hal->recv_cur_ret == NULL || hal->recv_cur_ret->owner != 0) return false;

    // This may cause the ``cur_ret`` pointer to become NULL, indicating the list is empty;
    // in this case ``tx_done`` will no longer happen until a new desc is appended.
    // The app is responsible for placing the pointer back to the right place when appending a new desc.

    hal->recv_cur_ret = STAILQ_NEXT(hal->recv_cur_ret, qe);
    return true;
}

bool sdio_slave_hal_recv_done(sdio_slave_context_t *hal)
{
    bool ret = sdio_slave_ll_recv_done(hal->slc);
    if (ret) {
        sdio_slave_ll_recv_done_clear(hal->slc);
    }
    return ret;
}

lldesc_t *sdio_slave_hal_recv_unload_desc(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    lldesc_t *desc = STAILQ_FIRST(queue);
    if (desc) {
        STAILQ_REMOVE_HEAD(queue, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_init_desc(sdio_slave_context_t* hal, lldesc_t *desc, uint8_t *start)
{
    *desc = (lldesc_t) {
        .size = hal->recv_buffer_size,
        .buf = start,
    };
}

void sdio_slave_hal_recv_start(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_reset(hal->slc);
    lldesc_t *desc = recv_get_first_empty_buf(hal);
    if (!desc) {
        HAL_LOGD(TAG, "recv: restart without desc");
    } else {
        //the counter is handled by add/flush/reset
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
    }
}

void sdio_slave_hal_recv_reset_counter(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_size_reset(hal->slc);
    lldesc_t *desc = recv_get_first_empty_buf(hal);
    while (desc != NULL) {
        sdio_slave_ll_recv_size_inc(hal->slc);
        desc = STAILQ_NEXT(desc, qe);
    }
}

void sdio_slave_hal_recv_flush_one_buffer(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    lldesc_t *desc = STAILQ_FIRST(queue);
    assert (desc != NULL && desc->owner == 0);
    STAILQ_REMOVE_HEAD(queue, qe);
    desc->owner = 1;
    STAILQ_INSERT_TAIL(queue, desc, qe);
    sdio_slave_ll_recv_size_inc(hal->slc);
    //we only add it to the tail here, without starting the DMA or increasing the buffer number.
}

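// Append one app-provided buffer descriptor to the recv linked list, mark it owned by the
// DMA, and start (or restart) the hardware linked-list operation accordingly.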
void sdio_slave_hal_load_buf(sdio_slave_context_t *hal, lldesc_t *desc)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    desc->owner = 1;

    lldesc_t *const tail = STAILQ_LAST(queue, lldesc_s, qe);

    STAILQ_INSERT_TAIL(queue, desc, qe);
    if (hal->recv_cur_ret == NULL) {
        hal->recv_cur_ret = desc;
    }

    if (tail == NULL) {
        //nothing in the linked list yet, start a new linked-list operation.
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
        HAL_LOGV(TAG, "recv_load_buf: start new");
    } else {
        //restart the former linked-list operation
        sdio_slave_ll_recv_restart(hal->slc);
        HAL_LOGV(TAG, "recv_load_buf: restart");
    }
    sdio_slave_ll_recv_size_inc(hal->slc);
}

static inline void show_queue_item(lldesc_t *item)
{
    ESP_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner);
    ESP_EARLY_LOGI(TAG, "   buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next);
}

static void __attribute__((unused)) dump_queue(sdio_slave_hal_recv_stailq_t *queue)
{
    int cnt = 0;
    lldesc_t *item = NULL;
    ESP_EARLY_LOGI(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last);
    STAILQ_FOREACH(item, queue, qe) {
        cnt++;
        show_queue_item(item);
    }
    ESP_EARLY_LOGI(TAG, "total: %d", cnt);
}

/*---------------------------------------------------------------------------
 *                  Host
 *--------------------------------------------------------------------------*/
void sdio_slave_hal_hostint_get_ena(sdio_slave_context_t *hal, sdio_slave_hostint_t *out_int_mask)
{
    *out_int_mask = sdio_slave_ll_host_get_intena(hal->host);
}

void sdio_slave_hal_hostint_clear(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_intr_clear(hal->host, mask);//clear the masked interrupts
}

void sdio_slave_hal_hostint_set_ena(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_set_intena(hal->host, mask);
}

void sdio_slave_hal_hostint_send(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_send_int(hal->slc, mask);
}

uint8_t sdio_slave_hal_host_get_reg(sdio_slave_context_t *hal, int pos)
{
    return sdio_slave_ll_host_get_reg(hal->host, pos);
}

void sdio_slave_hal_host_set_reg(sdio_slave_context_t *hal, int pos, uint8_t reg)
{
    sdio_slave_ll_host_set_reg(hal->host, pos, reg);
}

void sdio_slave_hal_slvint_fetch_clear(sdio_slave_context_t *hal, sdio_slave_ll_slvint_t *out_int_mask)
{
    sdio_slave_ll_slvint_fetch_clear(hal->slc, out_int_mask);
}