// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>
#include "esp_err.h"
#include "esp_log.h"
#include "esp_pm.h"
#include "esp_osal/esp_osal.h"
#include "esp_osal/queue.h"
#include "esp_osal/semphr.h"
#include "esp_osal/task.h"
#include "soc/sdmmc_periph.h"
#include "soc/soc_memory_layout.h"
#include "driver/sdmmc_types.h"
#include "driver/sdmmc_defs.h"
#include "driver/sdmmc_host.h"
#include "sdmmc_private.h"


/* Number of DMA descriptors used for transfer.
 * Increasing this value above 4 doesn't improve performance for the usual case
 * of SD memory cards (most data transfers are multiples of 512 bytes).
 */
#define SDMMC_DMA_DESC_CNT  4
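
/* Illustrative sizing (assuming SDMMC_DMA_MAX_BUF_LEN from sdmmc_private.h is
 * 4096 bytes): 4 descriptors cover up to 4 * 4096 = 16384 bytes in flight at
 * once; longer transfers are handled by refilling completed descriptors in
 * fill_dma_descriptors() as the IDMAC releases them.
 */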

static const char* TAG = "sdmmc_req";

typedef enum {
    SDMMC_IDLE,
    SDMMC_SENDING_CMD,
    SDMMC_SENDING_DATA,
    SDMMC_BUSY,
} sdmmc_req_state_t;
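
/* Typical progression for a command with a data phase:
 * SDMMC_SENDING_CMD -> (CMD_DONE) -> SDMMC_SENDING_DATA -> (all DMA
 * descriptors consumed) -> SDMMC_BUSY -> (DATA_OVER) -> SDMMC_IDLE.
 * Commands without data go directly from SDMMC_SENDING_CMD to SDMMC_IDLE;
 * see process_events() below.
 */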

typedef struct {
    uint8_t* ptr;
    size_t size_remaining;
    size_t next_desc;
    size_t desc_remaining;
} sdmmc_transfer_state_t;

const uint32_t SDMMC_DATA_ERR_MASK =
        SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC |
        SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE  |
        SDMMC_INTMASK_EBE;

const uint32_t SDMMC_DMA_DONE_MASK =
        SDMMC_IDMAC_INTMASK_RI | SDMMC_IDMAC_INTMASK_TI |
        SDMMC_IDMAC_INTMASK_NI;

const uint32_t SDMMC_CMD_ERR_MASK =
        SDMMC_INTMASK_RTO |
        SDMMC_INTMASK_RCRC |
        SDMMC_INTMASK_RESP_ERR;
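
/* For reference (per the SDMMC peripheral register definitions): DTO is the
 * data read timeout, DCRC a data CRC error, HTO a data starvation/host
 * timeout, SBE/EBE start/end bit errors; RTO/RCRC flag response timeout and
 * response CRC errors; RI/TI/NI are the IDMAC receive, transmit and normal
 * interrupt summary bits.
 */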

static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
static sdmmc_transfer_state_t s_cur_transfer = { 0 };
static QueueHandle_t s_request_mutex;
static bool s_is_app_cmd;   // This flag is set if the next command is an APP command
#ifdef CONFIG_PM_ENABLE
static esp_pm_lock_handle_t s_pm_lock;
#endif

static esp_err_t handle_idle_state_events(void);
static sdmmc_hw_cmd_t make_hw_cmd(sdmmc_command_t* cmd);
static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
        sdmmc_event_t* unhandled_events);
static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
        sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events);
static void process_command_response(uint32_t status, sdmmc_command_t* cmd);
static void fill_dma_descriptors(size_t num_desc);
static size_t get_free_descriptors_count(void);
static bool wait_for_busy_cleared(int timeout_ms);

esp_err_t sdmmc_host_transaction_handler_init(void)
{
    assert(s_request_mutex == NULL);
    s_request_mutex = xSemaphoreCreateMutex();
    if (!s_request_mutex) {
        return ESP_ERR_NO_MEM;
    }
    s_is_app_cmd = false;
#ifdef CONFIG_PM_ENABLE
    esp_err_t err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "sdmmc", &s_pm_lock);
    if (err != ESP_OK) {
        vSemaphoreDelete(s_request_mutex);
        s_request_mutex = NULL;
        return err;
    }
#endif
    return ESP_OK;
}

void sdmmc_host_transaction_handler_deinit(void)
{
    assert(s_request_mutex);
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_delete(s_pm_lock);
    s_pm_lock = NULL;
#endif
    vSemaphoreDelete(s_request_mutex);
    s_request_mutex = NULL;
}

esp_err_t sdmmc_host_do_transaction(int slot, sdmmc_command_t* cmdinfo)
{
    esp_err_t ret;
    xSemaphoreTake(s_request_mutex, portMAX_DELAY);
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_acquire(s_pm_lock);
#endif
    // dispose of any events which happened asynchronously
    handle_idle_state_events();
    // convert cmdinfo to hardware register value
    sdmmc_hw_cmd_t hw_cmd = make_hw_cmd(cmdinfo);
    if (cmdinfo->data) {
        // Data length must be either less than 4 bytes, or a multiple of 4 bytes.
        if (cmdinfo->datalen >= 4 && cmdinfo->datalen % 4 != 0) {
            ESP_LOGD(TAG, "%s: invalid size: total=%d",
                    __func__, cmdinfo->datalen);
            ret = ESP_ERR_INVALID_SIZE;
            goto out;
        }
        // DMA needs a word-aligned buffer in DMA-capable memory
        if ((intptr_t) cmdinfo->data % 4 != 0 ||
                !esp_ptr_dma_capable(cmdinfo->data)) {
            ESP_LOGD(TAG, "%s: buffer %p can not be used for DMA", __func__, cmdinfo->data);
            ret = ESP_ERR_INVALID_ARG;
            goto out;
        }
        // this clears "owned by IDMAC" bits
        memset(s_dma_desc, 0, sizeof(s_dma_desc));
        // initialize first descriptor
        s_dma_desc[0].first_descriptor = 1;
        // save transfer info
        s_cur_transfer.ptr = (uint8_t*) cmdinfo->data;
        s_cur_transfer.size_remaining = cmdinfo->datalen;
        s_cur_transfer.next_desc = 0;
        s_cur_transfer.desc_remaining = (cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
        // prepare descriptors
        fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
        // write transfer info into hardware
        sdmmc_host_dma_prepare(&s_dma_desc[0], cmdinfo->blklen, cmdinfo->datalen);
    }
    // write command into hardware, this also sends the command to the card
    ret = sdmmc_host_start_command(slot, hw_cmd, cmdinfo->arg);
    if (ret != ESP_OK) {
        goto out;
    }
    // process events until transfer is complete
    cmdinfo->error = ESP_OK;
    sdmmc_req_state_t state = SDMMC_SENDING_CMD;
    sdmmc_event_t unhandled_events = { 0 };
    while (state != SDMMC_IDLE) {
        ret = handle_event(cmdinfo, &state, &unhandled_events);
        if (ret != ESP_OK) {
            break;
        }
    }
    if (ret == ESP_OK && (cmdinfo->flags & SCF_WAIT_BUSY)) {
        if (!wait_for_busy_cleared(cmdinfo->timeout_ms)) {
            ret = ESP_ERR_TIMEOUT;
        }
    }
    s_is_app_cmd = (ret == ESP_OK && cmdinfo->opcode == MMC_APP_CMD);

out:
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_release(s_pm_lock);
#endif
    xSemaphoreGive(s_request_mutex);
    return ret;
}
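
/* Usage sketch (illustrative, not part of this driver): issuing CMD0 from a
 * caller. The slot constant and flag values are assumptions based on
 * sdmmc_host.h and sdmmc_types.h.
 *
 *     sdmmc_command_t cmd = {
 *         .opcode = MMC_GO_IDLE_STATE,   // CMD0
 *         .arg = 0,
 *         .flags = SCF_CMD_BC | SCF_RSP_R0,
 *         .timeout_ms = 1000,
 *     };
 *     esp_err_t err = sdmmc_host_do_transaction(SDMMC_HOST_SLOT_1, &cmd);
 *     if (err == ESP_OK) {
 *         err = cmd.error;   // per-command error is reported separately
 *     }
 */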

static size_t get_free_descriptors_count(void)
{
    const size_t next = s_cur_transfer.next_desc;
    size_t count = 0;
    /* Starting with the current DMA descriptor, count the number of
     * descriptors which have 'owned_by_idmac' set to 0. These are the
     * descriptors already processed by the DMA engine.
     */
    for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
        sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
        if (desc->owned_by_idmac) {
            break;
        }
        ++count;
        if (desc->next_desc_ptr == NULL) {
            /* final descriptor in the chain */
            break;
        }
    }
    return count;
}
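
/* Example (hypothetical values): with SDMMC_DMA_DESC_CNT == 4 and
 * next_desc == 1, if descriptors 1 and 2 have been released by the IDMAC
 * (owned_by_idmac == 0) but descriptor 3 is still owned by it, the function
 * above returns 2.
 */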

static void fill_dma_descriptors(size_t num_desc)
{
    for (size_t i = 0; i < num_desc; ++i) {
        if (s_cur_transfer.size_remaining == 0) {
            return;
        }
        const size_t next = s_cur_transfer.next_desc;
        sdmmc_desc_t* desc = &s_dma_desc[next];
        assert(!desc->owned_by_idmac);
        size_t size_to_fill =
            (s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
                s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
        bool last = size_to_fill == s_cur_transfer.size_remaining;
        desc->last_descriptor = last;
        desc->second_address_chained = 1;
        desc->owned_by_idmac = 1;
        desc->buffer1_ptr = s_cur_transfer.ptr;
        desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
        assert(size_to_fill < 4 || size_to_fill % 4 == 0);
        desc->buffer1_size = (size_to_fill + 3) & (~3);

        s_cur_transfer.size_remaining -= size_to_fill;
        s_cur_transfer.ptr += size_to_fill;
        s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
        ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
                num_desc, next, s_cur_transfer.size_remaining,
                s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
    }
}
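
/* Worked example (assuming SDMMC_DMA_MAX_BUF_LEN == 4096): a 10000-byte
 * transfer starts with desc_remaining == 3. The initial
 * fill_dma_descriptors(SDMMC_DMA_DESC_CNT) call populates descriptors 0..2
 * with 4096, 4096 and 1808 bytes; the last one gets last_descriptor set and
 * a NULL next_desc_ptr, so the IDMAC stops there.
 */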

static esp_err_t handle_idle_state_events(void)
{
    /* Handle any events which have happened in between transfers.
     * Under current assumptions (no SDIO support) only card detect events
     * can happen in the idle state.
     */
    sdmmc_event_t evt;
    while (sdmmc_host_wait_for_event(0, &evt) == ESP_OK) {
        if (evt.sdmmc_status & SDMMC_INTMASK_CD) {
            ESP_LOGV(TAG, "card detect event");
            evt.sdmmc_status &= ~SDMMC_INTMASK_CD;
        }
        if (evt.sdmmc_status != 0 || evt.dma_status != 0) {
            ESP_LOGE(TAG, "handle_idle_state_events unhandled: %08x %08x",
                    evt.sdmmc_status, evt.dma_status);
        }
    }
    return ESP_OK;
}

static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
        sdmmc_event_t* unhandled_events)
{
    sdmmc_event_t event;
    esp_err_t err = sdmmc_host_wait_for_event(cmd->timeout_ms / portTICK_PERIOD_MS, &event);
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "sdmmc_host_wait_for_event returned 0x%x", err);
        if (err == ESP_ERR_TIMEOUT) {
            sdmmc_host_dma_stop();
        }
        return err;
    }
    ESP_LOGV(TAG, "sdmmc_handle_event: event %08x %08x, unhandled %08x %08x",
            event.sdmmc_status, event.dma_status,
            unhandled_events->sdmmc_status, unhandled_events->dma_status);
    event.sdmmc_status |= unhandled_events->sdmmc_status;
    event.dma_status |= unhandled_events->dma_status;
    process_events(event, cmd, state, unhandled_events);
    ESP_LOGV(TAG, "sdmmc_handle_event: events unhandled: %08x %08x",
            unhandled_events->sdmmc_status, unhandled_events->dma_status);
    return ESP_OK;
}

static bool cmd_needs_auto_stop(const sdmmc_command_t* cmd)
{
    /* SDMMC host needs an "auto stop" flag for the following commands: */
    return cmd->datalen > 0 &&
           (cmd->opcode == MMC_WRITE_BLOCK_MULTIPLE ||
            cmd->opcode == MMC_READ_BLOCK_MULTIPLE ||
            cmd->opcode == MMC_WRITE_DAT_UNTIL_STOP ||
            cmd->opcode == MMC_READ_DAT_UNTIL_STOP);
}

static sdmmc_hw_cmd_t make_hw_cmd(sdmmc_command_t* cmd)
{
    sdmmc_hw_cmd_t res = { 0 };

    res.cmd_index = cmd->opcode;
    if (cmd->opcode == MMC_STOP_TRANSMISSION) {
        res.stop_abort_cmd = 1;
    } else if (cmd->opcode == MMC_GO_IDLE_STATE) {
        res.send_init = 1;
    } else {
        res.wait_complete = 1;
    }
    if (cmd->flags & SCF_RSP_PRESENT) {
        res.response_expect = 1;
        if (cmd->flags & SCF_RSP_136) {
            res.response_long = 1;
        }
    }
    if (cmd->flags & SCF_RSP_CRC) {
        res.check_response_crc = 1;
    }
    res.use_hold_reg = 1;
    if (cmd->data) {
        res.data_expected = 1;
        if ((cmd->flags & SCF_CMD_READ) == 0) {
            res.rw = 1;
        }
        assert(cmd->datalen % cmd->blklen == 0);
        res.send_auto_stop = cmd_needs_auto_stop(cmd) ? 1 : 0;
    }
    ESP_LOGV(TAG, "%s: opcode=%d, rexp=%d, crc=%d, auto_stop=%d", __func__,
            res.cmd_index, res.response_expect, res.check_response_crc,
            res.send_auto_stop);
    return res;
}
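
/* Example (hypothetical): a single-block read (CMD17, MMC_READ_BLOCK_SINGLE)
 * issued with SCF_RSP_R1 | SCF_CMD_READ and a data buffer yields cmd_index=17,
 * wait_complete=1, response_expect=1, check_response_crc=1, data_expected=1,
 * rw=0 (read) and send_auto_stop=0 (single-block commands stop on their own).
 */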

static void process_command_response(uint32_t status, sdmmc_command_t* cmd)
{
    if (cmd->flags & SCF_RSP_PRESENT) {
        if (cmd->flags & SCF_RSP_136) {
            /* Destination is 4-byte aligned, can memcpy from peripheral registers */
            memcpy(cmd->response, (uint32_t*) SDMMC.resp, 4 * sizeof(uint32_t));
        } else {
            cmd->response[0] = SDMMC.resp[0];
            cmd->response[1] = 0;
            cmd->response[2] = 0;
            cmd->response[3] = 0;
        }
    }
    esp_err_t err = ESP_OK;
    if (status & SDMMC_INTMASK_RTO) {
        // response timeout is only possible when a response is expected
        assert(cmd->flags & SCF_RSP_PRESENT);
        err = ESP_ERR_TIMEOUT;
    } else if ((cmd->flags & SCF_RSP_CRC) && (status & SDMMC_INTMASK_RCRC)) {
        err = ESP_ERR_INVALID_CRC;
    } else if (status & SDMMC_INTMASK_RESP_ERR) {
        err = ESP_ERR_INVALID_RESPONSE;
    }
    if (err != ESP_OK) {
        cmd->error = err;
        if (cmd->data) {
            sdmmc_host_dma_stop();
        }
        ESP_LOGD(TAG, "%s: error 0x%x  (status=%08x)", __func__, err, status);
    }
}

static void process_data_status(uint32_t status, sdmmc_command_t* cmd)
{
    if (status & SDMMC_DATA_ERR_MASK) {
        if (status & SDMMC_INTMASK_DTO) {
            cmd->error = ESP_ERR_TIMEOUT;
        } else if (status & SDMMC_INTMASK_DCRC) {
            cmd->error = ESP_ERR_INVALID_CRC;
        } else if ((status & SDMMC_INTMASK_EBE) &&
                (cmd->flags & SCF_CMD_READ) == 0) {
            cmd->error = ESP_ERR_TIMEOUT;
        } else {
            cmd->error = ESP_FAIL;
        }
        SDMMC.ctrl.fifo_reset = 1;
    }
    if (cmd->error != 0) {
        if (cmd->data) {
            sdmmc_host_dma_stop();
        }
        ESP_LOGD(TAG, "%s: error 0x%x (status=%08x)", __func__, cmd->error, status);
    }
}

static inline bool mask_check_and_clear(uint32_t* state, uint32_t mask) {
    bool ret = ((*state) & mask) != 0;
    *state &= ~mask;
    return ret;
}

static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
        sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events)
{
    const char* const s_state_names[] __attribute__((unused)) = {
        "IDLE",
        "SENDING_CMD",
        "SENDING_DATA",
        "BUSY"
    };
    sdmmc_event_t orig_evt = evt;
    ESP_LOGV(TAG, "%s: state=%s evt=%x dma=%x", __func__, s_state_names[*pstate],
            evt.sdmmc_status, evt.dma_status);
    sdmmc_req_state_t next_state = *pstate;
    sdmmc_req_state_t state = (sdmmc_req_state_t) -1;
    while (next_state != state) {
        state = next_state;
        switch (state) {
            case SDMMC_IDLE:
                break;

            case SDMMC_SENDING_CMD:
                if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_CMD_ERR_MASK)) {
                    process_command_response(orig_evt.sdmmc_status, cmd);
                    break;      // Need to wait for the CMD_DONE interrupt
                }
                if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_CMD_DONE)) {
                    process_command_response(orig_evt.sdmmc_status, cmd);
                    if (cmd->error != ESP_OK) {
                        next_state = SDMMC_IDLE;
                        break;
                    }

                    if (cmd->data == NULL) {
                        next_state = SDMMC_IDLE;
                    } else {
                        next_state = SDMMC_SENDING_DATA;
                    }
                }
                break;

            case SDMMC_SENDING_DATA:
                if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_DATA_ERR_MASK)) {
                    process_data_status(orig_evt.sdmmc_status, cmd);
                    sdmmc_host_dma_stop();
                }
                if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {
                    s_cur_transfer.desc_remaining--;
                    if (s_cur_transfer.size_remaining) {
                        int desc_to_fill = get_free_descriptors_count();
                        fill_dma_descriptors(desc_to_fill);
                        sdmmc_host_dma_resume();
                    }
                    if (s_cur_transfer.desc_remaining == 0) {
                        next_state = SDMMC_BUSY;
                    }
                }
                if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
                    // On start bit error, DATA_DONE interrupt will not be generated
                    next_state = SDMMC_IDLE;
                    break;
                }
                break;

            case SDMMC_BUSY:
                if (!mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_DATA_OVER)) {
                    break;
                }
                process_data_status(orig_evt.sdmmc_status, cmd);
                next_state = SDMMC_IDLE;
                break;
        }
        ESP_LOGV(TAG, "%s state=%s next_state=%s", __func__,
                s_state_names[state], s_state_names[next_state]);
    }
    *pstate = state;
    *unhandled_events = evt;
    return ESP_OK;
}

static bool wait_for_busy_cleared(int timeout_ms)
{
    if (timeout_ms == 0) {
        return !sdmmc_host_card_busy();
    }

    /* It would have been nice to do this without polling, however the peripheral
     * can only generate the Busy Clear Interrupt for data write commands, and waiting
     * for busy clear is mostly needed for other commands such as MMC_SWITCH.
     */
    int timeout_ticks = (timeout_ms + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS;
    while (timeout_ticks-- > 0) {
        if (!sdmmc_host_card_busy()) {
            return true;
        }
        vTaskDelay(1);
    }
    return false;
}
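
/* Rounding example (hypothetical tick rate): with portTICK_PERIOD_MS == 10,
 * timeout_ms == 25 gives timeout_ticks == (25 + 10 - 1) / 10 == 3, so the
 * poll loop above waits at least the requested 25 ms before giving up.
 */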