// Copyright 2010-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>     //for memcpy(); no other header here is guaranteed to provide it
#include "esp_log.h"
#include "esp_osal/esp_osal.h"
#include "esp_osal/semphr.h"
#include "esp_osal/queue.h"
#include "ringbuf.h"
#include "driver/gpio.h"
#include "driver/spi_common_internal.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"


#if (SOC_SPI_PERIPH_NUM == 2)
#define VALID_HOST(x) ((x) == SPI2_HOST)
#elif (SOC_SPI_PERIPH_NUM == 3)
#define VALID_HOST(x) ((x) >= SPI2_HOST && (x) <= SPI3_HOST)
#endif
#define SPIHD_CHECK(cond, warn, ret) do { if (!(cond)) { ESP_LOGE(TAG, warn); return ret; } } while(0)
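//The check macro logs an error and returns early on failure. For example (an
//illustrative expansion, not code that appears elsewhere in this file):
//
//    SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
//
//expands to roughly:
//
//    do {
//        if (!(VALID_HOST(host_id))) {
//            ESP_LOGE(TAG, "invalid host");
//            return ESP_ERR_INVALID_ARG;
//        }
//    } while(0)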

typedef struct {
    bool dma_enabled;
    int max_transfer_sz;
    uint32_t flags;
    portMUX_TYPE int_spinlock;
    intr_handle_t intr;
    intr_handle_t intr_dma;
    spi_slave_hd_callback_config_t callback;
    spi_slave_hd_hal_context_t hal;
    bool append_mode;

    QueueHandle_t tx_trans_queue;
    QueueHandle_t tx_ret_queue;
    QueueHandle_t rx_trans_queue;
    QueueHandle_t rx_ret_queue;
    SemaphoreHandle_t tx_cnting_sem;
    SemaphoreHandle_t rx_cnting_sem;

    spi_slave_hd_data_t *tx_desc;
    spi_slave_hd_data_t *rx_desc;
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;
#endif
} spi_slave_hd_slot_t;

static spi_slave_hd_slot_t *spihost[SOC_SPI_PERIPH_NUM];
static const char TAG[] = "slave_hd";

static void spi_slave_hd_intr_segment(void *arg);
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static void spi_slave_hd_intr_append(void *arg);
#endif

esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config,
                            const spi_slave_hd_slot_config_t *config)
{
    bool spi_chan_claimed;
    bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
    uint32_t actual_tx_dma_chan = 0;
    uint32_t actual_rx_dma_chan = 0;
    esp_err_t ret = ESP_OK;

    SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
#if CONFIG_IDF_TARGET_ESP32S2
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == (int)host_id || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only supports spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif
#if !CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    SPIHD_CHECK(append_mode == 0, "Append mode is only supported on ESP32S2 now", ESP_ERR_INVALID_ARG);
#endif

    spi_chan_claimed = spicommon_periph_claim(host_id, "slave_hd");
    SPIHD_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spi_slave_hd_slot_t *host = calloc(1, sizeof(spi_slave_hd_slot_t));
    if (host == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    spihost[host_id] = host;
    host->int_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    host->dma_enabled = (config->dma_chan != SPI_DMA_DISABLED);

    if (host->dma_enabled) {
        ret = spicommon_slave_dma_chan_alloc(host_id, config->dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }

    ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &host->flags);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
    spicommon_cs_initialize(host_id, config->spics_io_num, 0,
                            !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
    host->append_mode = append_mode;

    spi_slave_hd_hal_config_t hal_config = {
        .host_id = host_id,
        .dma_in = SPI_LL_GET_HW(host_id),
        .dma_out = SPI_LL_GET_HW(host_id),
        .dma_enabled = host->dma_enabled,
        .tx_dma_chan = actual_tx_dma_chan,
        .rx_dma_chan = actual_rx_dma_chan,
        .append_mode = append_mode,
        .mode = config->mode,
        .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
        .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
    };

    if (host->dma_enabled) {
        //Malloc for all the DMA descriptors
        uint32_t total_desc_size = spi_slave_hd_hal_get_total_desc_size(&host->hal, bus_config->max_transfer_sz);
        host->hal.dmadesc_tx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        host->hal.dmadesc_rx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }

        //Get the actual SPI bus transaction size in bytes.
        host->max_transfer_sz = spi_salve_hd_hal_get_max_bus_size(&host->hal);
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
        host->max_transfer_sz = 0;
    }

    //Init the hal according to the hal_config set above
    spi_slave_hd_hal_init(&host->hal, &hal_config);

#ifdef CONFIG_PM_ENABLE
    ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", &host->pm_lock);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    //Lock APB frequency while the SPI slave driver is in use
    esp_pm_lock_acquire(host->pm_lock);
#endif //CONFIG_PM_ENABLE

    //Create queues and semaphores
    host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    if (!host->tx_ret_queue || !host->rx_ret_queue) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    if (!host->append_mode) {
        host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        if (!host->tx_trans_queue || !host->rx_trans_queue) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        host->tx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        host->rx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        if (!host->tx_cnting_sem || !host->rx_cnting_sem) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2

    //Alloc intr
    if (!host->append_mode) {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2

    //Init callbacks
    memcpy(&host->callback, &config->cb_config, sizeof(spi_slave_hd_callback_config_t));
    spi_event_t event = 0;
    if (host->callback.cb_buffer_tx != NULL) event |= SPI_EV_BUF_TX;
    if (host->callback.cb_buffer_rx != NULL) event |= SPI_EV_BUF_RX;
    if (host->callback.cb_cmd9 != NULL) event |= SPI_EV_CMD9;
    if (host->callback.cb_cmdA != NULL) event |= SPI_EV_CMDA;
    spi_slave_hd_hal_enable_event_intr(&host->hal, event);

    return ESP_OK;

cleanup:
    //Memory is freed in the deinit function
    spi_slave_hd_deinit(host_id);
    return ret;
}
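/*
 * Usage sketch (illustrative only: the GPIO numbers, transfer size and queue
 * depth below are assumptions for the example, not values this driver requires):
 *
 *     spi_bus_config_t bus_cfg = {
 *         .mosi_io_num = 11,
 *         .miso_io_num = 13,
 *         .sclk_io_num = 12,
 *         .quadwp_io_num = -1,
 *         .quadhd_io_num = -1,
 *         .max_transfer_sz = 4096,
 *     };
 *     spi_slave_hd_slot_config_t slot_cfg = {
 *         .mode = 0,
 *         .spics_io_num = 10,
 *         .dma_chan = SPI_DMA_CH_AUTO,
 *         .queue_size = 4,
 *     };
 *     ESP_ERROR_CHECK(spi_slave_hd_init(SPI2_HOST, &bus_cfg, &slot_cfg));
 *     //...queue transactions with the segment/append mode APIs below...
 *     ESP_ERROR_CHECK(spi_slave_hd_deinit(SPI2_HOST));
 */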

esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    if (host == NULL) return ESP_ERR_INVALID_ARG;

    //This function also serves as the cleanup path of spi_slave_hd_init, so any
    //of the handles below may still be NULL here.
    if (host->tx_trans_queue) vQueueDelete(host->tx_trans_queue);
    if (host->tx_ret_queue) vQueueDelete(host->tx_ret_queue);
    if (host->rx_trans_queue) vQueueDelete(host->rx_trans_queue);
    if (host->rx_ret_queue) vQueueDelete(host->rx_ret_queue);
    if (host->tx_cnting_sem) vSemaphoreDelete(host->tx_cnting_sem);
    if (host->rx_cnting_sem) vSemaphoreDelete(host->rx_cnting_sem);
    free(host->hal.dmadesc_tx);
    free(host->hal.dmadesc_rx);
    esp_intr_free(host->intr);
    esp_intr_free(host->intr_dma);
#ifdef CONFIG_PM_ENABLE
    if (host->pm_lock) {
        esp_pm_lock_release(host->pm_lock);
        esp_pm_lock_delete(host->pm_lock);
    }
#endif

    spicommon_periph_free(host_id);
    if (host->dma_enabled) {
        spicommon_slave_free_dma(host_id);
    }
    free(host);
    spihost[host_id] = NULL;
    return ESP_OK;
}

static void tx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_SEND);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static void rx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_RECV);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static inline IRAM_ATTR BaseType_t intr_check_clear_callback(spi_slave_hd_slot_t *host, spi_event_t ev, slave_cb_t cb)
{
    BaseType_t cb_awoken = pdFALSE;
    if (spi_slave_hd_hal_check_clear_event(&host->hal, ev) && cb) {
        spi_slave_hd_event_t event = {.event = ev};
        cb(host->callback.arg, &event, &cb_awoken);
    }
    return cb_awoken;
}
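/*
 * The callbacks run in interrupt context. A minimal sketch of one (the name
 * my_cb_sent and its use of a task notification are assumptions for the example):
 *
 *     static IRAM_ATTR bool my_cb_sent(void *arg, spi_slave_hd_event_t *event, BaseType_t *awoken)
 *     {
 *         TaskHandle_t task = (TaskHandle_t)arg;
 *         vTaskNotifyGiveFromISR(task, awoken);  //wake a task waiting for the descriptor
 *         return true;  //true: the driver still pushes the descriptor to the return queue
 *     }
 */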

static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_TX, callback->cb_buffer_tx);
    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_RX, callback->cb_buffer_rx);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMD9, callback->cb_cmd9);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMDA, callback->cb_cmdA);

    bool tx_done = false;
    bool rx_done = false;

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        bool ret_queue = true;
        if (callback->cb_sent) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_SEND,
                .trans = host->tx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_desc, &awoken);
            //Should never fail: the descriptors remaining in the trans queue plus the ret queue never exceed the queue length
            assert(ret == pdTRUE);
        }
        host->tx_desc = NULL;
    }
    if (rx_done) {
        bool ret_queue = true;
        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
        if (callback->cb_recv) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_RECV,
                .trans = host->rx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_desc, &awoken);
            //Should never fail: the descriptors remaining in the trans queue plus the ret queue never exceed the queue length
            assert(ret == pdTRUE);
        }
        host->rx_desc = NULL;
    }

    bool tx_sent = false;
    bool rx_sent = false;
    if (!host->tx_desc) {
        ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
            tx_sent = true;
            if (callback->cb_send_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND_DMA_READY,
                    .trans = host->tx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_send_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }
    if (!host->rx_desc) {
        ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
            rx_sent = true;
            if (callback->cb_recv_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV_DMA_READY,
                    .trans = host->rx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_recv_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (tx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_SEND);
    }
    if (rx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_RECV);
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}

#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    bool tx_done = false;
    bool rx_done = false;
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        spi_slave_hd_data_t *trans_desc;
        while (1) {
            bool trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
            if (!trans_finish) {
                break;
            }

            bool ret_queue = true;
            if (callback->cb_sent) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }

            if (ret_queue) {
                ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);

                ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }

    if (rx_done) {
        spi_slave_hd_data_t *trans_desc;
        size_t trans_len;
        while (1) {
            bool trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
            if (!trans_finish) {
                break;
            }
            trans_desc->trans_len = trans_len;

            bool ret_queue = true;
            if (callback->cb_recv) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }

            if (ret_queue) {
                ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);

                ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2

static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_data_t *trans;
    BaseType_t ret;

    if (chan == SPI_SLAVE_CHAN_TX) {
        ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
    } else {
        ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
    }
    if (ret == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }

    *out_trans = trans;
    return ESP_OK;
}

//---------------------------------------------------------Segment Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xQueueSend(host->tx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        tx_invoke(host);
    } else { //chan == SPI_SLAVE_CHAN_RX
        BaseType_t ret = xQueueSend(host->rx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        rx_invoke(host);
    }
    return ESP_OK;
}
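/*
 * Segment mode usage sketch (illustrative; the buffer size and timeouts are
 * assumptions for the example). The same pattern works for SPI_SLAVE_CHAN_TX,
 * queueing data for the master to read:
 *
 *     WORD_ALIGNED_ATTR static uint8_t rx_buf[64];  //must be DMA capable
 *     spi_slave_hd_data_t trans = {
 *         .data = rx_buf,
 *         .len = sizeof(rx_buf),
 *     };
 *     spi_slave_hd_data_t *ret_trans;
 *     ESP_ERROR_CHECK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &trans, portMAX_DELAY));
 *     ESP_ERROR_CHECK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
 *     //ret_trans == &trans here; ret_trans->trans_len holds the number of bytes actually received
 */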

esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}

void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_data, size_t len)
{
    spi_slave_hd_hal_read_buffer(&spihost[host_id]->hal, addr, out_data, len);
}

void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *data, size_t len)
{
    spi_slave_hd_hal_write_buffer(&spihost[host_id]->hal, addr, data, len);
}
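/*
 * The shared registers form a small window both sides can access: the master
 * through its buffer read/write commands, the slave through the two functions
 * above. A sketch (the address 0 and the status-word use are assumptions for
 * the example):
 *
 *     uint32_t status = 0x5A;
 *     //Publish a 4-byte status word at shared-buffer address 0 for the master to poll
 *     spi_slave_hd_write_buffer(SPI2_HOST, 0, (uint8_t *)&status, sizeof(status));
 *
 *     //Later, read back what the master wrote into the same window
 *     spi_slave_hd_read_buffer(SPI2_HOST, 0, (uint8_t *)&status, sizeof(status));
 */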

#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
//---------------------------------------------------------Append Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    esp_err_t err;
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_hal_context_t *hal = &host->hal;

    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently only transactions with data length within 4092 bytes are supported", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
    } else {
        BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
    }
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "Cannot append this transaction now, wait until the DMA finishes its current transaction");
    }

    return err;
}

esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}
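/*
 * Append mode usage sketch (illustrative; the buffer sizes are assumptions for
 * the example). Unlike segment mode, new buffers can be appended to the DMA
 * chain while it is still running:
 *
 *     WORD_ALIGNED_ATTR static uint8_t tx_buf[2][256];  //must be DMA capable
 *     spi_slave_hd_data_t trans[2] = {
 *         { .data = tx_buf[0], .len = sizeof(tx_buf[0]) },
 *         { .data = tx_buf[1], .len = sizeof(tx_buf[1]) },
 *     };
 *     for (int i = 0; i < 2; i++) {
 *         ESP_ERROR_CHECK(spi_slave_hd_append_trans(SPI2_HOST, SPI_SLAVE_CHAN_TX, &trans[i], portMAX_DELAY));
 *     }
 *     spi_slave_hd_data_t *ret_trans;
 *     ESP_ERROR_CHECK(spi_slave_hd_get_append_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));
 */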
#endif //#if CONFIG_IDF_TARGET_ESP32S2