// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdarg.h>
#include <sys/param.h>  //For max/min
#include "esp_attr.h"
#include "esp_private/system_internal.h"
#include "esp_spi_flash.h"   //for ``g_flash_guard_default_ops``
#include "esp_flash.h"
#include "esp_flash_partitions.h"
#include "esp_osal/esp_osal.h"
#include "esp_osal/task.h"
#include "hal/spi_types.h"
#include "sdkconfig.h"
#include "esp_log.h"

#include "esp_rom_sys.h"

#include "driver/spi_common_internal.h"

static const char TAG[] = "spi_flash";

/*
 * OS functions providing delay service and arbitration among chips, and with the cache.
 *
 * The cache needs to be disabled while a chip on the SPI1 bus is under operation, hence these functions need to be
 * placed in IRAM, and their data should be placed in DRAM.
 */

typedef struct {
    spi_bus_lock_dev_handle_t dev_lock;
} app_func_arg_t;

/*
 * Time yield algorithm:
 * Every time spi_flash_os_check_yield() is called:
 *
 * 1. If the time since the last end() call is longer than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS (time
 *    to yield), all counters are reset, as if a yield has just ended;
 * 2. If the time since the last yield() is longer than CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS, a
 *    yield request is returned. When yield() is called, all counters are reset.
 * Note: Short intervals between start() and end() after the last yield() will not reset the
 *       counter mentioned in #2, but will still be counted into the time mentioned in #2.
 */
typedef struct {
    app_func_arg_t common_arg; //shared args, must be the first item
    bool no_protect;    //decides whether to check the protected region (for the main chip) or not
    uint32_t acquired_since_us;    // Timestamp (us) of the last explicit yield()
    uint32_t released_since_us;    // Timestamp (us) of the last end() (implicit yield)
} spi1_app_func_arg_t;
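
/*
 * Illustrative sketch (assumption, not code from this file): the upper layer of the flash driver
 * is expected to poll the check_yield hook during long erase operations and, when a yield is
 * requested, release the bus, yield to other tasks, then re-acquire it, roughly like:
 *
 *     uint32_t request = 0;
 *     if (chip->os_func->check_yield(chip->os_func_data, 0, &request) == ESP_OK &&
 *         (request & SPI_FLASH_YIELD_REQ_YIELD) != 0) {
 *         chip->os_func->end(chip->os_func_data);    // re-enable the cache / release the bus
 *         chip->os_func->yield(chip->os_func_data, NULL);
 *         chip->os_func->start(chip->os_func_data);  // re-acquire before continuing the erase
 *     }
 */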

static inline IRAM_ATTR void on_spi1_released(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR void on_spi1_acquired(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR void on_spi1_yielded(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR bool on_spi1_check_yield(spi1_app_func_arg_t* ctx);

IRAM_ATTR static void cache_enable(void* arg)
{
#ifndef CONFIG_SPI_FLASH_AUTO_SUSPEND
    g_flash_guard_default_ops.end();
#endif
}

IRAM_ATTR static void cache_disable(void* arg)
{
#ifndef CONFIG_SPI_FLASH_AUTO_SUSPEND
    g_flash_guard_default_ops.start();
#endif
}

static IRAM_ATTR esp_err_t spi_start(void *arg)
{
    spi_bus_lock_dev_handle_t dev_lock = ((app_func_arg_t *)arg)->dev_lock;

    // wait for other devices (or cache) to finish their operation
    esp_err_t ret = spi_bus_lock_acquire_start(dev_lock, portMAX_DELAY);
    if (ret != ESP_OK) {
        return ret;
    }
    spi_bus_lock_touch(dev_lock);
    return ESP_OK;
}

static IRAM_ATTR esp_err_t spi_end(void *arg)
{
    return spi_bus_lock_acquire_end(((app_func_arg_t *)arg)->dev_lock);
}

static IRAM_ATTR esp_err_t spi1_start(void *arg)
{
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    //use the lock to disable the cache and interrupts before using the SPI bus
    return spi_start(arg);
#else
    //directly disable the cache and interrupts when lock is not used
    cache_disable(NULL);
    on_spi1_acquired((spi1_app_func_arg_t*)arg);
    return ESP_OK;
#endif
}

static IRAM_ATTR esp_err_t spi1_end(void *arg)
{
    esp_err_t ret = ESP_OK;
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    ret = spi_end(arg);
#else
    cache_enable(NULL);
#endif
    on_spi1_released((spi1_app_func_arg_t*)arg);
    return ret;
}

static IRAM_ATTR esp_err_t spi1_flash_os_check_yield(void *arg, uint32_t chip_status, uint32_t* out_request)
{
    assert (chip_status == 0);  //TODO: support suspend
    esp_err_t ret = ESP_ERR_TIMEOUT;    //Nothing happened
    uint32_t request = 0;

    if (on_spi1_check_yield((spi1_app_func_arg_t *)arg)) {
        request = SPI_FLASH_YIELD_REQ_YIELD;
        ret = ESP_OK;
    }
    if (out_request) {
        *out_request = request;
    }
    return ret;
}

static IRAM_ATTR esp_err_t spi1_flash_os_yield(void *arg, uint32_t* out_status)
{
#ifdef CONFIG_SPI_FLASH_ERASE_YIELD_TICKS
    vTaskDelay(CONFIG_SPI_FLASH_ERASE_YIELD_TICKS);
#else
    vTaskDelay(1);
#endif
    on_spi1_yielded((spi1_app_func_arg_t*)arg);
    return ESP_OK;
}

static IRAM_ATTR esp_err_t delay_us(void *arg, uint32_t us)
{
    esp_rom_delay_us(us);
    return ESP_OK;
}

static IRAM_ATTR void* get_buffer_malloc(void* arg, size_t request_size, size_t* out_size)
{
    /* Allocate temporary internal buffer to use for the actual read. If the preferred size
        doesn't fit in free internal memory, allocate the largest available free block.

        (May need to shrink read_chunk_size and retry due to race conditions with other tasks
        also allocating from the heap.)
    */
    void* ret = NULL;
    unsigned retries = 5;
    size_t read_chunk_size = request_size;
    while (ret == NULL && retries--) {
        read_chunk_size = MIN(read_chunk_size, heap_caps_get_largest_free_block(MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT));
        read_chunk_size = (read_chunk_size + 3) & ~3;
        ret = heap_caps_malloc(read_chunk_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    }
    ESP_LOGV(TAG, "allocate temp buffer: %p (%d)", ret, read_chunk_size);
    *out_size = (ret != NULL ? read_chunk_size : 0);
    return ret;
}
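
/*
 * Worked example (sizes are hypothetical): if 65536 bytes are requested but the largest free
 * internal block is only 16384 bytes, the chunk shrinks to 16384 (already a multiple of 4) and the
 * smaller buffer is returned with *out_size = 16384. If another task grabs that block between the
 * size query and the malloc, the loop retries with the then-largest free block, giving up after 5
 * attempts and returning NULL with *out_size = 0.
 */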

static IRAM_ATTR void release_buffer_malloc(void* arg, void *temp_buf)
{
    free(temp_buf);
}

static IRAM_ATTR esp_err_t main_flash_region_protected(void* arg, size_t start_addr, size_t size)
{
    if (((spi1_app_func_arg_t*)arg)->no_protect || esp_partition_main_flash_region_safe(start_addr, size)) {
        //ESP_OK = 0, also means protected==0
        return ESP_OK;
    } else {
        return ESP_ERR_NOT_SUPPORTED;
    }
}

static DRAM_ATTR spi1_app_func_arg_t main_flash_arg = {};

//for SPI1, we have to disable the cache and interrupts before using the SPI bus
static const DRAM_ATTR esp_flash_os_functions_t esp_flash_spi1_default_os_functions = {
    .start = spi1_start,
    .end = spi1_end,
    .region_protected = main_flash_region_protected,
    .delay_us = delay_us,
    .get_temp_buffer = get_buffer_malloc,
    .release_temp_buffer = release_buffer_malloc,
    .check_yield = spi1_flash_os_check_yield,
    .yield = spi1_flash_os_yield,
};

static const esp_flash_os_functions_t esp_flash_spi23_default_os_functions = {
    .start = spi_start,
    .end = spi_end,
    .delay_us = delay_us,
    .get_temp_buffer = get_buffer_malloc,
    .release_temp_buffer = release_buffer_malloc,
    .region_protected = NULL,
    .check_yield = NULL,
    .yield = NULL,
};

static spi_bus_lock_dev_handle_t register_dev(int host_id)
{
    spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
    spi_bus_lock_dev_handle_t dev_handle;
    spi_bus_lock_dev_config_t config = {.flags = SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED};
    esp_err_t err = spi_bus_lock_register_dev(lock, &config, &dev_handle);
    if (err != ESP_OK) {
        return NULL;
    }
    return dev_handle;
}

esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id, int* out_dev_id)
{
    spi_bus_lock_dev_handle_t dev_handle = NULL;

    // Skip initializing the bus lock when the bus is SPI1 and the bus is not shared with SPI Master
    // driver, leaving dev_handle = NULL
    bool skip_register_dev = (host_id == SPI1_HOST);
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    skip_register_dev = false;
#endif
    if (!skip_register_dev) {
        dev_handle = register_dev(host_id);
    }

    if (host_id == SPI1_HOST) {
        //SPI1
        chip->os_func = &esp_flash_spi1_default_os_functions;
        chip->os_func_data = heap_caps_malloc(sizeof(spi1_app_func_arg_t),
                                         MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (chip->os_func_data == NULL) {
            return ESP_ERR_NO_MEM;
        }
        *(spi1_app_func_arg_t*) chip->os_func_data = (spi1_app_func_arg_t) {
            .common_arg = {
                .dev_lock = dev_handle,
            },
            .no_protect = true,
        };
    } else if (host_id == SPI2_HOST || host_id == SPI3_HOST) {
        //SPI2, SPI3
        chip->os_func = &esp_flash_spi23_default_os_functions;
        chip->os_func_data = heap_caps_malloc(sizeof(app_func_arg_t),
                                         MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (chip->os_func_data == NULL) {
            return ESP_ERR_NO_MEM;
        }
        *(app_func_arg_t*) chip->os_func_data = (app_func_arg_t) {
                .dev_lock = dev_handle,
        };
    } else {
        return ESP_ERR_INVALID_ARG;
    }

    // When the bus lock is used, report the device ID it assigned; otherwise (bus lock not
    // initialized) the device ID should be given directly by the application.
    if (dev_handle) {
        *out_dev_id = spi_bus_lock_get_dev_id(dev_handle);
    }

    return ESP_OK;
}
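
/*
 * Hypothetical usage sketch (not part of this file): code attaching an external flash chip on a
 * shared bus is expected to call this after the bus and its lock have been initialized, e.g.:
 *
 *     esp_flash_t *chip = ...;   // chip descriptor being attached
 *     int dev_id = -1;
 *     esp_err_t err = esp_flash_init_os_functions(chip, SPI3_HOST, &dev_id);
 *     if (err == ESP_OK && dev_id >= 0) {
 *         // dev_id identifies the device slot registered on the bus lock for this chip
 *     }
 */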

esp_err_t esp_flash_deinit_os_functions(esp_flash_t* chip)
{
    if (chip->os_func_data) {
        spi_bus_lock_dev_handle_t dev_lock = ((app_func_arg_t*)chip->os_func_data)->dev_lock;
        // The SPI bus lock is possibly not used on the SPI1 bus
        if (dev_lock) {
            spi_bus_lock_unregister_dev(dev_lock);
        }
        free(chip->os_func_data);
    }
    chip->os_func = NULL;
    chip->os_func_data = NULL;
    return ESP_OK;
}

esp_err_t esp_flash_init_main_bus_lock(void)
{
    spi_bus_lock_init_main_bus();
    spi_bus_lock_set_bg_control(g_main_spi_bus_lock, cache_enable, cache_disable, NULL);

    esp_err_t err = spi_bus_lock_init_main_dev();
    if (err != ESP_OK) {
        return err;
    }
    return ESP_OK;
}

esp_err_t esp_flash_app_enable_os_functions(esp_flash_t* chip)
{
    main_flash_arg = (spi1_app_func_arg_t) {
        .common_arg = {
            .dev_lock = g_spi_lock_main_flash_dev,   //for SPI1
        },
        .no_protect = false,
    };
    chip->os_func = &esp_flash_spi1_default_os_functions;
    chip->os_func_data = &main_flash_arg;
    return ESP_OK;
}

// The goal of this part is to manually insert one valid task execution interval if the time since
// the last valid interval exceeds the limit (CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS).
//
// Valid task execution interval: continuous time with the cache enabled, which is longer than
// CONFIG_SPI_FLASH_ERASE_YIELD_TICKS. A yield shorter than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS is
// not treated as a valid interval.
static inline IRAM_ATTR bool on_spi1_check_yield(spi1_app_func_arg_t* ctx)
{
#ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
    uint32_t time = esp_system_get_time();
    // We handle the reset here instead of in `on_spi1_acquired()`: when the gap between release()
    // and acquire() is larger than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS, this saves one
    // `esp_system_get_time()` call
    if ((time - ctx->released_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_TICKS * portTICK_PERIOD_MS * 1000) {
        // Reset the acquired time as if the yield has just happened.
        ctx->acquired_since_us = time;
    } else if ((time - ctx->acquired_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS * 1000) {
        return true;
    }
#endif
    return false;
}
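
/*
 * Worked example (values are hypothetical, assuming a 100 Hz tick so portTICK_PERIOD_MS == 10):
 * with CONFIG_SPI_FLASH_ERASE_YIELD_TICKS == 1 and CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS == 20,
 * the first branch resets acquired_since_us whenever the bus has been released for at least
 * 10000 us, i.e. other tasks got a long enough run with the cache enabled; otherwise, once the
 * time since the last (explicit or implicit) yield reaches 20000 us, the function returns true and
 * the caller is asked to yield.
 */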
static inline IRAM_ATTR void on_spi1_released(spi1_app_func_arg_t* ctx)
{
#ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
    ctx->released_since_us = esp_system_get_time();
#endif
}

static inline IRAM_ATTR void on_spi1_acquired(spi1_app_func_arg_t* ctx)
{
    // Ideally, when the time after `on_spi1_released()` before this function is called is larger
    // than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS, the acquired time should be reset. We assume the
    // time after `on_spi1_check_yield()` before this function is so short that we can do the reset
    // in that function instead.
}

static inline IRAM_ATTR void on_spi1_yielded(spi1_app_func_arg_t* ctx)
{
    uint32_t time = esp_system_get_time();
    ctx->acquired_since_us = time;
}