• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include "sdkconfig.h"
#include "hal/cpu_hal.h"
#include "soc/compare_set.h"
#include "soc/cpu.h"
#include "soc/soc_memory_layout.h"

#if __XTENSA__
#include "xtensa/xtruntime.h"
#endif
27 
28 #ifdef __cplusplus
29 extern "C" {
30 #endif
31 
/* When the lock may live in external (SPI) RAM, its fields must be
 * volatile to work around the PSRAM cache issue on affected silicon. */
#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

/* Sentinel stored in 'owner' while no core holds the lock */
#define SPINLOCK_FREE          0xB33FFFFF
/* Timeout arguments accepted by spinlock_acquire() */
#define SPINLOCK_WAIT_FOREVER  (-1)
#define SPINLOCK_NO_WAIT        0
/* Static initializer: unlocked, nesting depth zero */
#define SPINLOCK_INITIALIZER   {.owner = SPINLOCK_FREE,.count = 0}
/* XOR mask mapping one core-ID register value to the other core's value */
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)

/**
 * @brief Recursive inter-core spinlock.
 *
 * 'owner' is SPINLOCK_FREE when unlocked, otherwise the owning core's
 * ID register value; 'count' is the recursive-acquire nesting depth.
 */
typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;   /* SPINLOCK_FREE or owning core's ID */
    NEED_VOLATILE_MUX uint32_t count;   /* nesting depth of recursive locking */
} spinlock_t;
48 /**
49  * @brief Initialize a lock to its default state - unlocked
50  * @param lock - spinlock object to initialize
51  */
spinlock_initialize(spinlock_t * lock)52 static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
53 {
54     assert(lock);
55 #if !CONFIG_FREERTOS_UNICORE
56     lock->owner = SPINLOCK_FREE;
57     lock->count = 0;
58 #endif
59 }
60 /**
61  * @brief Top level spinlock acquire function, spins until get the lock
62  * @param lock - target spinlock object
63  * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocs indefinitely
64  */
spinlock_acquire(spinlock_t * lock,int32_t timeout)65 static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
66 {
67 #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
68     uint32_t result;
69     uint32_t irq_status;
70     uint32_t ccount_start;
71     uint32_t core_id, other_core_id;
72 
73     assert(lock);
74     irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
75 
76     if(timeout != SPINLOCK_WAIT_FOREVER){
77         RSR(CCOUNT, ccount_start);
78     }
79 
80     /*spin until we own a core */
81     RSR(PRID, core_id);
82 
83     /* Note: coreID is the full 32 bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */
84 
85     other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
86     do {
87 
88         /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
89          * CORE_ID_REGVAL_APP:
90          *  - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
91          *  - If "our" core_id, we can drop through immediately.
92          *  - If "other_core_id", we spin here.
93          */
94         result = core_id;
95 
96 #if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
97         if (esp_ptr_external_ram(lock)) {
98             compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
99         } else {
100 #endif
101         compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
102 #if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
103         }
104 #endif
105         if(result != other_core_id) {
106             break;
107         }
108 
109         if (timeout != SPINLOCK_WAIT_FOREVER) {
110             uint32_t ccount_now;
111             ccount_now = cpu_hal_get_cycle_count();
112             if (ccount_now - ccount_start > (unsigned)timeout) {
113                 XTOS_RESTORE_INTLEVEL(irq_status);
114                 return false;
115             }
116         }
117     }while(1);
118 
119     /* any other value implies memory corruption or uninitialized mux */
120     assert(result == core_id || result == SPINLOCK_FREE);
121     assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're first to lock iff count is zero */
122     assert(lock->count < 0xFF); /* Bad count value implies memory corruption */
123 
124     lock->count++;
125     XTOS_RESTORE_INTLEVEL(irq_status);
126     return true;
127 
128 #else  // !CONFIG_FREERTOS_UNICORE
129     return true;
130 #endif
131 }
132 
133 /**
134  * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
135  * @param lock - target, locked before, spinlock object
136  */
spinlock_release(spinlock_t * lock)137 static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
138 {
139 #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
140     uint32_t irq_status;
141     uint32_t core_id;
142 
143     assert(lock);
144     irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
145 
146     RSR(PRID, core_id);
147     assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
148     lock->count--;
149 
150     if(!lock->count) {
151         lock->owner = SPINLOCK_FREE;
152     } else {
153         assert(lock->count < 0x100); // Indicates memory corruption
154     }
155 
156     XTOS_RESTORE_INTLEVEL(irq_status);
157 #endif
158 }
159 
160 #ifdef __cplusplus
161 }
162 #endif
163